diff --git a/.cicd/build.sh b/.cicd/build.sh index e22658d57b6..8c2aa4c6ed2 100755 --- a/.cicd/build.sh +++ b/.cicd/build.sh @@ -7,7 +7,6 @@ if [[ "$(uname)" == 'Darwin' ]]; then # You can't use chained commands in execute if [[ "$TRAVIS" == 'true' ]]; then export PINNED=false - brew install https://raw.githubusercontent.com/Homebrew/homebrew-core/1d91e94e8ecdf6877ad2c24a7cda1114e50f2a14/Formula/llvm@4.rb # Workaround for Travis cannot build LLVM4 from source. brew reinstall openssl@1.1 # Fixes issue where builds in Travis cannot find libcrypto. ccache -s CMAKE_EXTRAS="$CMAKE_EXTRAS -DCMAKE_CXX_COMPILER_LAUNCHER=ccache" @@ -40,9 +39,10 @@ else # Linux CMAKE_EXTRAS="$CMAKE_EXTRAS -DCMAKE_CXX_COMPILER='clang++' -DCMAKE_C_COMPILER='clang'" elif [[ "$IMAGE_TAG" == 'centos-7.7-unpinned' ]]; then PRE_COMMANDS="$PRE_COMMANDS && source /opt/rh/devtoolset-8/enable && source /opt/rh/rh-python36/enable && export PATH=/usr/lib64/ccache:\\\$PATH" + CMAKE_EXTRAS="$CMAKE_EXTRAS -DLLVM_DIR='/opt/rh/llvm-toolset-7.0/root/usr/lib64/cmake/llvm'" elif [[ "$IMAGE_TAG" == 'ubuntu-18.04-unpinned' ]]; then PRE_COMMANDS="$PRE_COMMANDS && export PATH=/usr/lib/ccache:\\\$PATH" - CMAKE_EXTRAS="$CMAKE_EXTRAS -DCMAKE_CXX_COMPILER='clang++' -DCMAKE_C_COMPILER='clang'" + CMAKE_EXTRAS="$CMAKE_EXTRAS -DCMAKE_CXX_COMPILER='clang++-7' -DCMAKE_C_COMPILER='clang-7' -DLLVM_DIR='/usr/lib/llvm-7/lib/cmake/llvm'" fi BUILD_COMMANDS="cmake $CMAKE_EXTRAS .. 
&& make -j$JOBS" # Docker Commands diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 2ab222638c3..22df0570fcf 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -7,13 +7,17 @@ export MOJAVE_ANKA_TEMPLATE_NAME=${MOJAVE_ANKA_TEMPLATE_NAME:-'10.14.6_6C_14G_40 export PLATFORMS_JSON_ARRAY='[]' BUILDKITE_BUILD_AGENT_QUEUE='automation-eks-eos-builder-fleet' BUILDKITE_TEST_AGENT_QUEUE='automation-eks-eos-tester-fleet' +[[ -z "$ROUNDS" ]] && export ROUNDS='1' +LINUX_CONCURRENCY='8' +MAC_CONCURRENCY='2' +LINUX_CONCURRENCY_GROUP='eos-scheduled-build' +MAC_CONCURRENCY_GROUP='eos-scheduled-build-mac' + # Determine if it's a forked PR and make sure to add git fetch so we don't have to git clone the forked repo's url if [[ $BUILDKITE_BRANCH =~ ^pull/[0-9]+/head: ]]; then PR_ID=$(echo $BUILDKITE_BRANCH | cut -d/ -f2) export GIT_FETCH="git fetch -v --prune origin refs/pull/$PR_ID/head &&" fi - -[[ -z "$ROUNDS" ]] && export ROUNDS='1' # Determine which dockerfiles/scripts to use for the pipeline. if [[ $PINNED == false ]]; then export PLATFORM_TYPE="unpinned" @@ -89,6 +93,8 @@ echo '' echo ' # builds' echo $PLATFORMS_JSON_ARRAY | jq -cr '.[]' | while read -r PLATFORM_JSON; do if [[ ! 
"$(echo "$PLATFORM_JSON" | jq -r .FILE_NAME)" =~ 'macos' ]]; then + CONCURRENCY=$LINUX_CONCURRENCY + CONCURRENCY_GROUP=$LINUX_CONCURRENCY_GROUP cat <_RUNTIME_ENABLED defined in public libchain interface +# * ctest entries with --runtime +list(APPEND EOSIO_WASM_RUNTIMES wabt) #always enable wabt; it works everywhere and parts of eosio still assume it's always available +if(CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT WIN32) + if("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux" AND "${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64") + list(APPEND EOSIO_WASM_RUNTIMES eos-vm-oc) + # EOS VM OC requires LLVM, but move the check up here to a central location so that the EosioTester.cmakes + # can be created with the exact version found + find_package(LLVM REQUIRED CONFIG) + if(LLVM_VERSION_MAJOR VERSION_LESS 7 OR LLVM_VERSION_MAJOR VERSION_GREATER 9) + message(FATAL_ERROR "EOSIO requires an LLVM version 7.0 to 9.0") + endif() + endif() +endif() + +if(CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT WIN32) + list(APPEND EOSIO_WASM_RUNTIMES eos-vm) + if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64) + list(APPEND EOSIO_WASM_RUNTIMES eos-vm-jit) endif() endif() @@ -95,95 +99,33 @@ else() set(no_whole_archive_flag "--no-whole-archive") endif() -SET( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -IF( WIN32 ) - SET(BOOST_ROOT $ENV{BOOST_ROOT}) - set(Boost_USE_MULTITHREADED ON) - set(BOOST_ALL_DYN_LINK OFF) # force dynamic linking for all libraries -ENDIF(WIN32) -### Remove after Boost 1.70 CMake fixes are in place -set( Boost_NO_BOOST_CMAKE ON CACHE STRING "ON or OFF" ) -find_package(Boost 1.67 REQUIRED COMPONENTS - date_time - filesystem - system - program_options - chrono - unit_test_framework - iostreams) - -add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) - -set(CMAKE_THREAD_PREFER_PTHREAD TRUE) -set(THREADS_PREFER_PTHREAD_FLAG TRUE) -find_package(Threads) -link_libraries(Threads::Threads) - -if( WIN32 ) - - message( STATUS "Configuring EOSIO on WIN32") - set( DB_VERSION 60 ) - set( 
BDB_STATIC_LIBS 1 ) - - set( ZLIB_LIBRARIES "" ) - SET( DEFAULT_EXECUTABLE_INSTALL_DIR bin/ ) - - set(CRYPTO_LIB) - - #looks like this flag can have different default on some machines. - SET(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /SAFESEH:NO") - SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /SAFESEH:NO") - - # Probably cmake has a bug and vcxproj generated for executable in Debug conf. has disabled debug info - set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} /DEBUG") - - # On windows tcl should be installed to the directory pointed by setenv.bat script - SET(TCL_INCLUDE_PATH $ENV{TCL_ROOT}/include) - MESSAGE(STATUS "tcl INCLUDE PATH: ${TCL_INCLUDE_PATH}") - - FIND_PACKAGE(TCL) - MESSAGE(STATUS "tcl_library: ${TCL_LIBRARY}") - - SET(TCL_LIBS "optimized;${TCL_LIBRARY};debug;") - get_filename_component(TCL_LIB_PATH "${TCL_LIBRARY}" PATH) - get_filename_component(TCL_LIB_NAME "${TCL_LIBRARY}" NAME_WE) - get_filename_component(TCL_LIB_EXT "${TCL_LIBRARY}" EXT) - - SET(TCL_LIBS "${TCL_LIBS}${TCL_LIB_PATH}/${TCL_LIB_NAME}g${TCL_LIB_EXT}") - SET(TCL_LIBRARY ${TCL_LIBS}) - -else( WIN32 ) # Apple AND Linux - - if( APPLE ) - # Apple Specific Options Here - message( STATUS "Configuring EOSIO on OS X" ) - set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall -Wno-deprecated-declarations" ) - else( APPLE ) - # Linux Specific Options Here - message( STATUS "Configuring EOSIO on Linux" ) - set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall" ) - if ( FULL_STATIC_BUILD ) - set( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libstdc++ -static-libgcc") - endif ( FULL_STATIC_BUILD ) - - if( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" ) - if( CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.0.0 ) - set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-invalid-partial-specialization" ) - endif() - endif() - endif( APPLE ) +set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or 
OFF" ) +# Most boost deps get implictly picked up via fc, as just about everything links to fc. In addition we pick up +# the pthread dependency through fc. +find_package(Boost 1.67 REQUIRED COMPONENTS program_options unit_test_framework) - if( "${CMAKE_GENERATOR}" STREQUAL "Ninja" ) - if( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" ) - set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fcolor-diagnostics" ) +if( APPLE AND UNIX ) +# Apple Specific Options Here + message( STATUS "Configuring EOSIO on macOS" ) + set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall -Wno-deprecated-declarations" ) +else() + # Linux Specific Options Here + message( STATUS "Configuring EOSIO on Linux" ) + set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall" ) + if ( FULL_STATIC_BUILD ) + set( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libstdc++ -static-libgcc") + endif ( FULL_STATIC_BUILD ) + + if( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" ) + if( CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.0.0 ) + set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-invalid-partial-specialization" ) endif() endif() +endif() - # based on http://www.delorie.com/gnu/docs/gdb/gdb_70.html - # uncomment this line to tell GDB about macros (slows compile times) - # set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -gdwarf-2 -g3" ) - -endif( WIN32 ) +# based on http://www.delorie.com/gnu/docs/gdb/gdb_70.html +# uncomment this line to tell GDB about macros (slows compile times) +# set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -gdwarf-2 -g3" ) set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build EOSIO for code coverage analysis") @@ -221,7 +163,10 @@ add_subdirectory( scripts ) add_subdirectory( unittests ) add_subdirectory( tests ) add_subdirectory( tools ) -add_subdirectory( debian ) + +if (NOT DISABLE_WASM_SPEC_TESTS) +add_subdirectory( eosio-wasm-spec-tests/generated-tests ) +endif() install_directory_permissions(DIRECTORY 
${CMAKE_INSTALL_FULL_SYSCONFDIR}/eosio) @@ -257,6 +202,8 @@ configure_file(${CMAKE_SOURCE_DIR}/libraries/fc/src/network/LICENSE.go ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.go COPYONLY) configure_file(${CMAKE_SOURCE_DIR}/libraries/yubihsm/LICENSE ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.yubihsm COPYONLY) +configure_file(${CMAKE_SOURCE_DIR}/libraries/eos-vm/LICENSE + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.eos-vm COPYONLY) install(FILES LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ COMPONENT base) install(FILES libraries/wabt/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.wabt COMPONENT base) @@ -265,6 +212,7 @@ install(FILES libraries/wasm-jit/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATARO install(FILES libraries/fc/secp256k1/upstream/COPYING DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.secp256k1 COMPONENT base) install(FILES libraries/fc/src/network/LICENSE.go DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ COMPONENT base) install(FILES libraries/yubihsm/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.yubihsm COMPONENT base) +install(FILES libraries/eos-vm/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.eos-vm COMPONENT base) add_custom_target(base-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" diff --git a/CMakeModules/CMakeASM-LLVMWARInformation.cmake b/CMakeModules/CMakeASM-LLVMWARInformation.cmake new file mode 100755 index 00000000000..a94bfa5bfb0 --- /dev/null +++ b/CMakeModules/CMakeASM-LLVMWARInformation.cmake @@ -0,0 +1,9 @@ +# This trio of files implements a workaround for LLVM bug 39427 + +set(ASM_DIALECT "-LLVMWAR") +set(CMAKE_ASM${ASM_DIALECT}_SOURCE_FILE_EXTENSIONS llvmwar) + +set(CMAKE_ASM${ASM_DIALECT}_COMPILE_OBJECT "g++ -x c++ -O3 --std=c++11 -c <SOURCE> -o <OBJECT>") + +include(CMakeASMInformation) +set(ASM_DIALECT) diff --git 
a/CMakeModules/CMakeDetermineASM-LLVMWARCompiler.cmake b/CMakeModules/CMakeDetermineASM-LLVMWARCompiler.cmake new file mode 100755 index 00000000000..b424ff97858 --- /dev/null +++ b/CMakeModules/CMakeDetermineASM-LLVMWARCompiler.cmake @@ -0,0 +1,6 @@ +# This trio of files implements a workaround for LLVM bug 39427 + +set(ASM_DIALECT "-LLVMWAR") +set(CMAKE_ASM${ASM_DIALECT}_COMPILER_INIT "g++") +include(CMakeDetermineASMCompiler) +set(ASM_DIALECT) diff --git a/CMakeModules/CMakeTestASM-LLVMWARCompiler.cmake b/CMakeModules/CMakeTestASM-LLVMWARCompiler.cmake new file mode 100755 index 00000000000..de20fb5064f --- /dev/null +++ b/CMakeModules/CMakeTestASM-LLVMWARCompiler.cmake @@ -0,0 +1,5 @@ +# This trio of files implements a workaround for LLVM bug 39427 + +set(ASM_DIALECT "-LLVMWAR") +include(CMakeTestASMCompiler) +set(ASM_DIALECT) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index b22e638bb53..988fd241095 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -20,9 +20,11 @@ if( GPERFTOOLS_FOUND ) list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) endif() -find_package(LLVM 4.0 REQUIRED CONFIG) - -link_directories(${LLVM_LIBRARY_DIR}) +if(NOT "@LLVM_FOUND@" STREQUAL "") + find_package(LLVM @LLVM_VERSION@ EXACT REQUIRED CONFIG) + llvm_map_components_to_libnames(LLVM_LIBS support core passes mcjit native DebugInfoDWARF orcjit) + link_directories(${LLVM_LIBRARY_DIR}) +endif() set( CMAKE_CXX_STANDARD 17 ) set( CMAKE_CXX_EXTENSIONS ON ) @@ -63,8 +65,10 @@ find_library(libplatform Platform @CMAKE_INSTALL_PREFIX@/lib64 @CMAKE_INSTALL_PR find_library(liblogging Logging @CMAKE_INSTALL_PREFIX@/lib64 @CMAKE_INSTALL_PREFIX@/lib NO_DEFAULT_PATH) find_library(libruntime Runtime @CMAKE_INSTALL_PREFIX@/lib64 @CMAKE_INSTALL_PREFIX@/lib NO_DEFAULT_PATH) find_library(libsoftfloat softfloat @CMAKE_INSTALL_PREFIX@/lib64 @CMAKE_INSTALL_PREFIX@/lib NO_DEFAULT_PATH) -find_library(liboscrypto crypto @OPENSSL_ROOT_DIR@/lib 
NO_DEFAULT_PATH) -find_library(libosssl ssl @OPENSSL_ROOT_DIR@/lib NO_DEFAULT_PATH) +get_filename_component(cryptodir @OPENSSL_CRYPTO_LIBRARY@ DIRECTORY) +find_library(liboscrypto crypto "${cryptodir}" NO_DEFAULT_PATH) +get_filename_component(ssldir @OPENSSL_SSL_LIBRARY@ DIRECTORY) +find_library(libosssl ssl "${ssldir}" NO_DEFAULT_PATH) find_library(libchainbase chainbase @CMAKE_INSTALL_PREFIX@/lib64 @CMAKE_INSTALL_PREFIX@/lib NO_DEFAULT_PATH) find_library(libbuiltins builtins @CMAKE_INSTALL_PREFIX@/lib64 @CMAKE_INSTALL_PREFIX@/lib NO_DEFAULT_PATH) find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir @@ -74,10 +78,14 @@ find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir DOC "Path to the GMP library" ) +set(EOSIO_WASM_RUNTIMES @EOSIO_WASM_RUNTIMES@) +if("eos-vm-oc" IN_LIST EOSIO_WASM_RUNTIMES) + set(WRAP_MAIN "-Wl,-wrap=main") +endif() + macro(add_eosio_test_executable test_name) add_executable( ${test_name} ${ARGN} ) target_link_libraries( ${test_name} - ${LLVM} ${libtester} ${libchain} ${libfc} @@ -96,37 +104,18 @@ macro(add_eosio_test_executable test_name) ${libsecp256k1} ${GMP_LIBRARIES} - LLVMX86Disassembler - LLVMX86AsmParser - LLVMX86AsmPrinter - LLVMX86CodeGen - - LLVMSelectionDAG - - LLVMDebugInfoDWARF - LLVMAsmPrinter - LLVMMCParser - LLVMX86Info - - LLVMOrcJIT - LLVMExecutionEngine - - LLVMCodeGen - LLVMScalarOpts - LLVMTransformUtils - - LLVMipo - LLVMAnalysis - LLVMTarget - LLVMMC - LLVMCore - LLVMSupport ${Boost_FILESYSTEM_LIBRARY} ${Boost_SYSTEM_LIBRARY} ${Boost_CHRONO_LIBRARY} ${Boost_IOSTREAMS_LIBRARY} + "-lz" # Needed by Boost iostreams ${Boost_DATE_TIME_LIBRARY} + + ${LLVM_LIBS} + ${PLATFORM_SPECIFIC_LIBS} + + ${WRAP_MAIN} ) target_include_directories( ${test_name} PUBLIC diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 4149ef5251d..62704dcf78d 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -18,9 +18,11 @@ if( 
GPERFTOOLS_FOUND ) list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) endif() -find_package(LLVM 4.0 REQUIRED CONFIG) - -link_directories(${LLVM_LIBRARY_DIR}) +if(NOT "@LLVM_FOUND@" STREQUAL "") + find_package(LLVM @LLVM_VERSION@ EXACT REQUIRED CONFIG) + llvm_map_components_to_libnames(LLVM_LIBS support core passes mcjit native DebugInfoDWARF orcjit) + link_directories(${LLVM_LIBRARY_DIR}) +endif() set( CMAKE_CXX_STANDARD 17 ) set( CMAKE_CXX_EXTENSIONS ON ) @@ -62,8 +64,10 @@ find_library(libplatform Platform @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/P find_library(liblogging Logging @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/Logging NO_DEFAULT_PATH) find_library(libruntime Runtime @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/Runtime NO_DEFAULT_PATH) find_library(libsoftfloat softfloat @CMAKE_BINARY_DIR@/libraries/softfloat NO_DEFAULT_PATH) -find_library(liboscrypto crypto @OPENSSL_ROOT_DIR@/lib NO_DEFAULT_PATH) -find_library(libosssl ssl @OPENSSL_ROOT_DIR@/lib NO_DEFAULT_PATH) +get_filename_component(cryptodir @OPENSSL_CRYPTO_LIBRARY@ DIRECTORY) +find_library(liboscrypto crypto "${cryptodir}" NO_DEFAULT_PATH) +get_filename_component(ssldir @OPENSSL_SSL_LIBRARY@ DIRECTORY) +find_library(libosssl ssl "${ssldir}" NO_DEFAULT_PATH) find_library(libchainbase chainbase @CMAKE_BINARY_DIR@/libraries/chainbase NO_DEFAULT_PATH) find_library(libbuiltins builtins @CMAKE_BINARY_DIR@/libraries/builtins NO_DEFAULT_PATH) find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir @@ -73,10 +77,14 @@ find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir DOC "Path to the GMP library" ) +set(EOSIO_WASM_RUNTIMES @EOSIO_WASM_RUNTIMES@) +if("eos-vm-oc" IN_LIST EOSIO_WASM_RUNTIMES) + set(WRAP_MAIN "-Wl,-wrap=main") +endif() + macro(add_eosio_test_executable test_name) add_executable( ${test_name} ${ARGN} ) target_link_libraries( ${test_name} - ${LLVM} ${libtester} ${libchain} ${libfc} @@ -95,37 +103,18 @@ macro(add_eosio_test_executable test_name) ${libsecp256k1} 
${GMP_LIBRARIES} - LLVMX86Disassembler - LLVMX86AsmParser - LLVMX86AsmPrinter - LLVMX86CodeGen - - LLVMSelectionDAG - - LLVMDebugInfoDWARF - LLVMAsmPrinter - LLVMMCParser - LLVMX86Info - - LLVMOrcJIT - LLVMExecutionEngine - - LLVMCodeGen - LLVMScalarOpts - LLVMTransformUtils - - LLVMipo - LLVMAnalysis - LLVMTarget - LLVMMC - LLVMCore - LLVMSupport ${Boost_FILESYSTEM_LIBRARY} ${Boost_SYSTEM_LIBRARY} ${Boost_CHRONO_LIBRARY} ${Boost_IOSTREAMS_LIBRARY} + "-lz" # Needed by Boost iostreams ${Boost_DATE_TIME_LIBRARY} + + ${LLVM_LIBS} + ${PLATFORM_SPECIFIC_LIBS} + + ${WRAP_MAIN} ) target_include_directories( ${test_name} PUBLIC diff --git a/CMakeModules/VersionUtils.cmake b/CMakeModules/VersionUtils.cmake new file mode 100644 index 00000000000..d4ef7b0d561 --- /dev/null +++ b/CMakeModules/VersionUtils.cmake @@ -0,0 +1,40 @@ +cmake_minimum_required(VERSION 3.5) + +function(GENERATE_VERSION_METADATA) + # Execute `git` to grab the corresponding data. + execute_process( + COMMAND ${GIT_EXEC} rev-parse HEAD + WORKING_DIRECTORY ${SRC_DIR} + OUTPUT_VARIABLE V_HASH + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE ) + execute_process( + COMMAND ${GIT_EXEC} diff --quiet + WORKING_DIRECTORY ${SRC_DIR} + RESULT_VARIABLE V_DIRTY + ERROR_QUIET + OUTPUT_STRIP_TRAILING_WHITESPACE ) + + # If `V_DIRTY` is equal to 1, we know that the repository is dirty and vice versa. + if(${V_DIRTY}) + set(V_DIRTY "true") + else() + set(V_DIRTY "false") + endif() + + # Define the proper version metadata for the file `version_impl.cpp.in`. + set(_VERSION_MAJOR_ ${V_MAJOR}) + set(_VERSION_MINOR_ ${V_MINOR}) + set(_VERSION_PATCH_ ${V_PATCH}) + set(_VERSION_SUFFIX_ ${V_SUFFIX}) + set(_VERSION_HASH_ ${V_HASH}) + set(_VERSION_DIRTY_ ${V_DIRTY}) + + # Modify and substitute the `.cpp.in` file for a `.cpp` in the build directory. 
+ configure_file( + ${CUR_SRC_DIR}/src/version_impl.cpp.in + ${CUR_BIN_DIR}/src/version_impl.cpp + @ONLY ) +endfunction(GENERATE_VERSION_METADATA) + +GENERATE_VERSION_METADATA() diff --git a/CMakeModules/installer.cmake b/CMakeModules/installer.cmake deleted file mode 100644 index e4ca2b7e01f..00000000000 --- a/CMakeModules/installer.cmake +++ /dev/null @@ -1,49 +0,0 @@ -include(InstallRequiredSystemLibraries) - -#install_directory_permissions( DIRECTORY usr/${CMAKE_INSTALL_INCLUDEDIR}/eosio ) - -set(CPACK_PACKAGE_CONTACT "support@block.one") -set(CPACK_OUTPUT_FILE_PREFIX ${CMAKE_BINARY_DIR}/packages) -if(NOT DEFINED CMAKE_INSTALL_PREFIX) - set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install) -endif() - -SET(CPACK_PACKAGE_DIRECTORY "${CMAKE_BINARY_DIR}/install") -set(CPACK_PACKAGE_NAME "EOS.IO") -set(CPACK_PACKAGE_VENDOR "block.one") -set(CPACK_PACKAGE_VERSION_MAJOR "${VERSION_MAJOR}") -set(CPACK_PACKAGE_VERSION_MINOR "${VERSION_MINOR}") -set(CPACK_PACKAGE_VERSION_PATCH "${VERSION_PATCH}") -set(CPACK_PACKAGE_VERSION "${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.${CPACK_PACKAGE_VERSION_PATCH}") -set(CPACK_PACKAGE_DESCRIPTION "Software for the EOS.IO network") -set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Software for the EOS.IO network") -set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE") -set(CPACK_PACKAGE_INSTALL_DIRECTORY "EOS.IO ${CPACK_PACKAGE_VERSION}") - -if(WIN32) - set(CPACK_GENERATOR "ZIP;NSIS") - set(CPACK_NSIS_EXECUTABLES_DIRECTORY .) 
- set(CPACK_NSIS_PACKAGE_NAME "EOS.IO v${CPACK_PACKAGE_VERSION}") - set(CPACK_NSIS_DISPLAY_NAME "${CPACK_NSIS_PACKAGE_NAME}") - set(CPACK_NSIS_DEFINES " !define MUI_STARTMENUPAGE_DEFAULTFOLDER \\\"EOS.IO\\\"") - # windows zip files usually don't have a single directory inside them, unix tgz usually do - set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY 0) -elseif(APPLE) - set(CPACK_GENERATOR "DragNDrop") -else() - set(CPACK_GENERATOR "DEB") - set(CPACK_DEBIAN_PACKAGE_RELEASE 0) - if(CMAKE_VERSION VERSION_GREATER 3.6.0) # Buggy in 3.5, behaves like VERSION_GREATER_EQUAL - set(CPACK_DEBIAN_FILE_NAME "DEB-DEFAULT") - else() - string(TOLOWER ${CPACK_PACKAGE_NAME} CPACK_DEBIAN_PACKAGE_NAME) - execute_process(COMMAND dpkg --print-architecture OUTPUT_VARIABLE CPACK_DEBIAN_PACKAGE_ARCHITECTURE OUTPUT_STRIP_TRAILING_WHITESPACE) - SET(CPACK_PACKAGE_FILE_NAME ${CPACK_DEBIAN_PACKAGE_NAME}_${CPACK_PACKAGE_VERSION}-${CPACK_DEBIAN_PACKAGE_RELEASE}_${CPACK_DEBIAN_PACKAGE_ARCHITECTURE}) - endif() - set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON) - set(CPACK_INCLUDE_TOPLEVEL_DIRECTORY TRUE) - set(CPACK_DEBIAN_PACKAGE_CONTROL_STRICT_PERMISSION TRUE) - set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/EOSIO/eos") -endif() - -include(CPack) diff --git a/CMakeModules/utils.cmake b/CMakeModules/utils.cmake index 2b15f3d7d28..d0d55283e75 100644 --- a/CMakeModules/utils.cmake +++ b/CMakeModules/utils.cmake @@ -1,4 +1,4 @@ macro( copy_bin file ) add_custom_command( TARGET ${file} POST_BUILD COMMAND mkdir -p ${CMAKE_BINARY_DIR}/bin ) - add_custom_command( TARGET ${file} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_BINARY_DIR}/${file} ${CMAKE_BINARY_DIR}/bin/ ) + add_custom_command( TARGET ${file} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_BINARY_DIR}/${file}${CMAKE_EXECUTABLE_SUFFIX} ${CMAKE_BINARY_DIR}/bin/ ) endmacro( copy_bin ) diff --git a/README.md b/README.md index 1723638aa15..652c195e26a 100644 --- a/README.md +++ b/README.md @@ -20,18 +20,46 @@ Some of the 
groundbreaking features of EOSIO include: 1. Designed for Parallel Execution of Context Free Validation Logic 1. Designed for Inter Blockchain Communication -EOSIO is released under the open source MIT license and is offered “AS IS” without warranty of any kind, express or implied. Any security provided by the EOSIO software depends in part on how it is used, configured, and deployed. EOSIO is built upon many third-party libraries such as WABT (Apache License) and WAVM (BSD 3-clause) which are also provided “AS IS” without warranty of any kind. Without limiting the generality of the foregoing, Block.one makes no representation or guarantee that EOSIO or any third-party libraries will perform as intended or will be free of errors, bugs or faulty code. Both may fail in large or small ways that could completely or partially limit functionality or compromise computer systems. If you use or implement EOSIO, you do so at your own risk. In no event will Block.one be liable to any party for any damages whatsoever, even if it had been advised of the possibility of damage. +## Disclaimer Block.one is neither launching nor operating any initial public blockchains based upon the EOSIO software. This release refers only to version 1.0 of our open source software. We caution those who wish to use blockchains built on EOSIO to carefully vet the companies and organizations launching blockchains based on EOSIO before disclosing any private keys to their derivative software. +## Testnets + There is no public testnet running currently. +## Supported Operating Systems + +EOSIO currently supports the following operating systems: + +1. Amazon Linux 2 +2. CentOS 7 +3. Ubuntu 16.04 +4. Ubuntu 18.04 +5. MacOS 10.14 (Mojave) + +--- + +**Note: It may be possible to install EOSIO on other Unix-based operating systems. 
This is not officially supported, though.** + +--- + +## Software Installation + +If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](#prebuilt-binaries), then proceed to the [Getting Started](https://developers.eos.io/eosio-home/docs) walkthrough. If you are an advanced developer, a block producer, or no binaries are available for your platform, you may need to [Build EOSIO from source](https://eosio.github.io/eos/latest/install/build-from-source). + --- -**If you used our build scripts to install eosio, [please be sure to uninstall](#build-script-uninstall) before using our packages.** +**Note: If you used our scripts to build/install EOSIO, please run the [Uninstall Script](#uninstall-script) before using our prebuilt binary packages.** --- +## Prebuilt Binaries + +Prebuilt EOSIO software packages are available for the operating systems below. Find and follow the instructions for your OS: + +### Mac OS X: + #### Mac OS X Brew Install ```sh $ brew tap eosio/eosio @@ -42,44 +70,49 @@ $ brew install eosio $ brew remove eosio ``` +### Ubuntu Linux: + #### Ubuntu 18.04 Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.8.9/eosio_1.8.9-1-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_1.8.9-1-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v2.0.0/eosio_2.0.0-1-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_2.0.0-1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.8.9/eosio_1.8.9-1-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_1.8.9-1-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v2.0.0/eosio_2.0.0-1-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_2.0.0-1-ubuntu-16.04_amd64.deb ``` #### Ubuntu Package Uninstall ```sh $ sudo apt remove eosio ``` -#### Centos RPM Package Install + +### RPM-based (CentOS, Amazon Linux, etc.): + +#### RPM Package Install 
```sh -$ wget https://github.com/eosio/eos/releases/download/v1.8.9/eosio-1.8.9-1.el7.x86_64.rpm -$ sudo yum install ./eosio-1.8.9-1.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v2.0.0/eosio-2.0.0-1.el7.x86_64.rpm +$ sudo yum install ./eosio-2.0.0-1.el7.x86_64.rpm ``` -#### Centos RPM Package Uninstall +#### RPM Package Uninstall ```sh $ sudo yum remove eosio ``` -#### Build Script Uninstall - -If you have previously installed EOSIO using build scripts, you can execute `eosio_uninstall.sh` to uninstall. -- Passing `-y` will answer yes to all prompts (does not remove data directories) -- Passing `-f` will remove data directories (be very careful with this) -- Passing in `-i` allows you to specify where your eosio installation is located +## Uninstall Script +To uninstall the EOSIO built/installed binaries and dependencies, run: +```sh +./scripts/eosio_uninstall.sh +``` -## Supported Operating Systems -EOSIO currently supports the following operating systems: -1. Amazon Linux 2 -2. CentOS 7 -3. Ubuntu 16.04 -4. Ubuntu 18.04 -5. MacOS 10.14 (Mojave) +## Documentation +1. [Nodeos](http://eosio.github.io/eos/nodeos/) + - [Usage](http://eosio.github.io/eos/nodeos/usage/index) + - [Replays](http://eosio.github.io/eos/nodeos/replays/index) + - [Chain API Reference](http://eosio.github.io/eos/nodeos/plugins/chain_api_plugin/api-reference/index) + - [Troubleshooting](http://eosio.github.io/eos/nodeos/troubleshooting/index) +1. [Cleos](http://eosio.github.io/eos/cleos/) +1. [Keosd](http://eosio.github.io/eos/keosd/) ## Resources 1. 
[Website](https://eos.io) @@ -93,7 +126,7 @@ EOSIO currently supports the following operating systems: ## Getting Started -Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in [Getting Started](https://developers.eos.io/eosio-home/docs) on the [EOSIO Developer Portal](https://developers.eos.io). +Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in the [Getting Started](https://developers.eos.io/eosio-home/docs) walkthrough. ## Contributing @@ -103,7 +136,7 @@ Instructions detailing the process of getting the software, building it, running ## License -[MIT](./LICENSE) +EOSIO is released under the open source [MIT](./LICENSE) license and is offered “AS IS” without warranty of any kind, express or implied. Any security provided by the EOSIO software depends in part on how it is used, configured, and deployed. EOSIO is built upon many third-party libraries such as WABT (Apache License) and WAVM (BSD 3-clause) which are also provided “AS IS” without warranty of any kind. Without limiting the generality of the foregoing, Block.one makes no representation or guarantee that EOSIO or any third-party libraries will perform as intended or will be free of errors, bugs or faulty code. Both may fail in large or small ways that could completely or partially limit functionality or compromise computer systems. If you use or implement EOSIO, you do so at your own risk. In no event will Block.one be liable to any party for any damages whatsoever, even if it had been advised of the possibility of damage. 
## Important diff --git a/debian/CMakeLists.txt b/debian/CMakeLists.txt deleted file mode 100644 index d56b72cde2a..00000000000 --- a/debian/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${CMAKE_CURRENT_SOURCE_DIR}/postinst" PARENT_SCOPE) diff --git a/debian/postinst b/debian/postinst deleted file mode 100755 index 42123695c90..00000000000 --- a/debian/postinst +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/sh -# postinst script for eosio - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -set -e - -PACKAGE="eosio" -USER="eosio" -GROUP=${USER} - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see http://www.debian.org/doc/debian-policy/ or -# the debian-policy package - -# source debconf library -. /usr/share/debconf/confmodule - -case "$1" in - - configure) - set +e - getent passwd ${USER} > /dev/null 2>&1 - if [ $? 
-ne 0 ]; then - adduser --no-create-home --group --system ${USER} - fi - set -e - chown ${USER}:${GROUP} /var/log/${PACKAGE} - chown ${USER}:${GROUP} /var/lib/${PACKAGE} - chown ${USER}:${GROUP} /etc/${PACKAGE} - chown ${USER}:${GROUP} /etc/${PACKAGE}/node_00 - chown ${USER} /usr/bin/nodeos - chmod u+s /usr/bin/nodeos - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - exit 0 - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; - -esac - -exit 0 diff --git a/docs/00_install/00_install-prebuilt-binaries.md b/docs/00_install/00_install-prebuilt-binaries.md new file mode 100644 index 00000000000..d2c9beb9208 --- /dev/null +++ b/docs/00_install/00_install-prebuilt-binaries.md @@ -0,0 +1,71 @@ +--- +content_title: Install Prebuilt Binaries +--- + +[[info | Previous Builds]] +| If you have previously installed EOSIO from source using shell scripts, you must first run the [Uninstall Script](01_build-from-source/01_shell-scripts/05_uninstall-eosio.md) before installing any prebuilt binaries on the same OS. + +## Prebuilt Binaries + +Prebuilt EOSIO software packages are available for the operating systems below. 
Find and follow the instructions for your OS: + +### Mac OS X: + +#### Mac OS X Brew Install +```sh +$ brew tap eosio/eosio +$ brew install eosio +``` +#### Mac OS X Brew Uninstall +```sh +$ brew remove eosio +``` + +### Ubuntu Linux: + +#### Ubuntu 18.04 Package Install +```sh +$ wget https://github.com/eosio/eos/releases/download/v2.0.0/eosio_2.0.0-1-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_2.0.0-1-ubuntu-18.04_amd64.deb +``` +#### Ubuntu 16.04 Package Install +```sh +$ wget https://github.com/eosio/eos/releases/download/v2.0.0/eosio_2.0.0-1-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_2.0.0-1-ubuntu-16.04_amd64.deb +``` +#### Ubuntu Package Uninstall +```sh +$ sudo apt remove eosio +``` + +### RPM-based (CentOS, Amazon Linux, etc.): + +#### RPM Package Install +```sh +$ wget https://github.com/eosio/eos/releases/download/v2.0.0/eosio-2.0.0-1.el7.x86_64.rpm +$ sudo yum install ./eosio-2.0.0-1.el7.x86_64.rpm +``` +#### RPM Package Uninstall +```sh +$ sudo yum remove eosio +``` + +## Location of EOSIO binaries + +After installing the prebuilt packages, the actual EOSIO binaries will be located under: +* `/usr/opt/eosio/<version-string>/bin` (Linux-based); or +* `/usr/local/Cellar/eosio/<version-string>/bin` (MacOS) + +where `version-string` is the EOSIO version that was installed; e.g. `2.0.0-rc2`. + +Also, soft links for each EOSIO program (`nodeos`, `cleos`, `keosd`, etc.) will be created under `/usr/bin` or `/usr/local/bin` to allow them to be executed from any directory. + +## Previous Versions + +To install previous versions of the EOSIO prebuilt binaries: + +1. Browse to https://github.com/EOSIO/eos/tags and select the actual version of the EOSIO software you need to install. + +2. Scroll down past the `Release Notes` and download the package or archive that you need for your OS. + +3. Follow the instructions on the first paragraph above to install the selected prebuilt binaries on the given OS. 
diff --git a/docs/00_install/01_build-from-source/01_shell-scripts/01_download-eosio-source.md b/docs/00_install/01_build-from-source/01_shell-scripts/01_download-eosio-source.md new file mode 100644 index 00000000000..b5fd55849e6 --- /dev/null +++ b/docs/00_install/01_build-from-source/01_shell-scripts/01_download-eosio-source.md @@ -0,0 +1,31 @@ +--- +content_title: Download EOSIO Source +--- + +To download the EOSIO source code, clone the `eos` repo and its submodules. It is advised to create a home `eosio` folder first and download all the EOSIO related software there: + +```sh +$ mkdir -p ~/eosio && cd ~/eosio +$ git clone --recursive https://github.com/EOSIO/eos +``` + +## Update Submodules + +If a repository is cloned without the `--recursive` flag, the submodules *must* be updated before starting the build process: + +```sh +$ cd ~/eosio/eos +$ git submodule update --init --recursive +``` + +## Pull Changes + +When pulling changes, especially after switching branches, the submodules *must* also be updated. This can be achieved with the `git submodule` command as above, or using `git pull` directly: + +```sh +$ [git checkout <branch>] (optional) +$ git pull --recurse-submodules +``` + +[[info | What's Next?]] +| [Build EOSIO binaries](02_build-eosio-binaries.md) diff --git a/docs/00_install/01_build-from-source/01_shell-scripts/02_build-eosio-binaries.md b/docs/00_install/01_build-from-source/01_shell-scripts/02_build-eosio-binaries.md new file mode 100644 index 00000000000..aec16f98e43 --- /dev/null +++ b/docs/00_install/01_build-from-source/01_shell-scripts/02_build-eosio-binaries.md @@ -0,0 +1,18 @@ +--- +content_title: Build EOSIO Binaries +--- + +[[info | Shell Scripts]] +| The build script is one of various automated shell scripts provided in the EOSIO repository for building, installing, and optionally uninstalling the EOSIO software and its dependencies. They are available in the `eos/scripts` folder. 
+ +The build script first installs all dependencies and then builds EOSIO. The script supports these [Operating Systems](../../index.md#supported-operating-systems). To run it, first change to the `~/eosio/eos` folder, then launch the script: + +```sh +$ cd ~/eosio/eos +$ ./scripts/eosio_build.sh +``` + +The build process writes temporary content to the `eos/build` folder. After building, the program binaries can be found at `eos/build/programs`. + +[[info | What's Next?]] +| [Installing EOSIO](03_install-eosio-binaries.md) is strongly recommended after building from source as it makes local development significantly more friendly. diff --git a/docs/00_install/01_build-from-source/01_shell-scripts/03_install-eosio-binaries.md b/docs/00_install/01_build-from-source/01_shell-scripts/03_install-eosio-binaries.md new file mode 100644 index 00000000000..5ea08c6ddbc --- /dev/null +++ b/docs/00_install/01_build-from-source/01_shell-scripts/03_install-eosio-binaries.md @@ -0,0 +1,24 @@ +--- +content_title: Install EOSIO Binaries +--- + +## EOSIO install script + +For ease of contract development, content can be installed at the `/usr/local` folder using the `eosio_install.sh` script within the `eos/scripts` folder. Adequate permission is required to install on system folders: + +```sh +$ cd ~/eosio/eos +$ sudo ./scripts/eosio_install.sh +``` + +## EOSIO manual install + +In lieu of the `eosio_install.sh` script, you can install the EOSIO binaries directly by invoking `make install` within the `eos/build` folder. Again, adequate permission is required to install on system folders: + +```sh +$ cd ~/eosio/eos/build +$ sudo make install +``` + +[[info | What's Next?]] +| Configure and use [Nodeos](../../../01_nodeos/index.md), or optionally [Test the EOSIO binaries](04_test-eosio-binaries.md). 
diff --git a/docs/00_install/01_build-from-source/01_shell-scripts/04_test-eosio-binaries.md b/docs/00_install/01_build-from-source/01_shell-scripts/04_test-eosio-binaries.md new file mode 100644 index 00000000000..c3b0cf4c9b3 --- /dev/null +++ b/docs/00_install/01_build-from-source/01_shell-scripts/04_test-eosio-binaries.md @@ -0,0 +1,32 @@ +--- +content_title: Test EOSIO Binaries +--- + +Optionally, a set of tests can be run against your build to perform some basic validation of the EOSIO software installation. + +To run the test suite after building, start `mongod`: + +On Linux platforms: +```sh +~/opt/mongodb/bin/mongod -f ~/opt/mongodb/mongod.conf & +``` + +On MacOS: +```sh +/usr/local/bin/mongod -f /usr/local/etc/mongod.conf & +``` + +then set the build path to EOSIO_HOME: +```sh +export EOSIO_HOME=~/eosio/eos/build +``` + +then run `make test` on all platforms: + +```sh +cd ~/eosio/eos/build +make test +``` + +[[info | What's Next?]] +| Configure and use [Nodeos](../../../01_nodeos/index.md). diff --git a/docs/00_install/01_build-from-source/01_shell-scripts/05_uninstall-eosio.md b/docs/00_install/01_build-from-source/01_shell-scripts/05_uninstall-eosio.md new file mode 100644 index 00000000000..00688a07bab --- /dev/null +++ b/docs/00_install/01_build-from-source/01_shell-scripts/05_uninstall-eosio.md @@ -0,0 +1,13 @@ +--- +content_title: Uninstall EOSIO +--- + +If you have previously built EOSIO from source and now wish to install the prebuilt binaries, or to build from source again, it is recommended to run the `eosio_uninstall.sh` script within the `eos/scripts` folder: + +```sh +$ cd ~/eosio/eos +$ sudo ./scripts/eosio_uninstall.sh +``` + +[[info | Uninstall Dependencies]] +| The uninstall script will also prompt the user to uninstall EOSIO dependencies. This is recommended if installing prebuilt EOSIO binaries or building EOSIO for the first time. 
diff --git a/docs/00_install/01_build-from-source/01_shell-scripts/index.md b/docs/00_install/01_build-from-source/01_shell-scripts/index.md new file mode 100644 index 00000000000..6e1f1ffbbe0 --- /dev/null +++ b/docs/00_install/01_build-from-source/01_shell-scripts/index.md @@ -0,0 +1,17 @@ +--- +content_title: Shell Scripts +--- + +[[info | Did you know?]] +| Shell scripts automate the process of building, installing, testing, and uninstalling the EOSIO software and dependencies. + +To build EOSIO from the source code using shell scripts, visit the sections below: + +1. [Download EOSIO Source](01_download-eosio-source.md) +2. [Build EOSIO Binaries](02_build-eosio-binaries.md) +3. [Install EOSIO Binaries](03_install-eosio-binaries.md) +4. [Test EOSIO Binaries](04_test-eosio-binaries.md) +5. [Uninstall EOSIO](05_uninstall-eosio.md) + +[[info | Building EOSIO is for Advanced Developers]] +| If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](../../00_install-prebuilt-binaries.md) instead of building from source. diff --git a/docs/00_install/01_build-from-source/02_manual-build/00_eosio-dependencies.md b/docs/00_install/01_build-from-source/02_manual-build/00_eosio-dependencies.md new file mode 100644 index 00000000000..2b52fdfe7e4 --- /dev/null +++ b/docs/00_install/01_build-from-source/02_manual-build/00_eosio-dependencies.md @@ -0,0 +1,45 @@ +--- +content_title: EOSIO Software Dependencies +--- + +The EOSIO software requires specific software dependencies to build the EOSIO binaries. These dependencies can be built from source or installed from binaries directly. Dependencies can be pinned to a specific version release or unpinned to the current version, usually the latest one. 
The main EOSIO dependencies hosted outside the EOSIO repos are: + +* Clang - the C++17 compliant compiler used by EOSIO +* CMake - the build system used by EOSIO +* Boost - the C++ Boost library used by EOSIO +* OpenSSL - the secure communications (and crypto) library +* LLVM - the LLVM compiler/toolchain infrastructure + +Other dependencies are either inside the EOSIO repo, such as the `secp256k1` elliptic curve DSA library, or they are otherwise used for testing or housekeeping purposes, such as: + +* automake, autoconf, autotools +* doxygen, graphviz +* python2, python3 +* bzip2, zlib +* etc. + +## Pinned Dependencies + +To guarantee interoperability across different EOSIO software releases, developers may opt to reproduce the exact "pinned" dependency binaries used in-house. Block producers may want to install and run the EOSIO software built with these pinned dependencies for portability and stability reasons. Pinned dependencies are usually built from source. + +## Unpinned Dependencies + +Regular users or application developers may prefer installing unpinned versions of the EOSIO dependencies. These correspond to the latest or otherwise stable versions of the dependencies. The main advantage of unpinned dependencies is reduced installation times and perhaps better performance. Unpinned dependencies are typically installed from binaries. + +## Automatic Installation of Dependencies + +EOSIO dependencies can be built or installed automatically from the [Build Script](../01_shell-scripts/02_build-eosio-binaries.md) when building EOSIO from source. To build the pinned dependencies, the optional `-P` parameter can be specified when invoking the script. 
Otherwise, the unpinned dependencies will be installed instead, with the exception of `boost` and `cmake` which are always pinned: + +```sh +$ cd ~/eosio/eos +$ ./scripts/eosio_build.sh [-P] +``` + +### Unsupported Platforms + +EOSIO dependencies can also be built and installed manually by reproducing the same commands invoked by the [Build Script](../01_shell-scripts/02_build-eosio-binaries.md). The actual commands can be generated from the script directly by exporting specific environment variables and CLI parameters to the script when invoked: + +```sh +$ cd ~/eosio/eos +$ export VERBOSE=true && export DRYRUN=true && ./scripts/eosio_build.sh -y [-P] +``` diff --git a/docs/00_install/01_build-from-source/02_manual-build/03_platforms/amazon_linux-2-pinned.md b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/amazon_linux-2-pinned.md new file mode 100644 index 00000000000..2cde1ba4ada --- /dev/null +++ b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/amazon_linux-2-pinned.md @@ -0,0 +1,136 @@ +--- +content_title: Amazon Linux 2 (pinned) +--- + +This section contains shell commands to manually download, build, install, test, and uninstall EOSIO and dependencies on Amazon Linux 2. + +[[info | Building EOSIO is for Advanced Developers]] +| If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](../../../00_install-prebuilt-binaries.md) instead of building from source. + +Select a task below, then copy/paste the shell commands to a Unix terminal to execute: + +* [Download EOSIO Repository](#download-eosio-repository) +* [Install EOSIO Dependencies](#install-eosio-dependencies) +* [Build EOSIO](#build-eosio) +* [Install EOSIO](#install-eosio) +* [Test EOSIO](#test-eosio) +* [Uninstall EOSIO](#uninstall-eosio) + +[[info | Building EOSIO on another OS?]] +| Visit the [Build EOSIO from Source](../../index.md) section. 
+ +## Download EOSIO Repository +These commands set the EOSIO directories, install git, and clone the EOSIO repository. +```sh +# set EOSIO directories +export EOSIO_LOCATION=~/eosio/eos +export EOSIO_INSTALL_LOCATION=$EOSIO_LOCATION/../install +mkdir -p $EOSIO_INSTALL_LOCATION +# install git +yum update -y && yum install -y git +# clone EOSIO repository +git clone https://github.com/EOSIO/eos.git $EOSIO_LOCATION +cd $EOSIO_LOCATION && git submodule update --init --recursive +``` + +## Install EOSIO Dependencies +These commands install the EOSIO software dependencies. Make sure to [Download the EOSIO Repository](#download-eosio-repository) first and set the EOSIO directories. +```sh +# install dependencies +yum install -y which sudo procps-ng util-linux autoconf automake \ + libtool make bzip2 bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel \ + libusbx-devel python3 python3-devel python-devel libedit-devel doxygen \ + graphviz patch gcc gcc-c++ vim-common jq +# build cmake +export PATH=$EOSIO_INSTALL_LOCATION/bin:$PATH +cd $EOSIO_INSTALL_LOCATION && curl -LO https://cmake.org/files/v3.13/cmake-3.13.2.tar.gz && \ + tar -xzf cmake-3.13.2.tar.gz && \ + cd cmake-3.13.2 && \ + ./bootstrap --prefix=$EOSIO_INSTALL_LOCATION && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/cmake-3.13.2.tar.gz $EOSIO_INSTALL_LOCATION/cmake-3.13.2 +# build clang +cd $EOSIO_INSTALL_LOCATION && git clone --single-branch --branch release_80 https://git.llvm.org/git/llvm.git clang8 && cd clang8 && git checkout 18e41dc && \ + cd tools && git clone --single-branch --branch release_80 https://git.llvm.org/git/lld.git && cd lld && git checkout d60a035 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/polly.git && cd polly && git checkout 1bc06e5 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang.git clang && cd clang && git checkout a03da8b && \ + cd tools && mkdir extra && cd 
extra && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ + cd $EOSIO_INSTALL_LOCATION/clang8/projects && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxx.git && cd libcxx && git checkout 1853712 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libunwind.git && cd libunwind && git checkout 57f6739 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ + mkdir $EOSIO_INSTALL_LOCATION/clang8/build && cd $EOSIO_INSTALL_LOCATION/clang8/build && \ + cmake -G 'Unix Makefiles' -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=X86 -DCMAKE_BUILD_TYPE=Release .. && \ + make -j $(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/clang8 +# build llvm +cd $EOSIO_INSTALL_LOCATION && git clone --depth 1 --single-branch --branch release_80 https://github.com/llvm-mirror/llvm.git llvm && \ + cd llvm && \ + mkdir build && \ + cd build && \ + cmake -G 'Unix Makefiles' -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake -DCMAKE_EXE_LINKER_FLAGS=-pthread -DCMAKE_SHARED_LINKER_FLAGS=-pthread -DLLVM_ENABLE_PIC=NO .. 
&& \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/llvm +# build boost +cd $EOSIO_INSTALL_LOCATION && curl -LO https://dl.bintray.com/boostorg/release/1.71.0/source/boost_1_71_0.tar.bz2 && \ + tar -xjf boost_1_71_0.tar.bz2 && \ + cd boost_1_71_0 && \ + ./bootstrap.sh --with-toolset=clang --prefix=$EOSIO_INSTALL_LOCATION && \ + ./b2 toolset=clang cxxflags='-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I$EOSIO_INSTALL_LOCATION/include/c++/v1 -D_FORTIFY_SOURCE=2 -fstack-protector-strong -fpie' linkflags='-stdlib=libc++ -pie' link=static threading=multi --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j$(nproc) install && \ + rm -rf $EOSIO_INSTALL_LOCATION/boost_1_71_0.tar.bz2 $EOSIO_INSTALL_LOCATION/boost_1_71_0 +# build mongodb +cd $EOSIO_INSTALL_LOCATION && curl -LO https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-3.6.3.tgz && \ + tar -xzf mongodb-linux-x86_64-amazon-3.6.3.tgz && rm -f mongodb-linux-x86_64-amazon-3.6.3.tgz && \ + mv $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-amazon-3.6.3/bin/* $EOSIO_INSTALL_LOCATION/bin/ && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-amazon-3.6.3 +# build mongodb c driver +cd $EOSIO_INSTALL_LOCATION && curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/1.13.0/mongo-c-driver-1.13.0.tar.gz && \ + tar -xzf mongo-c-driver-1.13.0.tar.gz && cd mongo-c-driver-1.13.0 && \ + mkdir -p build && cd build && \ + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake .. 
&& \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0 +# build mongodb cxx driver +cd $EOSIO_INSTALL_LOCATION && curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r3.4.0.tar.gz -o mongo-cxx-driver-r3.4.0.tar.gz && \ + tar -xzf mongo-cxx-driver-r3.4.0.tar.gz && cd mongo-cxx-driver-r3.4.0 && \ + sed -i 's/\"maxAwaitTimeMS\", count/\"maxAwaitTimeMS\", static_cast<int64_t>(count)/' src/mongocxx/options/change_stream.cpp && \ + sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt && \ + mkdir -p build && cd build && \ + cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake .. && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0 +``` + +## Build EOSIO +These commands build the EOSIO software on the specified OS. Make sure to [Install EOSIO Dependencies](#install-eosio-dependencies) first. +```sh +export EOSIO_BUILD_LOCATION=$EOSIO_LOCATION/build +mkdir -p $EOSIO_BUILD_LOCATION +cd $EOSIO_BUILD_LOCATION && $EOSIO_INSTALL_LOCATION/bin/cmake -DCMAKE_BUILD_TYPE='Release' -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DBUILD_MONGO_DB_PLUGIN=true .. +cd $EOSIO_BUILD_LOCATION && make -j$(nproc) +``` + +## Install EOSIO +This command installs the EOSIO software on the specified OS. Make sure to [Build EOSIO](#build-eosio) first. +```sh +cd $EOSIO_BUILD_LOCATION && make install +``` + +## Test EOSIO +These commands validate the EOSIO software installation on the specified OS. This task is optional but recommended. Make sure to [Install EOSIO](#install-eosio) first. 
+```sh +$EOSIO_INSTALL_LOCATION/bin/mongod --fork --logpath $(pwd)/mongod.log --dbpath $(pwd)/mongodata +cd $EOSIO_BUILD_LOCATION && make test +``` + +## Uninstall EOSIO +These commands uninstall the EOSIO software from the specified OS. +```sh +xargs rm < $EOSIO_BUILD_LOCATION/install_manifest.txt +rm -rf $EOSIO_BUILD_LOCATION +``` diff --git a/docs/00_install/01_build-from-source/02_manual-build/03_platforms/amazon_linux-2-unpinned.md b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/amazon_linux-2-unpinned.md new file mode 100644 index 00000000000..804e50fa2cc --- /dev/null +++ b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/amazon_linux-2-unpinned.md @@ -0,0 +1,112 @@ +--- +content_title: Amazon Linux 2 (unpinned) +--- + +This section contains shell commands to manually download, build, install, test, and uninstall EOSIO and dependencies on Amazon Linux 2. + +[[info | Building EOSIO is for Advanced Developers]] +| If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](../../../00_install-prebuilt-binaries.md) instead of building from source. + +Select a task below, then copy/paste the shell commands to a Unix terminal to execute: + +* [Download EOSIO Repository](#download-eosio-repository) +* [Install EOSIO Dependencies](#install-eosio-dependencies) +* [Build EOSIO](#build-eosio) +* [Install EOSIO](#install-eosio) +* [Test EOSIO](#test-eosio) +* [Uninstall EOSIO](#uninstall-eosio) + +[[info | Building EOSIO on another OS?]] +| Visit the [Build EOSIO from Source](../../index.md) section. + +## Download EOSIO Repository +These commands set the EOSIO directories, install git, and clone the EOSIO repository. 
+```sh +# set EOSIO directories +export EOSIO_LOCATION=~/eosio/eos +export EOSIO_INSTALL_LOCATION=$EOSIO_LOCATION/../install +mkdir -p $EOSIO_INSTALL_LOCATION +# install git +yum update -y && yum install -y git +# clone EOSIO repository +git clone https://github.com/EOSIO/eos.git $EOSIO_LOCATION +cd $EOSIO_LOCATION && git submodule update --init --recursive +``` + +## Install EOSIO Dependencies +These commands install the EOSIO software dependencies. Make sure to [Download the EOSIO Repository](#download-eosio-repository) first and set the EOSIO directories. +```sh +# install dependencies +yum install -y which sudo procps-ng util-linux autoconf automake \ + libtool make bzip2 bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel \ + libusbx-devel python3 python3-devel python-devel libedit-devel doxygen \ + graphviz clang patch llvm-devel llvm-static vim-common jq +# build cmake +export PATH=$EOSIO_INSTALL_LOCATION/bin:$PATH +cd $EOSIO_INSTALL_LOCATION && curl -LO https://cmake.org/files/v3.13/cmake-3.13.2.tar.gz && \ + tar -xzf cmake-3.13.2.tar.gz && \ + cd cmake-3.13.2 && \ + ./bootstrap --prefix=$EOSIO_INSTALL_LOCATION && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/cmake-3.13.2.tar.gz $EOSIO_INSTALL_LOCATION/cmake-3.13.2 +# build boost +cd $EOSIO_INSTALL_LOCATION && curl -LO https://dl.bintray.com/boostorg/release/1.71.0/source/boost_1_71_0.tar.bz2 && \ + tar -xjf boost_1_71_0.tar.bz2 && \ + cd boost_1_71_0 && \ + ./bootstrap.sh --prefix=$EOSIO_INSTALL_LOCATION && \ + ./b2 --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j$(nproc) install && \ + rm -rf $EOSIO_INSTALL_LOCATION/boost_1_71_0.tar.bz2 $EOSIO_INSTALL_LOCATION/boost_1_71_0 +# build mongodb +cd $EOSIO_INSTALL_LOCATION && curl -LO https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-3.6.3.tgz && \ + tar -xzf mongodb-linux-x86_64-amazon-3.6.3.tgz && rm -f 
mongodb-linux-x86_64-amazon-3.6.3.tgz && \ + mv $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-amazon-3.6.3/bin/* $EOSIO_INSTALL_LOCATION/bin/ && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-amazon-3.6.3 +# build mongodb c driver +cd $EOSIO_INSTALL_LOCATION && curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/1.13.0/mongo-c-driver-1.13.0.tar.gz && \ + tar -xzf mongo-c-driver-1.13.0.tar.gz && cd mongo-c-driver-1.13.0 && \ + mkdir -p build && cd build && \ + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF .. && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0 +# build mongodb cxx driver +cd $EOSIO_INSTALL_LOCATION && curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r3.4.0.tar.gz -o mongo-cxx-driver-r3.4.0.tar.gz && \ + tar -xzf mongo-cxx-driver-r3.4.0.tar.gz && cd mongo-cxx-driver-r3.4.0 && \ + sed -i 's/\"maxAwaitTimeMS\", count/\"maxAwaitTimeMS\", static_cast<int64_t>(count)/' src/mongocxx/options/change_stream.cpp && \ + sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt && \ + mkdir -p build && cd build && \ + cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION .. && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0 +``` + +## Build EOSIO +These commands build the EOSIO software on the specified OS. Make sure to [Install EOSIO Dependencies](#install-eosio-dependencies) first. 
+```sh +export EOSIO_BUILD_LOCATION=$EOSIO_LOCATION/build +mkdir -p $EOSIO_BUILD_LOCATION +cd $EOSIO_BUILD_LOCATION && $EOSIO_INSTALL_LOCATION/bin/cmake -DCMAKE_BUILD_TYPE='Release' -DCMAKE_CXX_COMPILER='clang++' -DCMAKE_C_COMPILER='clang' -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DBUILD_MONGO_DB_PLUGIN=true .. +cd $EOSIO_BUILD_LOCATION && make -j$(nproc) +``` + +## Install EOSIO +This command installs the EOSIO software on the specified OS. Make sure to [Build EOSIO](#build-eosio) first. +```sh +cd $EOSIO_BUILD_LOCATION && make install +``` + +## Test EOSIO +These commands validate the EOSIO software installation on the specified OS. This task is optional but recommended. Make sure to [Install EOSIO](#install-eosio) first. +```sh +$EOSIO_INSTALL_LOCATION/bin/mongod --fork --logpath $(pwd)/mongod.log --dbpath $(pwd)/mongodata +cd $EOSIO_BUILD_LOCATION && make test +``` + +## Uninstall EOSIO +These commands uninstall the EOSIO software from the specified OS. +```sh +xargs rm < $EOSIO_BUILD_LOCATION/install_manifest.txt +rm -rf $EOSIO_BUILD_LOCATION +``` diff --git a/docs/00_install/01_build-from-source/02_manual-build/03_platforms/centos-7.7-pinned.md b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/centos-7.7-pinned.md new file mode 100644 index 00000000000..ead772956c7 --- /dev/null +++ b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/centos-7.7-pinned.md @@ -0,0 +1,146 @@ +--- +content_title: Centos 7.7 (pinned) +--- + +This section contains shell commands to manually download, build, install, test, and uninstall EOSIO and dependencies on Centos 7.7. + +[[info | Building EOSIO is for Advanced Developers]] +| If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](../../../00_install-prebuilt-binaries.md) instead of building from source. 
+ +Select a task below, then copy/paste the shell commands to a Unix terminal to execute: + +* [Download EOSIO Repository](#download-eosio-repository) +* [Install EOSIO Dependencies](#install-eosio-dependencies) +* [Build EOSIO](#build-eosio) +* [Install EOSIO](#install-eosio) +* [Test EOSIO](#test-eosio) +* [Uninstall EOSIO](#uninstall-eosio) + +[[info | Building EOSIO on another OS?]] +| Visit the [Build EOSIO from Source](../../index.md) section. + +## Download EOSIO Repository +These commands set the EOSIO directories, install git, and clone the EOSIO repository. +```sh +# set EOSIO directories +export EOSIO_LOCATION=~/eosio/eos +export EOSIO_INSTALL_LOCATION=$EOSIO_LOCATION/../install +mkdir -p $EOSIO_INSTALL_LOCATION +# install git +yum update -y && yum install -y git +# clone EOSIO repository +git clone https://github.com/EOSIO/eos.git $EOSIO_LOCATION +cd $EOSIO_LOCATION && git submodule update --init --recursive +``` + +## Install EOSIO Dependencies +These commands install the EOSIO software dependencies. Make sure to [Download the EOSIO Repository](#download-eosio-repository) first and set the EOSIO directories. 
+```sh +# install dependencies +yum update -y && \ + yum install -y epel-release && \ + yum --enablerepo=extras install -y centos-release-scl && \ + yum --enablerepo=extras install -y devtoolset-8 && \ + yum --enablerepo=extras install -y which autoconf automake libtool make bzip2 doxygen \ + graphviz bzip2-devel openssl-devel gmp-devel ocaml libicu-devel python python-devel \ + rh-python36 file libusbx-devel libcurl-devel patch vim-common jq +# build cmake +export PATH=$EOSIO_INSTALL_LOCATION/bin:$PATH +cd $EOSIO_INSTALL_LOCATION && curl -LO https://cmake.org/files/v3.13/cmake-3.13.2.tar.gz && \ + source /opt/rh/devtoolset-8/enable && \ + source /opt/rh/rh-python36/enable && \ + tar -xzf cmake-3.13.2.tar.gz && \ + cd cmake-3.13.2 && \ + ./bootstrap --prefix=$EOSIO_INSTALL_LOCATION && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/cmake-3.13.2.tar.gz $EOSIO_INSTALL_LOCATION/cmake-3.13.2 +# apply clang patch +cp -f $EOSIO_LOCATION/scripts/clang-devtoolset8-support.patch /tmp/clang-devtoolset8-support.patch +# build clang +cd $EOSIO_INSTALL_LOCATION && git clone --single-branch --branch release_80 https://git.llvm.org/git/llvm.git clang8 && cd clang8 && git checkout 18e41dc && \ + cd tools && git clone --single-branch --branch release_80 https://git.llvm.org/git/lld.git && cd lld && git checkout d60a035 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/polly.git && cd polly && git checkout 1bc06e5 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang.git clang && cd clang && git checkout a03da8b && \ + patch -p2 < /tmp/clang-devtoolset8-support.patch && \ + cd tools && mkdir extra && cd extra && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ + cd $EOSIO_INSTALL_LOCATION/clang8/projects && git clone --single-branch --branch release_80 
https://git.llvm.org/git/libcxx.git && cd libcxx && git checkout 1853712 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libunwind.git && cd libunwind && git checkout 57f6739 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ + mkdir $EOSIO_INSTALL_LOCATION/clang8/build && cd $EOSIO_INSTALL_LOCATION/clang8/build && \ + source /opt/rh/devtoolset-8/enable && \ + source /opt/rh/rh-python36/enable && \ + cmake -G 'Unix Makefiles' -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=X86 -DCMAKE_BUILD_TYPE=Release .. && \ + make -j $(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/clang8 +# build llvm +cd $EOSIO_INSTALL_LOCATION && git clone --depth 1 --single-branch --branch release_80 https://github.com/llvm-mirror/llvm.git llvm && \ + cd llvm && \ + mkdir build && \ + cd build && \ + cmake -G 'Unix Makefiles' -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake -DCMAKE_EXE_LINKER_FLAGS=-pthread -DCMAKE_SHARED_LINKER_FLAGS=-pthread -DLLVM_ENABLE_PIC=NO .. 
&& \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/llvm +# build boost +cd $EOSIO_INSTALL_LOCATION && curl -LO https://dl.bintray.com/boostorg/release/1.71.0/source/boost_1_71_0.tar.bz2 && \ + tar -xjf boost_1_71_0.tar.bz2 && \ + cd boost_1_71_0 && \ + ./bootstrap.sh --with-toolset=clang --prefix=$EOSIO_INSTALL_LOCATION && \ + ./b2 toolset=clang cxxflags='-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I$EOSIO_INSTALL_LOCATION/include/c++/v1 -D_FORTIFY_SOURCE=2 -fstack-protector-strong -fpie' linkflags='-stdlib=libc++ -pie' link=static threading=multi --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j$(nproc) install && \ + rm -rf $EOSIO_INSTALL_LOCATION/boost_1_71_0.tar.bz2 $EOSIO_INSTALL_LOCATION/boost_1_71_0 +# build mongodb +cd $EOSIO_INSTALL_LOCATION && curl -LO https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-3.6.3.tgz && \ + tar -xzf mongodb-linux-x86_64-amazon-3.6.3.tgz && rm -f mongodb-linux-x86_64-amazon-3.6.3.tgz && \ + mv $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-amazon-3.6.3/bin/* $EOSIO_INSTALL_LOCATION/bin/ && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-amazon-3.6.3 +# build mongodb c driver +cd $EOSIO_INSTALL_LOCATION && curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/1.13.0/mongo-c-driver-1.13.0.tar.gz && \ + tar -xzf mongo-c-driver-1.13.0.tar.gz && cd mongo-c-driver-1.13.0 && \ + mkdir -p build && cd build && \ + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake .. 
&& \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0 +# build mongodb cxx driver +cd $EOSIO_INSTALL_LOCATION && curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r3.4.0.tar.gz -o mongo-cxx-driver-r3.4.0.tar.gz && \ + tar -xzf mongo-cxx-driver-r3.4.0.tar.gz && cd mongo-cxx-driver-r3.4.0 && \ + sed -i 's/\"maxAwaitTimeMS\", count/\"maxAwaitTimeMS\", static_cast<int64_t>(count)/' src/mongocxx/options/change_stream.cpp && \ + sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt && \ + mkdir -p build && cd build && \ + cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake .. && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0 +``` + +## Build EOSIO +These commands build the EOSIO software on the specified OS. Make sure to [Install EOSIO Dependencies](#install-eosio-dependencies) first. +```sh +export EOSIO_BUILD_LOCATION=$EOSIO_LOCATION/build +mkdir -p $EOSIO_BUILD_LOCATION +cd $EOSIO_BUILD_LOCATION && cmake -DCMAKE_BUILD_TYPE='Release' -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DBUILD_MONGO_DB_PLUGIN=true .. +cd $EOSIO_BUILD_LOCATION && make -j$(nproc) +``` + +## Install EOSIO +This command installs the EOSIO software on the specified OS. Make sure to [Build EOSIO](#build-eosio) first. +```sh +cd $EOSIO_BUILD_LOCATION && make install +``` + +## Test EOSIO +These commands validate the EOSIO software installation on the specified OS. This task is optional but recommended. Make sure to [Install EOSIO](#install-eosio) first.
+```sh +$EOSIO_INSTALL_LOCATION/bin/mongod --fork --logpath $(pwd)/mongod.log --dbpath $(pwd)/mongodata +cd $EOSIO_BUILD_LOCATION && make test +``` + +## Uninstall EOSIO +These commands uninstall the EOSIO software from the specified OS. +```sh +xargs rm < $EOSIO_BUILD_LOCATION/install_manifest.txt +rm -rf $EOSIO_BUILD_LOCATION +``` diff --git a/docs/00_install/01_build-from-source/02_manual-build/03_platforms/centos-7.7-unpinned.md b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/centos-7.7-unpinned.md new file mode 100644 index 00000000000..d43af130696 --- /dev/null +++ b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/centos-7.7-unpinned.md @@ -0,0 +1,122 @@ +--- +content_title: Centos 7.7 (unpinned) +--- + +This section contains shell commands to manually download, build, install, test, and uninstall EOSIO and dependencies on Centos 7.7. + +[[info | Building EOSIO is for Advanced Developers]] +| If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](../../../00_install-prebuilt-binaries.md) instead of building from source. + +Select a task below, then copy/paste the shell commands to a Unix terminal to execute: + +* [Download EOSIO Repository](#download-eosio-repository) +* [Install EOSIO Dependencies](#install-eosio-dependencies) +* [Build EOSIO](#build-eosio) +* [Install EOSIO](#install-eosio) +* [Test EOSIO](#test-eosio) +* [Uninstall EOSIO](#uninstall-eosio) + +[[info | Building EOSIO on another OS?]] +| Visit the [Build EOSIO from Source](../../index.md) section. + +## Download EOSIO Repository +These commands set the EOSIO directories, install git, and clone the EOSIO repository. 
+```sh +# set EOSIO directories +export EOSIO_LOCATION=~/eosio/eos +export EOSIO_INSTALL_LOCATION=$EOSIO_LOCATION/../install +mkdir -p $EOSIO_INSTALL_LOCATION +# install git +yum update -y && yum install -y git +# clone EOSIO repository +git clone https://github.com/EOSIO/eos.git $EOSIO_LOCATION +cd $EOSIO_LOCATION && git submodule update --init --recursive +``` + +## Install EOSIO Dependencies +These commands install the EOSIO software dependencies. Make sure to [Download the EOSIO Repository](#download-eosio-repository) first and set the EOSIO directories. +```sh +# install dependencies +yum update -y && \ + yum install -y epel-release && \ + yum --enablerepo=extras install -y centos-release-scl && \ + yum --enablerepo=extras install -y devtoolset-8 && \ + yum --enablerepo=extras install -y which git autoconf automake libtool make bzip2 doxygen \ + graphviz bzip2-devel openssl-devel gmp-devel ocaml libicu-devel \ + python python-devel rh-python36 file libusbx-devel \ + libcurl-devel patch vim-common jq llvm-toolset-7.0-llvm-devel llvm-toolset-7.0-llvm-static +# build cmake +export PATH=$EOSIO_INSTALL_LOCATION/bin:$PATH +cd $EOSIO_INSTALL_LOCATION && curl -LO https://cmake.org/files/v3.13/cmake-3.13.2.tar.gz && \ + source /opt/rh/devtoolset-8/enable && \ + tar -xzf cmake-3.13.2.tar.gz && \ + cd cmake-3.13.2 && \ + ./bootstrap --prefix=$EOSIO_INSTALL_LOCATION && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/cmake-3.13.2.tar.gz $EOSIO_INSTALL_LOCATION/cmake-3.13.2 +# apply clang patch +cp -f $EOSIO_LOCATION/scripts/clang-devtoolset8-support.patch /tmp/clang-devtoolset8-support.patch +# build boost +cd $EOSIO_INSTALL_LOCATION && curl -LO https://dl.bintray.com/boostorg/release/1.71.0/source/boost_1_71_0.tar.bz2 && \ + source /opt/rh/devtoolset-8/enable && \ + tar -xjf boost_1_71_0.tar.bz2 && \ + cd boost_1_71_0 && \ + ./bootstrap.sh --prefix=$EOSIO_INSTALL_LOCATION && \ + ./b2 --with-iostreams --with-date_time --with-filesystem 
--with-system --with-program_options --with-chrono --with-test -q -j$(nproc) install && \ + rm -rf $EOSIO_INSTALL_LOCATION/boost_1_71_0.tar.bz2 $EOSIO_INSTALL_LOCATION/boost_1_71_0 +# build mongodb +cd $EOSIO_INSTALL_LOCATION && curl -LO https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-3.6.3.tgz && \ + tar -xzf mongodb-linux-x86_64-amazon-3.6.3.tgz && rm -f mongodb-linux-x86_64-amazon-3.6.3.tgz && \ + mv $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-amazon-3.6.3/bin/* $EOSIO_INSTALL_LOCATION/bin/ && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-amazon-3.6.3 +# build mongodb c driver +cd $EOSIO_INSTALL_LOCATION && curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/1.13.0/mongo-c-driver-1.13.0.tar.gz && \ + source /opt/rh/devtoolset-8/enable && \ + tar -xzf mongo-c-driver-1.13.0.tar.gz && cd mongo-c-driver-1.13.0 && \ + mkdir -p build && cd build && \ + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF .. && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0 +# build mongodb cxx driver +cd $EOSIO_INSTALL_LOCATION && curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r3.4.0.tar.gz -o mongo-cxx-driver-r3.4.0.tar.gz && \ + source /opt/rh/devtoolset-8/enable && \ + tar -xzf mongo-cxx-driver-r3.4.0.tar.gz && cd mongo-cxx-driver-r3.4.0 && \ + sed -i 's/\"maxAwaitTimeMS\", count/\"maxAwaitTimeMS\", static_cast<int64_t>(count)/' src/mongocxx/options/change_stream.cpp && \ + sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt && \ + mkdir -p build && cd build && \ + cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION ..
&& \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0 +``` + +## Build EOSIO +These commands build the EOSIO software on the specified OS. Make sure to [Install EOSIO Dependencies](#install-eosio-dependencies) first. +```sh +export EOSIO_BUILD_LOCATION=$EOSIO_LOCATION/build +mkdir -p $EOSIO_BUILD_LOCATION +cd $EOSIO_BUILD_LOCATION && source /opt/rh/devtoolset-8/enable && cmake -DCMAKE_BUILD_TYPE='Release' -DLLVM_DIR='/opt/rh/llvm-toolset-7.0/root/usr/lib64/cmake/llvm' -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DBUILD_MONGO_DB_PLUGIN=true .. +cd $EOSIO_BUILD_LOCATION && make -j$(nproc) +``` + +## Install EOSIO +This command installs the EOSIO software on the specified OS. Make sure to [Build EOSIO](#build-eosio) first. +```sh +cd $EOSIO_BUILD_LOCATION && make install +``` + +## Test EOSIO +These commands validate the EOSIO software installation on the specified OS. This task is optional but recommended. Make sure to [Install EOSIO](#install-eosio) first. +```sh +$EOSIO_INSTALL_LOCATION/bin/mongod --fork --logpath $(pwd)/mongod.log --dbpath $(pwd)/mongodata +cd $EOSIO_BUILD_LOCATION && source /opt/rh/rh-python36/enable && make test +``` + +## Uninstall EOSIO +These commands uninstall the EOSIO software from the specified OS. 
+```sh +xargs rm < $EOSIO_BUILD_LOCATION/install_manifest.txt +rm -rf $EOSIO_BUILD_LOCATION +``` diff --git a/docs/00_install/01_build-from-source/02_manual-build/03_platforms/index.md b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/index.md new file mode 100644 index 00000000000..2a8b2680830 --- /dev/null +++ b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/index.md @@ -0,0 +1,13 @@ +--- +content_title: Platforms +--- + +* [Amazon Linux 2 (pinned)](amazon_linux-2-pinned.md) +* [Amazon Linux 2 (unpinned)](amazon_linux-2-unpinned.md) +* [CentOS 7.7 (pinned)](centos-7.7-pinned.md) +* [CentOS 7.7 (unpinned)](centos-7.7-unpinned.md) +* [MacOS 10.14 (pinned)](macos-10.14-pinned.md) +* [MacOS 10.14 (unpinned)](macos-10.14-unpinned.md) +* [Ubuntu 16.04 (pinned)](ubuntu-16.04-pinned.md) +* [Ubuntu 18.04 (pinned)](ubuntu-18.04-pinned.md) +* [Ubuntu 18.04 (unpinned)](ubuntu-18.04-unpinned.md) diff --git a/docs/00_install/01_build-from-source/02_manual-build/03_platforms/macos-10.14-pinned.md b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/macos-10.14-pinned.md new file mode 100644 index 00000000000..edd27859c0b --- /dev/null +++ b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/macos-10.14-pinned.md @@ -0,0 +1,113 @@ +--- +content_title: MacOS 10.14 (pinned compiler) +--- + +This section contains shell commands to manually download, build, install, test, and uninstall EOSIO and dependencies on MacOS 10.14. + +[[info | Building EOSIO is for Advanced Developers]] +| If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](../../../00_install-prebuilt-binaries.md) instead of building from source. 
+ +Select a task below, then copy/paste the shell commands to a Unix terminal to execute: + +* [Download EOSIO Repository](#download-eosio-repository) +* [Install EOSIO Dependencies](#install-eosio-dependencies) +* [Build EOSIO](#build-eosio) +* [Install EOSIO](#install-eosio) +* [Test EOSIO](#test-eosio) +* [Uninstall EOSIO](#uninstall-eosio) + +[[info | Building EOSIO on another OS?]] +| Visit the [Build EOSIO from Source](../../index.md) section. + +## Download EOSIO Repository +These commands set the EOSIO directories, install git, and clone the EOSIO repository. +```sh +# set EOSIO directories +export EOSIO_LOCATION=~/eosio/eos +export EOSIO_INSTALL_LOCATION=$EOSIO_LOCATION/../install +mkdir -p $EOSIO_INSTALL_LOCATION +# install git +brew update && brew install git +# clone EOSIO repository +git clone https://github.com/EOSIO/eos.git $EOSIO_LOCATION +cd $EOSIO_LOCATION && git submodule update --init --recursive +``` + +## Install EOSIO Dependencies +These commands install the EOSIO software dependencies. Make sure to [Download the EOSIO Repository](#download-eosio-repository) first and set the EOSIO directories. 
+```sh +# install dependencies +brew install cmake python@2 python libtool libusb graphviz automake wget gmp pkgconfig doxygen openssl@1.1 jq || : +# Boost Fix: eosio/install/bin/../include/c++/v1/stdlib.h:94:15: fatal error: 'stdlib.h' file not found +SDKROOT="$(xcrun --sdk macosx --show-sdk-path)" +# build clang +export PATH=$EOSIO_INSTALL_LOCATION/bin:$PATH +cd $EOSIO_INSTALL_LOCATION && git clone --single-branch --branch release_80 https://git.llvm.org/git/llvm.git clang8 && cd clang8 && git checkout 18e41dc && \ + cd tools && git clone --single-branch --branch release_80 https://git.llvm.org/git/lld.git && cd lld && git checkout d60a035 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/polly.git && cd polly && git checkout 1bc06e5 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang.git clang && cd clang && git checkout a03da8b && \ + cd tools && mkdir extra && cd extra && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ + mkdir -p $EOSIO_INSTALL_LOCATION/clang8/projects && cd $EOSIO_INSTALL_LOCATION/clang8/projects && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxx.git && cd libcxx && git checkout 1853712 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libunwind.git && cd libunwind && git checkout 57f6739 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ + mkdir $EOSIO_INSTALL_LOCATION/clang8/build && cd $EOSIO_INSTALL_LOCATION/clang8/build && \ + cmake -G 'Unix Makefiles' -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON 
-DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=X86 -DCMAKE_BUILD_TYPE=Release .. && \ + make -j $(getconf _NPROCESSORS_ONLN) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/clang8 +cd $EOSIO_INSTALL_LOCATION && curl -LO https://dl.bintray.com/boostorg/release/1.71.0/source/boost_1_71_0.tar.bz2 && \ + tar -xjf boost_1_71_0.tar.bz2 && cd boost_1_71_0 && \ + SDKROOT="$SDKROOT" ./bootstrap.sh --prefix=$EOSIO_INSTALL_LOCATION && \ + SDKROOT="$SDKROOT" ./b2 --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j$(getconf _NPROCESSORS_ONLN) install && \ + rm -rf $EOSIO_INSTALL_LOCATION/boost_1_71_0.tar.bz2 $EOSIO_INSTALL_LOCATION/boost_1_71_0 +# install mongodb +cd $EOSIO_INSTALL_LOCATION && curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-3.6.3.tgz && \ + tar -xzf mongodb-osx-ssl-x86_64-3.6.3.tgz && rm -f mongodb-osx-ssl-x86_64-3.6.3.tgz && \ + mv $EOSIO_INSTALL_LOCATION/mongodb-osx-x86_64-3.6.3/bin/* $EOSIO_INSTALL_LOCATION/bin/ && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongodb-osx-x86_64-3.6.3 +# install mongo-c-driver from source +cd $EOSIO_INSTALL_LOCATION && curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/1.13.0/mongo-c-driver-1.13.0.tar.gz && \ + tar -xzf mongo-c-driver-1.13.0.tar.gz && cd mongo-c-driver-1.13.0 && \ + mkdir -p cmake-build && cd cmake-build && \ + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SASL=OFF -DENABLE_SNAPPY=OFF ..
&& \ + make -j $(getconf _NPROCESSORS_ONLN) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0 +# install mongo-cxx-driver from source +cd $EOSIO_INSTALL_LOCATION && curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r3.4.0.tar.gz -o mongo-cxx-driver-r3.4.0.tar.gz && \ + tar -xzf mongo-cxx-driver-r3.4.0.tar.gz && cd mongo-cxx-driver-r3.4.0/build && \ + cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION .. && \ + make -j $(getconf _NPROCESSORS_ONLN) VERBOSE=1 && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0 +``` + +## Build EOSIO +These commands build the EOSIO software on the specified OS. Make sure to [Install EOSIO Dependencies](#install-eosio-dependencies) first. +```sh +export EOSIO_BUILD_LOCATION=$EOSIO_LOCATION/build +mkdir -p $EOSIO_BUILD_LOCATION +cd $EOSIO_BUILD_LOCATION && cmake -DCMAKE_BUILD_TYPE='Release' -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DBUILD_MONGO_DB_PLUGIN=true .. +cd $EOSIO_BUILD_LOCATION && make -j$(getconf _NPROCESSORS_ONLN) +``` + +## Install EOSIO +This command installs the EOSIO software on the specified OS. Make sure to [Build EOSIO](#build-eosio) first. +```sh +cd $EOSIO_BUILD_LOCATION && make install +``` + +## Test EOSIO +These commands validate the EOSIO software installation on the specified OS. This task is optional but recommended. Make sure to [Install EOSIO](#install-eosio) first. +```sh +$EOSIO_INSTALL_LOCATION/bin/mongod --fork --logpath $(pwd)/mongod.log --dbpath $(pwd)/mongodata +cd $EOSIO_BUILD_LOCATION && make test +``` + +## Uninstall EOSIO +These commands uninstall the EOSIO software from the specified OS. 
+```sh +xargs rm < $EOSIO_BUILD_LOCATION/install_manifest.txt +rm -rf $EOSIO_BUILD_LOCATION +``` diff --git a/docs/00_install/01_build-from-source/02_manual-build/03_platforms/macos-10.14-unpinned.md b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/macos-10.14-unpinned.md new file mode 100644 index 00000000000..e3596fb21e4 --- /dev/null +++ b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/macos-10.14-unpinned.md @@ -0,0 +1,92 @@ +--- +content_title: MacOS 10.14 (native compiler) +--- + +This section contains shell commands to manually download, build, install, test, and uninstall EOSIO and dependencies on MacOS 10.14. + +[[info | Building EOSIO is for Advanced Developers]] +| If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](../../../00_install-prebuilt-binaries.md) instead of building from source. + +Select a task below, then copy/paste the shell commands to a Unix terminal to execute: + +* [Download EOSIO Repository](#download-eosio-repository) +* [Install EOSIO Dependencies](#install-eosio-dependencies) +* [Build EOSIO](#build-eosio) +* [Install EOSIO](#install-eosio) +* [Test EOSIO](#test-eosio) +* [Uninstall EOSIO](#uninstall-eosio) + +[[info | Building EOSIO on another OS?]] +| Visit the [Build EOSIO from Source](../../index.md) section. + +## Download EOSIO Repository +These commands set the EOSIO directories, install git, and clone the EOSIO repository. +```sh +# set EOSIO directories +export EOSIO_LOCATION=~/eosio/eos +export EOSIO_INSTALL_LOCATION=$EOSIO_LOCATION/../install +mkdir -p $EOSIO_INSTALL_LOCATION +# install git +brew update && brew install git +# clone EOSIO repository +git clone https://github.com/EOSIO/eos.git $EOSIO_LOCATION +cd $EOSIO_LOCATION && git submodule update --init --recursive +``` + +## Install Dependencies +These commands install the EOSIO software dependencies. 
Make sure to [Download the EOSIO Repository](#download-eosio-repository) first and set the EOSIO directories. +```sh +# install dependencies +brew install cmake python@2 python libtool libusb graphviz automake wget gmp pkgconfig doxygen openssl@1.1 jq boost || : +export PATH=$EOSIO_INSTALL_LOCATION/bin:$PATH +# install mongodb +mkdir -p $EOSIO_INSTALL_LOCATION/bin +cd $EOSIO_INSTALL_LOCATION && curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-3.6.3.tgz && \ + tar -xzf mongodb-osx-ssl-x86_64-3.6.3.tgz && rm -f mongodb-osx-ssl-x86_64-3.6.3.tgz && \ + mv $EOSIO_INSTALL_LOCATION/mongodb-osx-x86_64-3.6.3/bin/* $EOSIO_INSTALL_LOCATION/bin/ && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongodb-osx-x86_64-3.6.3 && rm -rf $EOSIO_INSTALL_LOCATION/mongodb-osx-ssl-x86_64-3.6.3.tgz +# install mongo-c-driver from source +cd $EOSIO_INSTALL_LOCATION && curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/1.13.0/mongo-c-driver-1.13.0.tar.gz && \ + tar -xzf mongo-c-driver-1.13.0.tar.gz && cd mongo-c-driver-1.13.0 && \ + mkdir -p cmake-build && cd cmake-build && \ + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SASL=OFF -DENABLE_SNAPPY=OFF .. && \ + make -j $(getconf _NPROCESSORS_ONLN) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0 +# install mongo-cxx-driver from source +cd $EOSIO_INSTALL_LOCATION && curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r3.4.0.tar.gz -o mongo-cxx-driver-r3.4.0.tar.gz && \ + tar -xzf mongo-cxx-driver-r3.4.0.tar.gz && cd mongo-cxx-driver-r3.4.0/build && \ + cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION ..
&& \ + make -j $(getconf _NPROCESSORS_ONLN) VERBOSE=1 && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0 +``` + +## Build EOSIO +These commands build the EOSIO software on the specified OS. Make sure to [Install EOSIO Dependencies](#install-eosio-dependencies) first. +```sh +export EOSIO_BUILD_LOCATION=$EOSIO_LOCATION/build +mkdir -p $EOSIO_BUILD_LOCATION +cd $EOSIO_BUILD_LOCATION && cmake -DCMAKE_BUILD_TYPE='Release' -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DBUILD_MONGO_DB_PLUGIN=true .. +cd $EOSIO_BUILD_LOCATION && make -j$(getconf _NPROCESSORS_ONLN) +``` + +## Install EOSIO +This command installs the EOSIO software on the specified OS. Make sure to [Build EOSIO](#build-eosio) first. +```sh +cd $EOSIO_BUILD_LOCATION && make install +``` + +## Test EOSIO +These commands validate the EOSIO software installation on the specified OS. This task is optional but recommended. Make sure to [Install EOSIO](#install-eosio) first. +```sh +$EOSIO_INSTALL_LOCATION/bin/mongod --fork --logpath $(pwd)/mongod.log --dbpath $(pwd)/mongodata +cd $EOSIO_BUILD_LOCATION && make test +``` + +## Uninstall EOSIO +These commands uninstall the EOSIO software from the specified OS. +```sh +xargs rm < $EOSIO_BUILD_LOCATION/install_manifest.txt +rm -rf $EOSIO_BUILD_LOCATION +``` diff --git a/docs/00_install/01_build-from-source/02_manual-build/03_platforms/ubuntu-16.04-pinned.md b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/ubuntu-16.04-pinned.md new file mode 100644 index 00000000000..d127b13bdcd --- /dev/null +++ b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/ubuntu-16.04-pinned.md @@ -0,0 +1,135 @@ +--- +content_title: Ubuntu 16.04 (pinned) +--- + +This section contains shell commands to manually download, build, install, test, and uninstall EOSIO and dependencies on Ubuntu 16.04. 
+ +[[info | Building EOSIO is for Advanced Developers]] +| If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](../../../00_install-prebuilt-binaries.md) instead of building from source. + +Select a task below, then copy/paste the shell commands to a Unix terminal to execute: + +* [Download EOSIO Repository](#download-eosio-repository) +* [Install EOSIO Dependencies](#install-eosio-dependencies) +* [Build EOSIO](#build-eosio) +* [Install EOSIO](#install-eosio) +* [Test EOSIO](#test-eosio) +* [Uninstall EOSIO](#uninstall-eosio) + +[[info | Building EOSIO on another OS?]] +| Visit the [Build EOSIO from Source](../../index.md) section. + +## Download EOSIO Repository +These commands set the EOSIO directories, install git, and clone the EOSIO repository. +```sh +# set EOSIO directories +export EOSIO_LOCATION=~/eosio/eos +export EOSIO_INSTALL_LOCATION=$EOSIO_LOCATION/../install +mkdir -p $EOSIO_INSTALL_LOCATION +# install git +apt-get update && apt-get upgrade -y && DEBIAN_FRONTEND=noninteractive apt-get install -y git +# clone EOSIO repository +git clone https://github.com/EOSIO/eos.git $EOSIO_LOCATION +cd $EOSIO_LOCATION && git submodule update --init --recursive +``` + +## Install EOSIO Dependencies +These commands install the EOSIO software dependencies. Make sure to [Download the EOSIO Repository](#download-eosio-repository) first and set the EOSIO directories. 
+```sh +# install dependencies +apt-get install -y build-essential automake \ + libbz2-dev libssl-dev doxygen graphviz libgmp3-dev autotools-dev libicu-dev python2.7 python2.7-dev \ + python3 python3-dev autoconf libtool curl zlib1g-dev sudo ruby libusb-1.0-0-dev libcurl4-gnutls-dev \ + pkg-config apt-transport-https vim-common jq +# build cmake +export PATH=$EOSIO_INSTALL_LOCATION/bin:$PATH +cd $EOSIO_INSTALL_LOCATION && curl -LO https://cmake.org/files/v3.13/cmake-3.13.2.tar.gz && \ + tar -xzf cmake-3.13.2.tar.gz && \ + cd cmake-3.13.2 && \ + ./bootstrap --prefix=$EOSIO_INSTALL_LOCATION && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/cmake-3.13.2.tar.gz $EOSIO_INSTALL_LOCATION/cmake-3.13.2 +# build clang +cd $EOSIO_INSTALL_LOCATION && git clone --single-branch --branch release_80 https://git.llvm.org/git/llvm.git clang8 && cd clang8 && git checkout 18e41dc && \ + cd tools && git clone --single-branch --branch release_80 https://git.llvm.org/git/lld.git && cd lld && git checkout d60a035 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/polly.git && cd polly && git checkout 1bc06e5 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang.git clang && cd clang && git checkout a03da8b && \ + cd tools && mkdir extra && cd extra && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ + cd $EOSIO_INSTALL_LOCATION/clang8/projects && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxx.git && cd libcxx && git checkout 1853712 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libunwind.git && cd libunwind && git checkout 57f6739 && \ + cd ../ && git clone --single-branch 
--branch release_80 https://git.llvm.org/git/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ + mkdir $EOSIO_INSTALL_LOCATION/clang8/build && cd $EOSIO_INSTALL_LOCATION/clang8/build && \ + cmake -G 'Unix Makefiles' -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=X86 -DCMAKE_BUILD_TYPE=Release .. && \ + make -j $(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/clang8 +# build llvm +cd $EOSIO_INSTALL_LOCATION && git clone --depth 1 --single-branch --branch release_80 https://github.com/llvm-mirror/llvm.git llvm && \ + cd llvm && \ + mkdir build && cd build && \ + cmake -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake -DCMAKE_EXE_LINKER_FLAGS=-pthread -DCMAKE_SHARED_LINKER_FLAGS=-pthread -DLLVM_ENABLE_PIC=NO .. 
&& \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/llvm +# build boost +cd $EOSIO_INSTALL_LOCATION && curl -LO https://dl.bintray.com/boostorg/release/1.71.0/source/boost_1_71_0.tar.bz2 && \ + tar -xjf boost_1_71_0.tar.bz2 && \ + cd boost_1_71_0 && \ + ./bootstrap.sh --with-toolset=clang --prefix=$EOSIO_INSTALL_LOCATION && \ + ./b2 toolset=clang cxxflags='-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I$EOSIO_INSTALL_LOCATION/include/c++/v1 -D_FORTIFY_SOURCE=2 -fstack-protector-strong -fpie' linkflags='-stdlib=libc++ -pie' link=static threading=multi --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j$(nproc) install && \ + rm -rf $EOSIO_INSTALL_LOCATION/boost_1_71_0.tar.bz2 $EOSIO_INSTALL_LOCATION/boost_1_71_0 +# build mongodb +cd $EOSIO_INSTALL_LOCATION && curl -LO https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1604-3.6.3.tgz && \ + tar -xzf mongodb-linux-x86_64-ubuntu1604-3.6.3.tgz && rm -f mongodb-linux-x86_64-ubuntu1604-3.6.3.tgz && \ + mv $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-ubuntu1604-3.6.3/bin/* $EOSIO_INSTALL_LOCATION/bin/ && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-ubuntu1604-3.6.3 +# build mongodb c driver +cd $EOSIO_INSTALL_LOCATION && curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/1.13.0/mongo-c-driver-1.13.0.tar.gz && \ + tar -xzf mongo-c-driver-1.13.0.tar.gz && cd mongo-c-driver-1.13.0 && \ + mkdir -p build && cd build && \ + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake .. 
&& \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0 +# build mongodb cxx driver +cd $EOSIO_INSTALL_LOCATION && curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r3.4.0.tar.gz -o mongo-cxx-driver-r3.4.0.tar.gz && \ + tar -xzf mongo-cxx-driver-r3.4.0.tar.gz && cd mongo-cxx-driver-r3.4.0 && \ + sed -i 's/\"maxAwaitTimeMS\", count/\"maxAwaitTimeMS\", static_cast<int64_t>(count)/' src/mongocxx/options/change_stream.cpp && \ + sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt && \ + mkdir -p build && cd build && \ + cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake .. && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0 +``` + +## Build EOSIO +These commands build the EOSIO software on the specified OS. Make sure to [Install EOSIO Dependencies](#install-eosio-dependencies) first. +```sh +export EOSIO_BUILD_LOCATION=$EOSIO_LOCATION/build +mkdir -p $EOSIO_BUILD_LOCATION +cd $EOSIO_BUILD_LOCATION && cmake -DCMAKE_BUILD_TYPE='Release' -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DBUILD_MONGO_DB_PLUGIN=true .. +cd $EOSIO_BUILD_LOCATION && make -j$(nproc) +``` + +## Install EOSIO +This command installs the EOSIO software on the specified OS. Make sure to [Build EOSIO](#build-eosio) first. +```sh +cd $EOSIO_BUILD_LOCATION && make install +``` + +## Test EOSIO +These commands validate the EOSIO software installation on the specified OS. This task is optional but recommended. Make sure to [Install EOSIO](#install-eosio) first.
+```sh +$EOSIO_INSTALL_LOCATION/bin/mongod --fork --logpath $(pwd)/mongod.log --dbpath $(pwd)/mongodata +cd $EOSIO_BUILD_LOCATION && make test +``` + +## Uninstall EOSIO +These commands uninstall the EOSIO software from the specified OS. +```sh +xargs rm < $EOSIO_BUILD_LOCATION/install_manifest.txt +rm -rf $EOSIO_BUILD_LOCATION +``` diff --git a/docs/00_install/01_build-from-source/02_manual-build/03_platforms/ubuntu-18.04-pinned.md b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/ubuntu-18.04-pinned.md new file mode 100644 index 00000000000..81e7952c931 --- /dev/null +++ b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/ubuntu-18.04-pinned.md @@ -0,0 +1,135 @@ +--- +content_title: Ubuntu 18.04 (pinned) +--- + +This section contains shell commands to manually download, build, install, test, and uninstall EOSIO and dependencies on Ubuntu 18.04. + +[[info | Building EOSIO is for Advanced Developers]] +| If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](../../../00_install-prebuilt-binaries.md) instead of building from source. + +Select a task below, then copy/paste the shell commands to a Unix terminal to execute: + +* [Download EOSIO Repository](#download-eosio-repository) +* [Install EOSIO Dependencies](#install-eosio-dependencies) +* [Build EOSIO](#build-eosio) +* [Install EOSIO](#install-eosio) +* [Test EOSIO](#test-eosio) +* [Uninstall EOSIO](#uninstall-eosio) + +[[info | Building EOSIO on another OS?]] +| Visit the [Build EOSIO from Source](../../index.md) section. + +## Download EOSIO Repository +These commands set the EOSIO directories, install git, and clone the EOSIO repository. 
+```sh +# set EOSIO directories +export EOSIO_LOCATION=~/eosio/eos +export EOSIO_INSTALL_LOCATION=$EOSIO_LOCATION/../install +mkdir -p $EOSIO_INSTALL_LOCATION +# install git +apt-get update && apt-get upgrade -y && DEBIAN_FRONTEND=noninteractive apt-get install -y git +# clone EOSIO repository +git clone https://github.com/EOSIO/eos.git $EOSIO_LOCATION +cd $EOSIO_LOCATION && git submodule update --init --recursive +``` + +## Install EOSIO Dependencies +These commands install the EOSIO software dependencies. Make sure to [Download the EOSIO Repository](#download-eosio-repository) first and set the EOSIO directories. +```sh +# install dependencies +apt-get install -y make bzip2 automake libbz2-dev libssl-dev doxygen graphviz libgmp3-dev autotools-dev libicu-dev \ + python2.7 python2.7-dev python3 python3-dev autoconf libtool g++ gcc curl zlib1g-dev sudo ruby libusb-1.0-0-dev \ + libcurl4-gnutls-dev pkg-config patch vim-common jq +# build cmake +export PATH=$EOSIO_INSTALL_LOCATION/bin:$PATH +cd $EOSIO_INSTALL_LOCATION && curl -LO https://cmake.org/files/v3.13/cmake-3.13.2.tar.gz && \ + tar -xzf cmake-3.13.2.tar.gz && \ + cd cmake-3.13.2 && \ + ./bootstrap --prefix=$EOSIO_INSTALL_LOCATION && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/cmake-3.13.2.tar.gz $EOSIO_INSTALL_LOCATION/cmake-3.13.2 +# build clang +cd $EOSIO_INSTALL_LOCATION && git clone --single-branch --branch release_80 https://git.llvm.org/git/llvm.git clang8 && cd clang8 && git checkout 18e41dc && \ + cd tools && git clone --single-branch --branch release_80 https://git.llvm.org/git/lld.git && cd lld && git checkout d60a035 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/polly.git && cd polly && git checkout 1bc06e5 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang.git clang && cd clang && git checkout a03da8b && \ + cd tools && mkdir extra && cd extra && git clone --single-branch --branch 
release_80 https://git.llvm.org/git/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ + cd $EOSIO_INSTALL_LOCATION/clang8/projects && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxx.git && cd libcxx && git checkout 1853712 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libunwind.git && cd libunwind && git checkout 57f6739 && \ + cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ + mkdir $EOSIO_INSTALL_LOCATION/clang8/build && cd $EOSIO_INSTALL_LOCATION/clang8/build && \ + cmake -G 'Unix Makefiles' -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=X86 -DCMAKE_BUILD_TYPE=Release .. && \ + make -j $(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/clang8 +# build llvm +cd $EOSIO_INSTALL_LOCATION && git clone --depth 1 --single-branch --branch release_80 https://github.com/llvm-mirror/llvm.git llvm && \ + cd llvm && \ + mkdir build && \ + cd build && \ + cmake -G 'Unix Makefiles' -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake -DCMAKE_EXE_LINKER_FLAGS=-pthread -DCMAKE_SHARED_LINKER_FLAGS=-pthread -DLLVM_ENABLE_PIC=NO .. 
&& \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/llvm +# build boost +cd $EOSIO_INSTALL_LOCATION && curl -LO https://dl.bintray.com/boostorg/release/1.71.0/source/boost_1_71_0.tar.bz2 && \ + tar -xjf boost_1_71_0.tar.bz2 && \ + cd boost_1_71_0 && \ + ./bootstrap.sh --with-toolset=clang --prefix=$EOSIO_INSTALL_LOCATION && \ + ./b2 toolset=clang cxxflags='-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I$EOSIO_INSTALL_LOCATION/include/c++/v1 -D_FORTIFY_SOURCE=2 -fstack-protector-strong -fpie' linkflags='-stdlib=libc++ -pie' link=static threading=multi --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j$(nproc) install && \ + rm -rf $EOSIO_INSTALL_LOCATION/boost_1_71_0.tar.bz2 $EOSIO_INSTALL_LOCATION/boost_1_71_0 +# build mongodb +cd $EOSIO_INSTALL_LOCATION && curl -LO https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1804-4.1.1.tgz && \ + tar -xzf mongodb-linux-x86_64-ubuntu1804-4.1.1.tgz && rm -f mongodb-linux-x86_64-ubuntu1804-4.1.1.tgz && \ + mv $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-ubuntu1804-4.1.1/bin/* $EOSIO_INSTALL_LOCATION/bin/ && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-ubuntu1804-4.1.1 +# build mongodb c driver +cd $EOSIO_INSTALL_LOCATION && curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/1.13.0/mongo-c-driver-1.13.0.tar.gz && \ + tar -xzf mongo-c-driver-1.13.0.tar.gz && cd mongo-c-driver-1.13.0 && \ + mkdir -p build && cd build && \ + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake .. 
&& \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0 +# build mongodb cxx driver +cd $EOSIO_INSTALL_LOCATION && curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r3.4.0.tar.gz -o mongo-cxx-driver-r3.4.0.tar.gz && \ + tar -xzf mongo-cxx-driver-r3.4.0.tar.gz && cd mongo-cxx-driver-r3.4.0 && \ + sed -i 's/\"maxAwaitTimeMS\", count/\"maxAwaitTimeMS\", static_cast<int64_t>(count)/' src/mongocxx/options/change_stream.cpp && \ + sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt && \ + mkdir -p build && cd build && \ + cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake .. && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0 +``` + +## Build EOSIO +These commands build the EOSIO software on the specified OS. Make sure to [Install EOSIO Dependencies](#install-eosio-dependencies) first. +```sh +export EOSIO_BUILD_LOCATION=$EOSIO_LOCATION/build +mkdir -p $EOSIO_BUILD_LOCATION +cd $EOSIO_BUILD_LOCATION && cmake -DCMAKE_BUILD_TYPE='Release' -DCMAKE_TOOLCHAIN_FILE=$EOSIO_LOCATION/scripts/pinned_toolchain.cmake -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DBUILD_MONGO_DB_PLUGIN=true .. +cd $EOSIO_BUILD_LOCATION && make -j$(nproc) +``` + +## Install EOSIO +This command installs the EOSIO software on the specified OS. Make sure to [Build EOSIO](#build-eosio) first. +```sh +cd $EOSIO_BUILD_LOCATION && make install +``` + +## Test EOSIO +These commands validate the EOSIO software installation on the specified OS. This task is optional but recommended. Make sure to [Install EOSIO](#install-eosio) first. 
+```sh +$EOSIO_INSTALL_LOCATION/bin/mongod --fork --logpath $(pwd)/mongod.log --dbpath $(pwd)/mongodata +cd $EOSIO_BUILD_LOCATION && make test +``` + +## Uninstall EOSIO +These commands uninstall the EOSIO software from the specified OS. +```sh +xargs rm < $EOSIO_BUILD_LOCATION/install_manifest.txt +rm -rf $EOSIO_BUILD_LOCATION +``` diff --git a/docs/00_install/01_build-from-source/02_manual-build/03_platforms/ubuntu-18.04-unpinned.md b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/ubuntu-18.04-unpinned.md new file mode 100644 index 00000000000..6def97a519a --- /dev/null +++ b/docs/00_install/01_build-from-source/02_manual-build/03_platforms/ubuntu-18.04-unpinned.md @@ -0,0 +1,112 @@ +--- +content_title: Ubuntu 18.04 (unpinned) +--- + +This section contains shell commands to manually download, build, install, test, and uninstall EOSIO and dependencies on Ubuntu 18.04. + +[[info | Building EOSIO is for Advanced Developers]] +| If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](../../../00_install-prebuilt-binaries.md) instead of building from source. + +Select a task below, then copy/paste the shell commands to a Unix terminal to execute: + +* [Download EOSIO Repository](#download-eosio-repository) +* [Install EOSIO Dependencies](#install-eosio-dependencies) +* [Build EOSIO](#build-eosio) +* [Install EOSIO](#install-eosio) +* [Test EOSIO](#test-eosio) +* [Uninstall EOSIO](#uninstall-eosio) + +[[info | Building EOSIO on another OS?]] +| Visit the [Build EOSIO from Source](../../index.md) section. + +## Download EOSIO Repository +These commands set the EOSIO directories, install git, and clone the EOSIO repository. 
+```sh +# set EOSIO directories +export EOSIO_LOCATION=~/eosio/eos +export EOSIO_INSTALL_LOCATION=$EOSIO_LOCATION/../install +mkdir -p $EOSIO_INSTALL_LOCATION +# install git +apt-get update && apt-get upgrade -y && DEBIAN_FRONTEND=noninteractive apt-get install -y git +# clone EOSIO repository +git clone https://github.com/EOSIO/eos.git $EOSIO_LOCATION +cd $EOSIO_LOCATION && git submodule update --init --recursive +``` + +## Install EOSIO Dependencies +These commands install the EOSIO software dependencies. Make sure to [Download the EOSIO Repository](#download-eosio-repository) first and set the EOSIO directories. +```sh +# install dependencies +apt-get install -y make bzip2 automake libbz2-dev libssl-dev doxygen graphviz libgmp3-dev \ + autotools-dev libicu-dev python2.7 python2.7-dev python3 python3-dev \ + autoconf libtool curl zlib1g-dev sudo ruby libusb-1.0-0-dev \ + libcurl4-gnutls-dev pkg-config patch llvm-7-dev clang-7 vim-common jq +# build cmake +export PATH=$EOSIO_INSTALL_LOCATION/bin:$PATH +cd $EOSIO_INSTALL_LOCATION && curl -LO https://cmake.org/files/v3.13/cmake-3.13.2.tar.gz && \ + tar -xzf cmake-3.13.2.tar.gz && \ + cd cmake-3.13.2 && \ + ./bootstrap --prefix=$EOSIO_INSTALL_LOCATION && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/cmake-3.13.2.tar.gz $EOSIO_INSTALL_LOCATION/cmake-3.13.2 +# build boost +cd $EOSIO_INSTALL_LOCATION && curl -LO https://dl.bintray.com/boostorg/release/1.71.0/source/boost_1_71_0.tar.bz2 && \ + tar -xjf boost_1_71_0.tar.bz2 && \ + cd boost_1_71_0 && \ + ./bootstrap.sh --prefix=$EOSIO_INSTALL_LOCATION && \ + ./b2 --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j$(nproc) install && \ + rm -rf $EOSIO_INSTALL_LOCATION/boost_1_71_0.tar.bz2 $EOSIO_INSTALL_LOCATION/boost_1_71_0 +# build mongodb +cd $EOSIO_INSTALL_LOCATION && curl -LO https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1804-4.1.1.tgz && \ + tar -xzf 
mongodb-linux-x86_64-ubuntu1804-4.1.1.tgz && rm -f mongodb-linux-x86_64-ubuntu1804-4.1.1.tgz && \ + mv $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-ubuntu1804-4.1.1/bin/* $EOSIO_INSTALL_LOCATION/bin/ && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongodb-linux-x86_64-ubuntu1804-4.1.1 +# build mongodb c driver +cd $EOSIO_INSTALL_LOCATION && curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/1.13.0/mongo-c-driver-1.13.0.tar.gz && \ + tar -xzf mongo-c-driver-1.13.0.tar.gz && cd mongo-c-driver-1.13.0 && \ + mkdir -p build && cd build && \ + cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF .. && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-c-driver-1.13.0 +# build mongodb cxx driver +cd $EOSIO_INSTALL_LOCATION && curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r3.4.0.tar.gz -o mongo-cxx-driver-r3.4.0.tar.gz && \ + tar -xzf mongo-cxx-driver-r3.4.0.tar.gz && cd mongo-cxx-driver-r3.4.0 && \ + sed -i 's/\"maxAwaitTimeMS\", count/\"maxAwaitTimeMS\", static_cast<int64_t>(count)/' src/mongocxx/options/change_stream.cpp && \ + sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt && \ + mkdir -p build && cd build && \ + cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION .. && \ + make -j$(nproc) && \ + make install && \ + rm -rf $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0.tar.gz $EOSIO_INSTALL_LOCATION/mongo-cxx-driver-r3.4.0 +``` + +## Build EOSIO +These commands build the EOSIO software on the specified OS. Make sure to [Install EOSIO Dependencies](#install-eosio-dependencies) first. 
+```sh +export EOSIO_BUILD_LOCATION=$EOSIO_LOCATION/build +mkdir -p $EOSIO_BUILD_LOCATION +cd $EOSIO_BUILD_LOCATION && cmake -DCMAKE_BUILD_TYPE='Release' -DCMAKE_CXX_COMPILER='clang++-7' -DCMAKE_C_COMPILER='clang-7' -DLLVM_DIR='/usr/lib/llvm-7/lib/cmake/llvm' -DCMAKE_INSTALL_PREFIX=$EOSIO_INSTALL_LOCATION -DBUILD_MONGO_DB_PLUGIN=true .. +cd $EOSIO_BUILD_LOCATION && make -j$(nproc) +``` + +## Install EOSIO +This command installs the EOSIO software on the specified OS. Make sure to [Build EOSIO](#build-eosio) first. +```sh +cd $EOSIO_BUILD_LOCATION && make install +``` + +## Test EOSIO +These commands validate the EOSIO software installation on the specified OS. Make sure to [Install EOSIO](#install-eosio) first. (**Note**: This task is optional but recommended.) +```sh +$EOSIO_INSTALL_LOCATION/bin/mongod --fork --logpath $(pwd)/mongod.log --dbpath $(pwd)/mongodata +cd $EOSIO_BUILD_LOCATION && make test +``` + +## Uninstall EOSIO +These commands uninstall the EOSIO software from the specified OS. +```sh +xargs rm < $EOSIO_BUILD_LOCATION/install_manifest.txt +rm -rf $EOSIO_BUILD_LOCATION +``` diff --git a/docs/00_install/01_build-from-source/02_manual-build/index.md b/docs/00_install/01_build-from-source/02_manual-build/index.md new file mode 100644 index 00000000000..0852795c3fe --- /dev/null +++ b/docs/00_install/01_build-from-source/02_manual-build/index.md @@ -0,0 +1,28 @@ +--- +content_title: EOSIO Manual Build +--- + +[[info | Manual Builds are for Advanced Developers]] +| These manual instructions are intended for advanced developers. The [Shell Scripts](../01_shell-scripts/index.md) should be the preferred method to build EOSIO from source. If the script fails or your platform is not supported, continue with the instructions below. + +## EOSIO Dependencies + +When performing a manual build, it is necessary to install specific software packages that the EOSIO software depends on. 
To learn more about these dependencies, visit the [EOSIO Software Dependencies](00_eosio-dependencies.md) section. + +## Platforms + +Shell commands are available to manually download, build, install, test, and uninstall the EOSIO software and dependencies for these [platforms](03_platforms/index.md). + +## Out-of-source Builds + +While building dependencies and EOSIO binaries, out-of-source builds are also supported. Refer to the `cmake` help for more information. + +## Other Compilers + +To override `clang`'s default compiler toolchain, add these flags to the `cmake` command within the above instructions: + +`-DCMAKE_CXX_COMPILER=/path/to/c++ -DCMAKE_C_COMPILER=/path/to/cc` + +## Debug Builds + +For a debug build, add `-DCMAKE_BUILD_TYPE=Debug`. Other common build types include `Release` and `RelWithDebInfo`. diff --git a/docs/00_install/01_build-from-source/index.md b/docs/00_install/01_build-from-source/index.md new file mode 100644 index 00000000000..03f3db858f0 --- /dev/null +++ b/docs/00_install/01_build-from-source/index.md @@ -0,0 +1,14 @@ +--- +content_title: Build EOSIO from Source +--- + +[[info | Building EOSIO is for Advanced Developers]] +| If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](../00_install-prebuilt-binaries.md) instead of building from source. + +EOSIO can be built on several platforms using different build methods. Advanced users may opt to build EOSIO using our shell scripts. Node operators or block producers who wish to deploy a public node, may prefer our manual build instructions. + +* [Shell Scripts](01_shell-scripts/index.md) - Suitable for the majority of developers, these scripts build on Mac OS and many flavors of Linux. +* [Manual Build](02_manual-build/index.md) - Suitable for those platforms that may be hostile to the shell scripts or for operators who need more control over their builds. 
+ +[[info | EOSIO Installation Recommended]] +| After building EOSIO successfully, it is highly recommended to install the EOSIO binaries from their default build directory. This copies the EOSIO binaries to a central location, such as `/usr/local/bin`, or `~/eosio/x.y/bin`, where `x.y` is the EOSIO release version. diff --git a/docs/00_install/index.md b/docs/00_install/index.md new file mode 100644 index 00000000000..3153b2547c6 --- /dev/null +++ b/docs/00_install/index.md @@ -0,0 +1,24 @@ +--- +content_title: EOSIO Software Installation +--- + +There are various ways to install and use the EOSIO software: + +* [Install EOSIO Prebuilt Binaries](00_install-prebuilt-binaries.md) +* [Build EOSIO from Source](01_build-from-source/index.md) + +[[info]] +| If you are new to EOSIO, it is recommended that you install the [EOSIO Prebuilt Binaries](00_install-prebuilt-binaries.md), then proceed to the [Getting Started](https://developers.eos.io/eosio-home/docs/) section of the [EOSIO Developer Portal](https://developers.eos.io/). If you are an advanced developer or a block producer, or if no binaries are available for your platform, you may need to [Build EOSIO from source](01_build-from-source/index.md) instead. + +## Supported Operating Systems + +EOSIO currently supports the following operating systems: + +1. Amazon Linux 2 +2. CentOS 7 +3. Ubuntu 16.04 +4. Ubuntu 18.04 +5. MacOS 10.14 (Mojave) + +[[info | Note]] +| It may be possible to install EOSIO on other Unix-based operating systems. This is not officially supported, though. diff --git a/docs/01_nodeos/02_usage/00_nodeos-options.md b/docs/01_nodeos/02_usage/00_nodeos-options.md new file mode 100644 index 00000000000..d81d133ee36 --- /dev/null +++ b/docs/01_nodeos/02_usage/00_nodeos-options.md @@ -0,0 +1,451 @@ +--- +content_title: Nodeos Options +--- + +`Nodeos` is a command line interface (CLI) application. As such, it can be started manually from the command line or through an automated script. 
+ The behavior of `nodeos` is determined mainly by which plugins are loaded and which plugin options are used. The `nodeos` application features two main option categories: *nodeos-specific* options and *plugin-specific* options. + +## Nodeos-specific Options + +Nodeos-specific options are used mainly for housekeeping purposes, such as setting the directory where the blockchain data resides, specifying the name of the `nodeos` configuration file, setting the name and path of the logging configuration file, etc. A sample output from running `$ nodeos --help` is displayed below, showing the nodeos-specific options (Note: the plugin-specific options have been excluded for clarity): + +```console +Application Config Options: + --plugin arg Plugin(s) to enable, may be specified + multiple times + +Application Command Line Options: + -h [ --help ] Print this help message and exit. + -v [ --version ] Print version information. + --full-version Print full version information. + --print-default-config Print default configuration template + -d [ --data-dir ] arg Directory containing program runtime + data + --config-dir arg Directory containing configuration + files such as config.ini + -c [ --config ] arg (=config.ini) Configuration file name relative to + config-dir + -l [ --logconf ] arg (=logging.json) Logging configuration file name/path + for library users +``` + +## Plugin-specific Options + +Plugin-specific options control the behavior of the nodeos plugins. Every plugin-specific option has a unique name, so it can be specified in any order within the command line or `config.ini` file. When specifying one or more plugin-specific option(s), the applicable plugin(s) must also be enabled using the `--plugin` option or else the corresponding option(s) will be ignored. 
A sample output from running `$ nodeos --help` is displayed below, showing an excerpt from the plugin-specific options: + +```console +Config Options for eosio::chain_plugin: + --blocks-dir arg (="blocks") the location of the blocks directory + (absolute path or relative to + application data dir) + --protocol-features-dir arg (="protocol_features") + the location of the protocol_features + directory (absolute path or relative to + application config dir) + --checkpoint arg Pairs of [BLOCK_NUM,BLOCK_ID] that + should be enforced as checkpoints. + --wasm-runtime runtime Override default WASM runtime + --abi-serializer-max-time-ms arg (=15000) + Override default maximum ABI + serialization time allowed in ms + --chain-state-db-size-mb arg (=1024) Maximum size (in MiB) of the chain + state database + --chain-state-db-guard-size-mb arg (=128) + Safely shut down node when free space + remaining in the chain state database + drops below this size (in MiB). + --reversible-blocks-db-size-mb arg (=340) + Maximum size (in MiB) of the reversible + blocks database + --reversible-blocks-db-guard-size-mb arg (=2) + Safely shut down node when free space + remaining in the reverseible blocks + database drops below this size (in + MiB). + --signature-cpu-billable-pct arg (=50) + Percentage of actual signature recovery + cpu to bill. Whole number percentages, + e.g. 
50 for 50% + --chain-threads arg (=2) Number of worker threads in controller + thread pool + --contracts-console print contract's output to console + --actor-whitelist arg Account added to actor whitelist (may + specify multiple times) + --actor-blacklist arg Account added to actor blacklist (may + specify multiple times) + --contract-whitelist arg Contract account added to contract + whitelist (may specify multiple times) + --contract-blacklist arg Contract account added to contract + blacklist (may specify multiple times) + --action-blacklist arg Action (in the form code::action) added + to action blacklist (may specify + multiple times) + --key-blacklist arg Public key added to blacklist of keys + that should not be included in + authorities (may specify multiple + times) + --sender-bypass-whiteblacklist arg Deferred transactions sent by accounts + in this list do not have any of the + subjective whitelist/blacklist checks + applied to them (may specify multiple + times) + --read-mode arg (=speculative) Database read mode ("speculative", + "head", "read-only", "irreversible"). + In "speculative" mode database contains + changes done up to the head block plus + changes made by transactions not yet + included to the blockchain. + In "head" mode database contains + changes done up to the current head + block. + In "read-only" mode database contains + changes done up to the current head + block and transactions cannot be pushed + to the chain API. + In "irreversible" mode database + contains changes done up to the last + irreversible block and transactions + cannot be pushed to the chain API. + + --validation-mode arg (=full) Chain validation mode ("full" or + "light"). + In "full" mode all incoming blocks will + be fully validated. 
+ In "light" mode all incoming blocks + headers will be fully validated; + transactions in those validated blocks + will be trusted + + --disable-ram-billing-notify-checks Disable the check which subjectively + fails a transaction if a contract bills + more RAM to another account within the + context of a notification handler (i.e. + when the receiver is not the code of + the action). + --maximum-variable-signature-length arg (=16384) + Subjectively limit the maximum length + of variable components in a variable + legnth signature to this size in bytes + --trusted-producer arg Indicate a producer whose blocks + headers signed by it will be fully + validated, but transactions in those + validated blocks will be trusted. + --database-map-mode arg (=mapped) Database map mode ("mapped", "heap", or + "locked"). + In "mapped" mode database is memory + mapped as a file. + In "heap" mode database is preloaded in + to swappable memory. + In "locked" mode database is preloaded + and locked in to memory. 
+ + +Command Line Options for eosio::chain_plugin: + --genesis-json arg File to read Genesis State from + --genesis-timestamp arg override the initial timestamp in the + Genesis State file + --print-genesis-json extract genesis_state from blocks.log + as JSON, print to console, and exit + --extract-genesis-json arg extract genesis_state from blocks.log + as JSON, write into specified file, and + exit + --print-build-info print build environment information to + console as JSON and exit + --extract-build-info arg extract build environment information + as JSON, write into specified file, and + exit + --fix-reversible-blocks recovers reversible block database if + that database is in a bad state + --force-all-checks do not skip any checks that can be + skipped while replaying irreversible + blocks + --disable-replay-opts disable optimizations that specifically + target replay + --replay-blockchain clear chain state database and replay + all blocks + --hard-replay-blockchain clear chain state database, recover as + many blocks as possible from the block + log, and then replay those blocks + --delete-all-blocks clear chain state database and block + log + --truncate-at-block arg (=0) stop hard replay / block log recovery + at this block number (if set to + non-zero number) + --import-reversible-blocks arg replace reversible block database with + blocks imported from specified file and + then exit + --export-reversible-blocks arg export reversible block database in + portable format into specified file and + then exit + --snapshot arg File to read Snapshot State from + +Config Options for eosio::history_plugin: + -f [ --filter-on ] arg Track actions which match + receiver:action:actor. Actor may be + blank to include all. Action and Actor + both blank allows all from Recieiver. + Receiver may not be blank. + -F [ --filter-out ] arg Do not track actions which match + receiver:action:actor. Action and Actor + both blank excludes all from Reciever. 
+ Actor blank excludes all from + reciever:action. Receiver may not be + blank. + +Config Options for eosio::http_client_plugin: + --https-client-root-cert arg PEM encoded trusted root certificate + (or path to file containing one) used + to validate any TLS connections made. + (may specify multiple times) + + --https-client-validate-peers arg (=1) + true: validate that the peer + certificates are valid and trusted, + false: ignore cert errors + +Config Options for eosio::http_plugin: + --unix-socket-path arg The filename (relative to data-dir) to + create a unix socket for HTTP RPC; set + blank to disable. + --http-server-address arg (=127.0.0.1:8888) + The local IP and port to listen for + incoming http connections; set blank to + disable. + --https-server-address arg The local IP and port to listen for + incoming https connections; leave blank + to disable. + --https-certificate-chain-file arg Filename with the certificate chain to + present on https connections. PEM + format. Required for https. + --https-private-key-file arg Filename with https private key in PEM + format. Required for https + --https-ecdh-curve arg (=secp384r1) Configure https ECDH curve to use: + secp384r1 or prime256v1 + --access-control-allow-origin arg Specify the Access-Control-Allow-Origin + to be returned on each request. + --access-control-allow-headers arg Specify the Access-Control-Allow-Header + s to be returned on each request. + --access-control-max-age arg Specify the Access-Control-Max-Age to + be returned on each request. + --access-control-allow-credentials Specify if Access-Control-Allow-Credent + ials: true should be returned on each + request. + --max-body-size arg (=1048576) The maximum body size in bytes allowed + for incoming RPC requests + --http-max-bytes-in-flight-mb arg (=500) + Maximum size in megabytes http_plugin + should use for processing http + requests. 503 error response when + exceeded. 
+ --verbose-http-errors Append the error log to HTTP responses + --http-validate-host arg (=1) If set to false, then any incoming + "Host" header is considered valid + --http-alias arg Additionaly acceptable values for the + "Host" header of incoming HTTP + requests, can be specified multiple + times. Includes http/s_server_address + by default. + --http-threads arg (=2) Number of worker threads in http thread + pool + +Config Options for eosio::login_plugin: + --max-login-requests arg (=1000000) The maximum number of pending login + requests + --max-login-timeout arg (=60) The maximum timeout for pending login + requests (in seconds) + +Config Options for eosio::net_plugin: + --p2p-listen-endpoint arg (=0.0.0.0:9876) + The actual host:port used to listen for + incoming p2p connections. + --p2p-server-address arg An externally accessible host:port for + identifying this node. Defaults to + p2p-listen-endpoint. + --p2p-peer-address arg The public endpoint of a peer node to + connect to. Use multiple + p2p-peer-address options as needed to + compose a network. + Syntax: host:port[:|] + The optional 'trx' and 'blk' + indicates to node that only + transactions 'trx' or blocks 'blk' + should be sent. Examples: + p2p.eos.io:9876 + p2p.trx.eos.io:9876:trx + p2p.blk.eos.io:9876:blk + + --p2p-max-nodes-per-host arg (=1) Maximum number of client nodes from any + single IP address + --agent-name arg (="EOS Test Agent") The name supplied to identify this node + amongst the peers. + --allowed-connection arg (=any) Can be 'any' or 'producers' or + 'specified' or 'none'. If 'specified', + peer-key must be specified at least + once. If only 'producers', peer-key is + not required. 'producers' and + 'specified' may be combined. + --peer-key arg Optional public key of peer allowed to + connect. May be used multiple times. 
+ --peer-private-key arg Tuple of [PublicKey, WIF private key] + (may specify multiple times) + --max-clients arg (=25) Maximum number of clients from which + connections are accepted, use 0 for no + limit + --connection-cleanup-period arg (=30) number of seconds to wait before + cleaning up dead connections + --max-cleanup-time-msec arg (=10) max connection cleanup time per cleanup + call in millisec + --net-threads arg (=2) Number of worker threads in net_plugin + thread pool + --sync-fetch-span arg (=100) number of blocks to retrieve in a chunk + from any individual peer during + synchronization + --use-socket-read-watermark arg (=0) Enable expirimental socket read + watermark optimization + --peer-log-format arg (=["${_name}" ${_ip}:${_port}]) + The string used to format peers when + logging messages about them. Variables + are escaped with ${}. + Available Variables: + _name self-reported name + + _id self-reported ID (64 hex + characters) + + _sid first 8 characters of + _peer.id + + _ip remote IP address of peer + + _port remote port number of peer + + _lip local IP address connected to + peer + + _lport local port number connected + to peer + + + +Config Options for eosio::producer_plugin: + + -e [ --enable-stale-production ] Enable block production, even if the + chain is stale. + -x [ --pause-on-startup ] Start this node in a state where + production is paused + --max-transaction-time arg (=30) Limits the maximum time (in + milliseconds) that is allowed a pushed + transaction's code to execute before + being considered invalid + --max-irreversible-block-age arg (=-1) + Limits the maximum age (in seconds) of + the DPOS Irreversible Block for a chain + this node will produce blocks on (use + negative value to indicate unlimited) + -p [ --producer-name ] arg ID of producer controlled by this node + (e.g. 
inita; may specify multiple + times) + --private-key arg (DEPRECATED - Use signature-provider + instead) Tuple of [public key, WIF + private key] (may specify multiple + times) + --signature-provider arg (=EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3) + Key=Value pairs in the form + = + Where: + is a string form of + a vaild EOSIO public + key + + is a string in the + form + : + + is KEY, or KEOSD + + KEY: is a string form of + a valid EOSIO + private key which + maps to the provided + public key + + KEOSD: is the URL where + keosd is available + and the approptiate + wallet(s) are + unlocked + --keosd-provider-timeout arg (=5) Limits the maximum time (in + milliseconds) that is allowed for + sending blocks to a keosd provider for + signing + --greylist-account arg account that can not access to extended + CPU/NET virtual resources + --greylist-limit arg (=1000) Limit (between 1 and 1000) on the + multiple that CPU/NET virtual resources + can extend during low usage (only + enforced subjectively; use 1000 to not + enforce any limit) + --produce-time-offset-us arg (=0) offset of non last block producing time + in microseconds. Negative number + results in blocks to go out sooner, and + positive number results in blocks to go + out later + --last-block-time-offset-us arg (=0) offset of last block producing time in + microseconds. Negative number results + in blocks to go out sooner, and + positive number results in blocks to go + out later + --max-scheduled-transaction-time-per-block-ms arg (=100) + Maximum wall-clock time, in + milliseconds, spent retiring scheduled + transactions in any block before + returning to normal transaction + processing. + --subjective-cpu-leeway-us arg (=31000) + Time in microseconds allowed for a + transaction that starts with + insufficient CPU quota to complete and + cover its CPU usage. 
+ --incoming-defer-ratio arg (=1) ratio between incoming transations and + deferred transactions when both are + exhausted + --incoming-transaction-queue-size-mb arg (=1024) + Maximum size (in MiB) of the incoming + transaction queue. Exceeding this value + will subjectively drop transaction with + resource exhaustion. + --producer-threads arg (=2) Number of worker threads in producer + thread pool + --snapshots-dir arg (="snapshots") the location of the snapshots directory + (absolute path or relative to + application data dir) + +Config Options for eosio::state_history_plugin: + --state-history-dir arg (="state-history") + the location of the state-history + directory (absolute path or relative to + application data dir) + --trace-history enable trace history + --chain-state-history enable chain state history + --state-history-endpoint arg (=127.0.0.1:8080) + the endpoint upon which to listen for + incoming connections. Caution: only + expose this port to your internal + network. + --trace-history-debug-mode enable debug mode for trace history + +Command Line Options for eosio::state_history_plugin: + --delete-state-history clear state history files + +Config Options for eosio::txn_test_gen_plugin: + --txn-reference-block-lag arg (=0) Lag in number of blocks from the head + block when selecting the reference + block for transactions (-1 means Last + Irreversible Block) + --txn-test-gen-threads arg (=2) Number of worker threads in + txn_test_gen thread pool + --txn-test-gen-account-prefix arg (=txn.test.) + Prefix to use for accounts generated + and used by this plugin +``` + +For more information on each plugin-specific option, just visit the [Plugins](../03_plugins/index.md) section. 
diff --git a/docs/01_nodeos/02_usage/01_nodeos-configuration.md b/docs/01_nodeos/02_usage/01_nodeos-configuration.md new file mode 100644 index 00000000000..227dddab442 --- /dev/null +++ b/docs/01_nodeos/02_usage/01_nodeos-configuration.md @@ -0,0 +1,63 @@ +--- +content_title: Nodeos Configuration +--- + +The plugin-specific options can be configured using either CLI options or a configuration file, `config.ini`. Nodeos-specific options can only be configured from the command line. All CLI options and `config.ini` options can be found by running `$ nodeos --help` as shown above. + +Each `config.ini` option has a corresponding CLI option. However, not all CLI options are available in `config.ini`. For instance, most plugin-specific options that perform actions are not available in `config.ini`, such as `--delete-state-history` from `state_history_plugin`. + +For example, the CLI option `--plugin eosio::chain_api_plugin` can also be set by adding `plugin = eosio::chain_api_plugin` in `config.ini`. + +## `config.ini` location + +The default `config.ini` can be found in the following folders: +- Mac OS: `~/Library/Application Support/eosio/nodeos/config` +- Linux: `~/.local/share/eosio/nodeos/config` + +A custom `config.ini` file can be set by passing the `nodeos` option `--config path/to/config.ini`. 
+ +## Nodeos Example + +The example below shows a typical usage of `nodeos` when starting a block producing node: + +```sh +$ nodeos --replay-blockchain \ + -e -p eosio \ + --plugin eosio::producer_plugin \ + --plugin eosio::chain_api_plugin \ + --plugin eosio::http_plugin \ + >> nodeos.log 2>&1 & +``` + +```sh +$ nodeos \ + -e -p eosio \ + --data-dir /users/mydir/eosio/data \ + --config-dir /users/mydir/eosio/config \ + --plugin eosio::producer_plugin \ + --plugin eosio::chain_plugin \ + --plugin eosio::http_plugin \ + --plugin eosio::state_history_plugin \ + --contracts-console \ + --disable-replay-opts \ + --access-control-allow-origin='*' \ + --http-validate-host=false \ + --verbose-http-errors \ + --state-history-dir /shpdata \ + --trace-history \ + --chain-state-history \ + >> nodeos.log 2>&1 & +``` + +The above `nodeos` command starts a producing node by: + +* enabling block production (`-e`) +* identifying itself as block producer "eosio" (`-p`) +* setting the blockchain data directory (`--data-dir`) +* setting the `config.ini` directory (`--config-dir`) +* loading plugins `producer_plugin`, `chain_plugin`, `http_plugin`, `state_history_plugin` (`--plugin`) +* passing `chain_plugin` options (`--contracts-console`, `--disable-replay-opts`) +* passing `http-plugin` options (`--access-control-allow-origin`, `--http-validate-host`, `--verbose-http-errors`) +* passing `state_history` options (`--state-history-dir`, `--trace-history`, `--chain-state-history`) +* redirecting both `stdout` and `stderr` to the `nodeos.log` file +* returning to the shell by running in the background (&) diff --git a/docs/01_nodeos/02_usage/02_node-setups/00_producing-node.md b/docs/01_nodeos/02_usage/02_node-setups/00_producing-node.md new file mode 100644 index 00000000000..583a301c927 --- /dev/null +++ b/docs/01_nodeos/02_usage/02_node-setups/00_producing-node.md @@ -0,0 +1,97 @@ +--- +content_title: Producing Node Setup +--- + +[[info | System contracts required]] +| These 
instructions assume you want to launch a producing node on a network with **system contracts loaded**. These instructions will not work on a default development node using native functionality, or one without system contracts loaded. + +## Goal + +This section describes how to set up a producing node within the EOSIO network. A producing node, as its name implies, is a node that is configured to produce blocks in an `EOSIO` based blockchain. This functionality is provided through the `producer_plugin` as well as other [Nodeos Plugins](../../03_plugins/index.md). + +## Before you begin + +* [Install the EOSIO software](../../../00_install/index.md) before starting this section. +* It is assumed that `nodeos`, `cleos`, and `keosd` are accessible through the path. If you built EOSIO using shell scripts, make sure to run the [Install Script](../../../00_install/01_build-from-source/01_shell-scripts/03_install-eosio-binaries.md). +* Know how to pass [Nodeos options](../../02_usage/00_nodeos-options.md) to enable or disable functionality. + +## Steps + +Please follow the steps below to set up a producing node: + +1. [Register your account as a producer](#1-register-your-account-as-a-producer) +2. [Set Producer Name](#2-set-producer-name) +3. [Set the Producer's signature-provider](#3-set-the-producers-signature-provider) +4. [Define a peers list](#4-define-a-peers-list) +5. [Load the Required Plugins](#5-load-the-required-plugins) + +### 1. Register your account as a producer + +In order for your account to be eligible as a producer, you will need to register the account as a producer: + +```sh +$ cleos system regproducer accountname1 EOS1234534... http://producer.site Antarctica +``` + +### 2. Set Producer Name + +Set the `producer-name` option in `config.ini` to your account, as follows: + +```console +# config.ini: + +# ID of producer controlled by this node (e.g. inita; may specify multiple times) (eosio::producer_plugin) +producer-name = youraccount +``` + +### 3. 
Set the Producer's signature-provider + +You will need to set the private key for your producer. The public key should have an authority for the producer account defined above. + +`signature-provider` is defined with a 3-field tuple: +* `public-key` - A valid EOSIO public key in form of a string. +* `provider-spec` - It's a string formatted like `<provider-type>:<data>` +* `provider-type` - KEY or KEOSD + +#### Using a Key: + +```console +# config.ini: + +signature-provider = PUBLIC_SIGNING_KEY=KEY:PRIVATE_SIGNING_KEY + +//Example +//signature-provider = EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3 +``` + +#### Using Keosd: +You can also use Keosd instead of hard-defining keys. + +```console +# config.ini: + +signature-provider = PUBLIC_SIGNING_KEY=KEOSD:KEOSD_URL + +//Example +//signature-provider = EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEOSD:https://127.0.0.1:88888 +``` + +### 4. Define a peers list + +```console +# config.ini: + +# Default p2p port is 9876 +p2p-peer-address = 123.255.78.9:9876 +``` + +### 5. Load the Required Plugins + +In your [config.ini](../index.md), confirm the following plugins are loading or append them if necessary. + +```console +# config.ini: + +plugin = eosio::chain_plugin +plugin = eosio::producer_plugin +``` diff --git a/docs/01_nodeos/02_usage/02_node-setups/01_non-producing-node.md b/docs/01_nodeos/02_usage/02_node-setups/01_non-producing-node.md new file mode 100644 index 00000000000..329507c3685 --- /dev/null +++ b/docs/01_nodeos/02_usage/02_node-setups/01_non-producing-node.md @@ -0,0 +1,40 @@ +--- +content_title: Non-producing Node Setup +--- + +## Goal + +This section describes how to set up a non-producing node within the EOSIO network. 
 A non-producing node is a node that is not configured to produce blocks, instead it is connected and synchronized with other peers from an `EOSIO` based blockchain, exposing one or more services publicly or privately by enabling one or more [Nodeos Plugins](../../03_plugins/index.md), except the `producer_plugin`. + +## Before you begin + +* [Install the EOSIO software](../../../00_install/index.md) before starting this section. +* It is assumed that `nodeos`, `cleos`, and `keosd` are accessible through the path. If you built EOSIO using shell scripts, make sure to run the [Install Script](../../../00_install/01_build-from-source/01_shell-scripts/03_install-eosio-binaries.md). +* Know how to pass [Nodeos options](../../02_usage/00_nodeos-options.md) to enable or disable functionality. + +## Steps + +Setting up a non-producing node is simple. + +1. [Set Peers](#1-set-peers) +2. [Enable one or more available plugins](#2-enable-one-or-more-available-plugins) + +### 1. Set Peers + +You need to set some peers in your `config.ini`, for example: + +```console +# config.ini: + +p2p-peer-address = 106.10.42.238:9876 +``` + +Or you can include the peer as a boot flag when running `nodeos`, as follows: + +```sh +$ nodeos ... --p2p-peer-address=106.10.42.238:9876 +``` + +### 2. Enable one or more available plugins + +Each available plugin is listed and detailed in the [Nodeos Plugins](../../03_plugins/index.md) section. When `nodeos` starts, it will expose the functionality provided by the enabled plugins it was started with. For example, if you start `nodeos` with [`state_history_plugin`](../../03_plugins/state_history_plugin/index.md) enabled, you will have a non-producing node that offers full blockchain history. If you start `nodeos` with [`http_plugin`](../../03_plugins/http_plugin/index.md) enabled, you will have a non-producing node which exposes the EOSIO RPC API. 
Therefore, you can extend the basic functionality provided by a non-producing node by enabling any number of existing plugins on top of it. Another aspect to consider is that some plugins have dependencies to other plugins. Therefore, you need to satisfy all dependencies for a plugin in order to enable it. diff --git a/docs/01_nodeos/02_usage/02_node-setups/index.md b/docs/01_nodeos/02_usage/02_node-setups/index.md new file mode 100644 index 00000000000..ebad19d97a4 --- /dev/null +++ b/docs/01_nodeos/02_usage/02_node-setups/index.md @@ -0,0 +1,12 @@ +--- +content_title: Nodeos Common Setups +--- + +`Nodeos` generally runs in two modes: + + * [Producing Node](00_producing-node.md) + * [Non-Producing Node](01_non-producing-node.md) + +`Producing Nodes` are configured for block production. They connect to the peer-to-peer network and actively produce new blocks. Loose transactions are also validated and relayed. On mainnet, `Producing Nodes` only produce blocks if their assigned block producer is part of an active schedule. + +`Non-Producing Nodes` connect to the peer-to-peer network but do not actively produce new blocks; they are useful for acting as proxy nodes, relaying API calls, validating transactions, broadcasting information to other nodes, etc. `Non-Producing Nodes` are also useful for monitoring the blockchain state. diff --git a/docs/01_nodeos/02_usage/03_development-environment/00_local-single-node-testnet.md b/docs/01_nodeos/02_usage/03_development-environment/00_local-single-node-testnet.md new file mode 100644 index 00000000000..5b29dff55cb --- /dev/null +++ b/docs/01_nodeos/02_usage/03_development-environment/00_local-single-node-testnet.md @@ -0,0 +1,122 @@ +--- +content_title: Local Single-Node Testnet +--- + +## Goal + +This section describes how to set up a single-node blockchain configuration running on a single host. This is referred to as a _**single host, single-node testnet**_. 
We will set up one node on your local computer and have it produce blocks. The following diagram depicts the desired single host testnet. + +![Single host single node testnet](single-host-single-node-testnet.png) + +`cleos` is used to manage the wallets, manage the accounts, and invoke actions on the blockchain. `keosd` performs wallet management, including digital signing. If not started explicitly, `keosd` is started by `cleos` by default. + +## Before you begin + +* [Install the EOSIO software](../../../00_install/index.md) before starting this section. +* It is assumed that `nodeos`, `cleos`, and `keosd` are accessible through the path. If you built EOSIO using shell scripts, make sure to run the [Install Script](../../../00_install/01_build-from-source/01_shell-scripts/03_install-eosio-binaries.md). +* Know how to pass [Nodeos options](../../02_usage/00_nodeos-options.md) to enable or disable functionality. + +## Steps + +Open one "terminal" window and perform the following steps: + +1. [Start the Producer Node](#1-start-the-producer-node) +2. [Get Node Info](#2-get-node-info) + +### 1. Start the Producer Node + +Start your own single-node blockchain with this single command: + +```sh +$ nodeos -e -p eosio --plugin eosio::chain_api_plugin --plugin eosio::history_api_plugin +``` + +[[info | Nodeos Minimal Options]] +| A minimal `nodeos` instance setup for block production requires both `chain_api_plugin` and `history_api_plugin` with the `-e` option (enable stale production) and `-p eosio` option (producer name `eosio`). Alternatively, you can also setup and specify your own account as the producer name. + +After running `nodeos`, you should get log messages similar as below. It means the blocks are successfully produced. 
+ +```console +1575001ms thread-0 chain_controller.cpp:235 _push_block ] initm #1 @2017-09-04T04:26:15 | 0 trx, 0 pending, exectime_ms=0 +1575001ms thread-0 producer_plugin.cpp:207 block_production_loo ] initm generated block #1 @ 2017-09-04T04:26:15 with 0 trxs 0 pending +1578001ms thread-0 chain_controller.cpp:235 _push_block ] initc #2 @2017-09-04T04:26:18 | 0 trx, 0 pending, exectime_ms=0 +1578001ms thread-0 producer_plugin.cpp:207 block_production_loo ] initc generated block #2 @ 2017-09-04T04:26:18 with 0 trxs 0 pending +... +eosio generated block 046b9984... #101527 @ 2018-04-01T14:24:58.000 with 0 trxs +eosio generated block 5e527ee2... #101528 @ 2018-04-01T14:24:58.500 with 0 trxs +... +``` +At this point, `nodeos` is running with a single producer, `eosio`. + +### 2. Get Node Info + +Get info about the producing node: + +```sh +$ cleos get info +``` + +This should produce output that looks similar to this: + +```console +{ + "server_version": "0f9df63e", + "chain_id": "cf057bbfb72640471fd910bcb67639c22df9f92470936cddc1ade0e2f2e7dc4f", + "head_block_num": 134, + "last_irreversible_block_num": 133, + "last_irreversible_block_id": "00000085060e9872849ef87bef3b19ab07de9faaed71154510c7f0aeeaddae2c", + "head_block_id": "000000861e3222dce1c7c2cfb938940d8aac22c816cc8b0b89f6bf65a8ad5bdc", + "head_block_time": "2019-11-18T22:13:10.500", + "head_block_producer": "eosio", + "virtual_block_cpu_limit": 228396, + "virtual_block_net_limit": 1197744, + "block_cpu_limit": 199900, + "block_net_limit": 1048576, + "server_version_string": "v2.0.0-rc2", + "fork_db_head_block_num": 134, + "fork_db_head_block_id": "000000861e3222dce1c7c2cfb938940d8aac22c816cc8b0b89f6bf65a8ad5bdc", + "server_full_version_string": "v2.0.0-rc2-0f9df63e1eca4dda4cb7df30683f4a1220599444" +} +``` + +## Advanced Steps + +The more advanced user will likely have need to modify the configuration. `nodeos` uses a custom configuration folder. The location of this folder is determined by your system. 
 + +* Mac OS: `~/Library/Application\ Support/eosio/nodeos/config` +* Linux: `~/.local/share/eosio/nodeos/config` + +The build seeds this folder with a default `genesis.json` file. A configuration folder can be specified using the `--config-dir` command line argument to `nodeos`. If you use this option, you will need to manually copy a `genesis.json` file to your config folder. + +`nodeos` will need a properly configured `config.ini` file in order to do meaningful work. On startup, `nodeos` looks in the config folder for `config.ini`. If one is not found, a default `config.ini` file is created. If you do not already have a `config.ini` file ready to use, run `nodeos` and then close it immediately with Ctrl-C. A default configuration (`config.ini`) will have been created in the config folder. Edit the `config.ini` file, adding/updating the following settings to the defaults already in place: + +```console +# config.ini: + + # Enable production on a stale chain, since a single-node test chain is pretty much always stale + enable-stale-production = true + # Enable block production with the testnet producers + producer-name = eosio + # Load the block producer plugin, so you can produce blocks + plugin = eosio::producer_plugin + # As well as API and HTTP plugins + plugin = eosio::chain_api_plugin + plugin = eosio::http_plugin + plugin = eosio::history_api_plugin +``` + +Now it should be possible to run `nodeos` and see it begin producing blocks. + +```sh +$ nodeos +``` + +`nodeos` stores runtime data (e.g., shared memory and log content) in a custom data folder. The location of this folder is determined by your system. + +* Mac OS: `~/Library/Application\ Support/eosio/nodeos/data` +* Linux: `~/.local/share/eosio/nodeos/data` + +A data folder can be specified using the `--data-dir` command line argument to `nodeos`. + +[[info | What's next?]] +| We will explore how to set up and run a [single-host, multi-node testnet](01_local-multi-node-testnet.md). 
diff --git a/docs/01_nodeos/02_usage/03_development-environment/01_local-multi-node-testnet.md b/docs/01_nodeos/02_usage/03_development-environment/01_local-multi-node-testnet.md new file mode 100644 index 00000000000..28dcb916f0f --- /dev/null +++ b/docs/01_nodeos/02_usage/03_development-environment/01_local-multi-node-testnet.md @@ -0,0 +1,215 @@ +--- +content_title: Local Multi-Node Testnet +--- + +## Goal + +This section describes how to set up a multi-node blockchain configuration running on a single host. This is referred to as a _**single host, multi-node testnet**_. We will set up two nodes on your local computer and have them communicate with each other. The examples in this section rely on three command-line applications, `nodeos`, `keosd`, and `cleos`. The following diagram depicts the desired testnet configuration. + +![Single host multi node testnet](single-host-multi-node-testnet.png) + +## Before you begin + +* [Install the EOSIO software](../../../00_install/index.md) before starting this section. +* It is assumed that `nodeos`, `cleos`, and `keosd` are accessible through the path. If you built EOSIO using shell scripts, make sure to run the [Install Script](../../../00_install/01_build-from-source/01_shell-scripts/03_install-eosio-binaries.md). +* Know how to pass [Nodeos options](../../02_usage/00_nodeos-options.md) to enable or disable functionality. + +## Steps + +Open four "terminal" windows and perform the following steps: + +1. [Start the Wallet Manager](#1-start-the-wallet-manager) +2. [Create a Default Wallet](#2-create-a-default-wallet) +3. [Loading the EOSIO Key](#3-loading-the-eosio-key) +4. [Start the First Producer Node](#4-start-the-first-producer-node) +5. [Start the Second Producer Node](#5-start-the-second-producer-node) +6. [Get Nodes Info](#6-get-nodes-info) + +### 1. 
Start the Wallet Manager + +In the first terminal window, start `keosd`, the wallet management application: + +```sh +$ keosd --http-server-address 127.0.0.1:8899 +``` + +If successful, `keosd` will display some information, starting with: + +```console +2493323ms thread-0 wallet_plugin.cpp:39 plugin_initialize ] initializing wallet plugin +2493323ms thread-0 http_plugin.cpp:141 plugin_initialize ] host: 127.0.0.1 port: 8899 +2493323ms thread-0 http_plugin.cpp:144 plugin_initialize ] configured http to listen on 127.0.0.1:8899 +2493323ms thread-0 http_plugin.cpp:213 plugin_startup ] start listening for http requests +2493324ms thread-0 wallet_api_plugin.cpp:70 plugin_startup ] starting wallet_api_plugin +``` + +Look for a line saying the wallet is listening on 127.0.0.1:8899. This will indicate that `keosd` started correctly and is listening on the correct port. If you see anything else, or you see some error report prior to "starting wallet_api_plugin", then you need to diagnose the issue and restart. + +When `keosd` is running correctly, leave that window open with the wallet app running and move to the next terminal window. + +### 2. Create a Default Wallet + +In the next terminal window, use `cleos`, the command-line utility, to create the default wallet. + +```sh +$ cleos --wallet-url http://127.0.0.1:8899 wallet create --to-console +``` + +`cleos` will indicate that it created the "default" wallet, and will provide a password for future wallet access. As the message says, be sure to preserve this password for future use. Here is an example of this output: + +```console +Creating wallet: default +Save password to use in the future to unlock this wallet. +Without password imported keys will not be retrievable. +"PW5JsmfYz2wrdUEotTzBamUCAunAA8TeRZGT57Ce6PkvM12tre8Sm" +``` + +`keosd` will generate some status output in its window. We will continue to use this second window for subsequent `cleos` commands. + +### 3. 
Loading the EOSIO Key + +The private blockchain launched in the steps above is created with a default initial key which must be loaded into the wallet. + +```sh +$ cleos --wallet-url http://127.0.0.1:8899 wallet import --private-key 5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3 +``` + +```console +imported private key for: EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV +``` + +### 4. Start the First Producer Node + +We can now start the first producer node. In the third terminal window run: + +```sh +$ nodeos --enable-stale-production --producer-name eosio --plugin eosio::chain_api_plugin --plugin eosio::net_api_plugin +``` + +This creates a special producer, known as the "bios" producer. Assuming everything has executed correctly to this point, you should see output from the `nodeos` process reporting block creation. + +### 5. Start the Second Producer Node + +The following commands assume that you are running this tutorial from the `eos\build` directory, from which you ran `./eosio_build.sh` to build the EOSIO binaries. + +To start additional nodes, you must first load the `eosio.bios` contract. This contract enables you to have direct control over the resource allocation of other accounts and to access other privileged API calls. Return to the second terminal window and run the following command to load the contract: + +```sh +$ cleos --wallet-url http://127.0.0.1:8899 set contract eosio build/contracts/eosio.bios +``` + +We will create an account to become a producer, using the account name `inita`. To create the account, we need to generate keys to associate with the account, and import those into our wallet. + +Run the create key command: + +```sh +$ cleos create key +``` + +[[caution | Caution]] +| The command line instructions that follow use the private/public keys shown below. 
In order to be able to cut-and-paste the command line instructions directly from this tutorial, use those keys instead of the ones generated from your `cleos create key` command. If you still want to use your newly generated keys, you need to replace the key values with yours in the commands that follow. + +This will report newly generated public and private keypairs that will look similar to the following. + +```console +Private key: 5JgbL2ZnoEAhTudReWH1RnMuQS6DBeLZt4ucV6t8aymVEuYg7sr +Public key: EOS6hMjoWRF2L8x9YpeqtUEcsDKAyxSuM1APicxgRU1E3oyV5sDEg +``` + +Now import the private key portion into your wallet. If successful, the matching public key will be reported. This should match the previously generated public key: + +```sh +$ cleos --wallet-url http://127.0.0.1:8899 wallet import 5JgbL2ZnoEAhTudReWH1RnMuQS6DBeLZt4ucV6t8aymVEuYg7sr +``` + +```console +imported private key for: EOS6hMjoWRF2L8x9YpeqtUEcsDKAyxSuM1APicxgRU1E3oyV5sDEg +``` + +Create the `inita` account that we will use to become a producer. The `create account` command requires two public keys, one for the account's owner key and one for its active key. In this example, the newly created public key is used twice, as both the owner key and the active key. Example output from the create command is shown: + +```sh +$ cleos --wallet-url http://127.0.0.1:8899 create account eosio inita EOS6hMjoWRF2L8x9YpeqtUEcsDKAyxSuM1APicxgRU1E3oyV5sDEg EOS6hMjoWRF2L8x9YpeqtUEcsDKAyxSuM1APicxgRU1E3oyV5sDEg +``` + +```console +executed transaction: d1ea511977803d2d88f46deb554f5b6cce355b9cc3174bec0da45fc16fe9d5f3 352 bytes 102400 cycles +# eosio <= eosio::newaccount {"creator":"eosio","name":"inita","owner":{"threshold":1,"keys":[{"key":"EOS6hMjoWRF2L8x9YpeqtUEcsDK... +``` + +We now have an account that is available to have a contract assigned to it, enabling it to do meaningful work. In other tutorials, the account has been used to establish simple contracts. 
In this case, the account will be designated as a block producer. + +In the fourth terminal window, start a second `nodeos` instance. Notice that this command line is substantially longer than the one we used above to create the first producer. This is necessary to avoid collisions with the first `nodeos` instance. Fortunately, you can just cut and paste this command line and adjust the keys: + +```sh +$ nodeos --producer-name inita --plugin eosio::chain_api_plugin --plugin eosio::net_api_plugin --http-server-address 127.0.0.1:8889 --p2p-listen-endpoint 127.0.0.1:9877 --p2p-peer-address 127.0.0.1:9876 --config-dir node2 --data-dir node2 --private-key [\"EOS6hMjoWRF2L8x9YpeqtUEcsDKAyxSuM1APicxgRU1E3oyV5sDEg\",\"5JgbL2ZnoEAhTudReWH1RnMuQS6DBeLZt4ucV6t8aymVEuYg7sr\"] +``` + +The output from this new node will show a little activity but will stop reporting until the last step in this tutorial, when the `inita` account is registered as a producer account and activated. Here is some example output from a newly started node. Your output might look a little different, depending on how much time you took entering each of these commands. 
Furthermore, this example is only the last few lines of output: + +```console +2393147ms thread-0 producer_plugin.cpp:176 plugin_startup ] producer plugin: plugin_startup() end +2393157ms thread-0 net_plugin.cpp:1271 start_sync ] Catching up with chain, our last req is 0, theirs is 8249 peer dhcp15.ociweb.com:9876 - 295f5fd +2393158ms thread-0 chain_controller.cpp:1402 validate_block_heade ] head_block_time 2018-03-01T12:00:00.000, next_block 2018-04-05T22:31:08.500, block_interval 500 +2393158ms thread-0 chain_controller.cpp:1404 validate_block_heade ] Did not produce block within block_interval 500ms, took 3061868500ms) +2393512ms thread-0 producer_plugin.cpp:241 block_production_loo ] Not producing block because production is disabled until we receive a recent block (see: --enable-stale-production) +2395680ms thread-0 net_plugin.cpp:1385 recv_notice ] sync_manager got last irreversible block notice +2395680ms thread-0 net_plugin.cpp:1271 start_sync ] Catching up with chain, our last req is 8248, theirs is 8255 peer dhcp15.ociweb.com:9876 - 295f5fd +2396002ms thread-0 producer_plugin.cpp:226 block_production_loo ] Previous result occurred 5 times +2396002ms thread-0 producer_plugin.cpp:244 block_production_loo ] Not producing block because it isn't my turn, its eosio +``` + +At this point, the second `nodeos` is an idle producer. To turn it into an active producer, `inita` needs to be registered as a producer with the bios node, and the bios node needs to perform an action to update the producer schedule. 
 + +```sh +$ cleos --wallet-url http://127.0.0.1:8899 push action eosio setprods "{ \"schedule\": [{\"producer_name\": \"inita\",\"block_signing_key\": \"EOS6hMjoWRF2L8x9YpeqtUEcsDKAyxSuM1APicxgRU1E3oyV5sDEg\"}]}" -p eosio@active +``` + +```console +executed transaction: 2cff4d96814752aefaf9908a7650e867dab74af02253ae7d34672abb9c58235a 272 bytes 105472 cycles +# eosio <= eosio::setprods {"version":1,"producers":[{"producer_name":"inita","block_signing_key":"EOS6hMjoWRF2L8x9YpeqtUEcsDKA... +``` + +Congratulations, you have now configured a two-node testnet! You can see that the original node is no longer producing blocks but it is receiving them. You can verify this by running the `get info` command against each node. + +### 6. Get Nodes Info + +Get info about the first node: + +```sh +$ cleos get info +``` + +This should produce output that looks similar to this: + +```console +{ + "server_version": "223565e8", + "head_block_num": 11412, + "last_irreversible_block_num": 11411, + "head_block_id": "00002c94daf7dff456cd940bd585c4d9b38e520e356d295d3531144329c8b6c3", + "head_block_time": "2018-04-06T00:06:14", + "head_block_producer": "inita" +} +``` + +Now for the second node: + +```sh +$ cleos --url http://127.0.0.1:8889 get info +``` + +This should produce output that looks similar to this: + +```console +{ + "server_version": "223565e8", + "head_block_num": 11438, + "last_irreversible_block_num": 11437, + "head_block_id": "00002cae32697444fa9a2964e4db85b5e8fd4c8b51529a0c13e38587c1bf3c6f", + "head_block_time": "2018-04-06T00:06:27", + "head_block_producer": "inita" +} +``` + +In a later tutorial we will explore how to use more advanced tools to run a multi-host, multi-node testnet. 
diff --git a/docs/01_nodeos/02_usage/03_development-environment/index.md b/docs/01_nodeos/02_usage/03_development-environment/index.md new file mode 100644 index 00000000000..c01fb17c51d --- /dev/null +++ b/docs/01_nodeos/02_usage/03_development-environment/index.md @@ -0,0 +1,26 @@ +--- +content_title: Development Environment +--- + +There are several ways to configure a `nodeos` environment for development and testing. Which option to use largely depends on what the project goals are. Some practical options are provided below. + +## Local Single-Node Testnet + +This is the go-to option for smart contract developers, aspiring Block Producers or Non-Producing Node operators. It has the most simple configuration with the least number of requirements. + +* [Configure Nodeos as a Local Single-node Testnet](00_local-single-node-testnet.md) + +## Local Multi-Node Testnet + +While this option can technically be used for smart contract development, it may be overkill. This is most beneficial for those who are working on aspects of core development, such as benchmarking, optimization and experimentation. It's also a good option for hands-on learning and concept proofing. 
+ +* [Configure Nodeos as a Local Two-Node Testnet](01_local-multi-node-testnet.md) +* [Configure Nodeos as a Local 21-Node Testnet](https://github.com/EOSIO/eos/blob/master/tutorials/bios-boot-tutorial/README.md) + +## Third-Party Testnets + +The following third-party testnets are available for testing EOSIO dApps and smart contracts: + +* Jungle Testnet [monitor](https://monitor.jungletestnet.io/), [website](https://jungletestnet.io/) +* [CryptoKylin Testnet](https://www.cryptokylin.io/) +* [Telos Testnet](https://mon-test.telosfoundation.io/) diff --git a/docs/01_nodeos/02_usage/03_development-environment/single-host-multi-node-testnet.png b/docs/01_nodeos/02_usage/03_development-environment/single-host-multi-node-testnet.png new file mode 100644 index 00000000000..86b63c571f3 Binary files /dev/null and b/docs/01_nodeos/02_usage/03_development-environment/single-host-multi-node-testnet.png differ diff --git a/docs/01_nodeos/02_usage/03_development-environment/single-host-single-node-testnet.png b/docs/01_nodeos/02_usage/03_development-environment/single-host-single-node-testnet.png new file mode 100644 index 00000000000..0698c40e213 Binary files /dev/null and b/docs/01_nodeos/02_usage/03_development-environment/single-host-single-node-testnet.png differ diff --git a/docs/01_nodeos/02_usage/05_nodeos-implementation.md b/docs/01_nodeos/02_usage/05_nodeos-implementation.md new file mode 100644 index 00000000000..aff18bf7b6e --- /dev/null +++ b/docs/01_nodeos/02_usage/05_nodeos-implementation.md @@ -0,0 +1,61 @@ +--- +content_title: Nodeos Implementation +--- + +The EOSIO platform stores blockchain information in various data structures at various stages of a transaction's lifecycle. Some of these are described below. The producing node is the `nodeos` instance run by the block producer who is currently creating blocks for the blockchain (which changes every 6 seconds, producing 12 blocks in sequence before switching to another producer.) 
+ +## Blockchain State and Storage + +Every `nodeos` instance creates some internal files to housekeep the blockchain state. These files reside in the `~/eosio/nodeos/data` installation directory and their purpose is described below: + +* The `block.log` is an append only log of blocks written to disk and contains all the irreversible blocks. +* `Reversible_blocks` is a memory mapped file and contains blocks that have been written to the blockchain but have not yet become irreversible. +* The `chain state` or `database` is a memory mapped file, storing the blockchain state of each block (account details, deferred transactions, transactions, data stored using multi index tables in smart contracts, etc.). Once a block becomes irreversible we no longer cache the chain state. +* The `pending block` is an in memory block containing transactions as they are processed into a block, this will/may eventually become the head block. If this instance of `nodeos` is the producing node then the pending block is distributed to other `nodeos` instances. +* The head block is the last block written to the blockchain, stored in `reversible_blocks`. + +## Nodeos Read Modes + +EOSIO provides a set of [services and interfaces](https://developers.eos.io/eosio-cpp/docs/db-api) that enable contract developers to persist state across action, and consequently transaction, boundaries. Contracts may use these services and interfaces for different purposes. For example, `eosio.token` contract keeps balances for all users in the database. + +Each instance of `nodeos` keeps the database in memory, so contracts can read and write data. `nodeos` also provides access to this data over HTTP RPC API for reading the database. + +However, at any given time there can be multiple correct ways to query that data: +- `speculative`: this includes the side effects of confirmed and unconfirmed transactions. 
+- `head`: this only includes the side effects of confirmed transactions, this mode processes unconfirmed transactions but does not include them.
+- `read-only`: this only includes the side effects of confirmed transactions.
+- `irreversible`: this mode also includes confirmed transactions only up to those included in the last irreversible block.
+
+A transaction is considered confirmed when a `nodeos` instance has received, processed, and written it to a block on the blockchain, i.e. it is in the head block or an earlier block.
+
+### Speculative Mode
+
+Clients such as `cleos` and the RPC API, will see database state as of the current head block plus changes made by all transactions known to this node but potentially not included in the chain, unconfirmed transactions for example.
+
+Speculative mode is low latency but fragile, there is no guarantee that the transactions reflected in the state will be included in the chain OR that they will be reflected in the same order the state implies.
+
+This mode features the lowest latency, but is the least consistent.
+
+In speculative mode `nodeos` is able to execute transactions which have TaPoS (Transaction as Proof of Stake) pointing to any valid block in a fork considered to be the best fork by this node.
+
+### Head Mode
+
+Clients such as `cleos` and the RPC API will see database state as of the current head block of the chain. Since current head block is not yet irreversible and short-lived forks are possible, state read in this mode may become inaccurate if `nodeos` switches to a better fork. Note that this is also true of speculative mode.
+
+This mode represents a good trade-off between highly consistent views of the data and latency.
+
+In this mode `nodeos` is able to execute transactions which have TaPoS pointing to any valid block in a fork considered to be the best fork by this node.
+
+### Read-Only Mode
+
+Clients such as `cleos` and the RPC API will see database state as of the current head block of the chain. It **will not** include changes made by transactions known to this node but not included in the chain, such as unconfirmed transactions.
+
+### Irreversible Mode
+
+When `nodeos` is configured to be in irreversible read mode, it will still track the most up-to-date blocks in the fork database, but the state will lag behind the current best head block, sometimes referred to as the fork DB head, to always reflect the state of the last irreversible block.
+
+Clients such as `cleos` and the RPC API will see database state as of the last irreversible block of the chain. It **will not** include changes made by transactions known to this node but not included in the chain, such as unconfirmed transactions.
+
+## How To Specify the Read Mode
+
+The mode in which `nodeos` is run can be specified using the `--read-mode` option from the `eosio::chain_plugin`.
diff --git a/docs/01_nodeos/02_usage/index.md b/docs/01_nodeos/02_usage/index.md
new file mode 100644
index 00000000000..1edd34ab3f3
--- /dev/null
+++ b/docs/01_nodeos/02_usage/index.md
@@ -0,0 +1,12 @@
+---
+content_title: Nodeos Usage
+---
+
+This section explains how to use `nodeos`, lists its configuration options, describes its local file layout, provides common setups, and discusses the potential test environments for development.
+
+* [Options](00_nodeos-options.md) - Setting up `nodeos`; nodeos vs. plugin only options.
+* [Configuration](01_nodeos-configuration.md) - CLI vs. `config.ini` options; `nodeos` example.
+* [Node Setups](02_node-setups/index.md) - Producing vs. non-producing nodes setup.
+* [Development Environment](03_development-environment/index.md) - Setting up a development/test environment.
+* [Nodeos Implementation](05_nodeos-implementation.md) - Blockchain state and storage, `nodeos` read modes.
+* [Deprecation Notices](https://github.com/EOSIO/eos/issues/7597) - Lists `nodeos` deprecated functionality. diff --git a/docs/01_nodeos/03_plugins/chain_api_plugin/index.md b/docs/01_nodeos/03_plugins/chain_api_plugin/index.md new file mode 100644 index 00000000000..a430896df19 --- /dev/null +++ b/docs/01_nodeos/03_plugins/chain_api_plugin/index.md @@ -0,0 +1,38 @@ +# chain_api_plugin + +## Description + +The `chain_api_plugin` exposes functionality from the [`chain_plugin`](../chain_plugin/index.md) to the RPC API interface managed by the [`http_plugin`](../http_plugin/index.md). + +## Usage + +```sh +# config.ini +plugin = eosio::chain_api_plugin + +# command-line +$ nodeos ... --plugin eosio::chain_api_plugin +``` + +## Options + +None + +## Dependencies + +* [`chain_plugin`](../chain_plugin/index.md) +* [`http_plugin`](../http_plugin/index.md) + +### Load Dependency Examples + +```sh +# config.ini +plugin = eosio::chain_plugin +[options] +plugin = eosio::http_plugin +[options] + +# command-line +$ nodeos ... --plugin eosio::chain_plugin [operations] [options] \ + --plugin eosio::http_plugin [options] +``` diff --git a/docs/01_nodeos/03_plugins/chain_plugin/index.md b/docs/01_nodeos/03_plugins/chain_plugin/index.md new file mode 100644 index 00000000000..6fac390f160 --- /dev/null +++ b/docs/01_nodeos/03_plugins/chain_plugin/index.md @@ -0,0 +1,174 @@ +# chain_plugin + +## Description + +The `chain_plugin` is a core plugin required to process and aggregate chain data on an EOSIO node. + +## Usage + +```sh +# config.ini +plugin = eosio::chain_plugin +[options] + +# command-line +$ nodeos ... 
--plugin eosio::chain_plugin [operations] [options] +``` + +## Operations + +These can only be specified from the `nodeos` command-line: + +```console +Command Line Options for eosio::chain_plugin: + + --genesis-json arg File to read Genesis State from + --genesis-timestamp arg override the initial timestamp in the + Genesis State file + --print-genesis-json extract genesis_state from blocks.log + as JSON, print to console, and exit + --extract-genesis-json arg extract genesis_state from blocks.log + as JSON, write into specified file, and + exit + --fix-reversible-blocks recovers reversible block database if + that database is in a bad state + --force-all-checks do not skip any checks that can be + skipped while replaying irreversible + blocks + --disable-replay-opts disable optimizations that specifically + target replay + --replay-blockchain clear chain state database and replay + all blocks + --hard-replay-blockchain clear chain state database, recover as + many blocks as possible from the block + log, and then replay those blocks + --delete-all-blocks clear chain state database and block + log + --truncate-at-block arg (=0) stop hard replay / block log recovery + at this block number (if set to + non-zero number) + --import-reversible-blocks arg replace reversible block database with + blocks imported from specified file and + then exit + --export-reversible-blocks arg export reversible block database in + portable format into specified file and + then exit + --snapshot arg File to read Snapshot State from +``` + +## Options + +These can be specified from both the `nodeos` command-line or the `config.ini` file: + +```console +Config Options for eosio::chain_plugin: + + --blocks-dir arg (="blocks") the location of the blocks directory + (absolute path or relative to + application data dir) + --protocol-features-dir arg (="protocol_features") + the location of the protocol_features + directory (absolute path or relative to + application config dir) + 
--checkpoint arg Pairs of [BLOCK_NUM,BLOCK_ID] that + should be enforced as checkpoints. + --wasm-runtime wavm/wabt Override default WASM runtime + --abi-serializer-max-time-ms arg (=15000) + Override default maximum ABI + serialization time allowed in ms + --chain-state-db-size-mb arg (=1024) Maximum size (in MiB) of the chain + state database + --chain-state-db-guard-size-mb arg (=128) + Safely shut down node when free space + remaining in the chain state database + drops below this size (in MiB). + --reversible-blocks-db-size-mb arg (=340) + Maximum size (in MiB) of the reversible + blocks database + --reversible-blocks-db-guard-size-mb arg (=2) + Safely shut down node when free space + remaining in the reverseible blocks + database drops below this size (in + MiB). + --signature-cpu-billable-pct arg (=50) + Percentage of actual signature recovery + cpu to bill. Whole number percentages, + e.g. 50 for 50% + --chain-threads arg (=2) Number of worker threads in controller + thread pool + --contracts-console print contract's output to console + --actor-whitelist arg Account added to actor whitelist (may + specify multiple times) + --actor-blacklist arg Account added to actor blacklist (may + specify multiple times) + --contract-whitelist arg Contract account added to contract + whitelist (may specify multiple times) + --contract-blacklist arg Contract account added to contract + blacklist (may specify multiple times) + --action-blacklist arg Action (in the form code::action) added + to action blacklist (may specify + multiple times) + --key-blacklist arg Public key added to blacklist of keys + that should not be included in + authorities (may specify multiple + times) + --sender-bypass-whiteblacklist arg Deferred transactions sent by accounts + in this list do not have any of the + subjective whitelist/blacklist checks + applied to them (may specify multiple + times) + --read-mode arg (=speculative) Database read mode ("speculative", + "head", "read-only", 
"irreversible"). + In "speculative" mode database contains + changes done up to the head block plus + changes made by transactions not yet + included to the blockchain. + In "head" mode database contains + changes done up to the current head + block. + In "read-only" mode database contains + changes done up to the current head + block and transactions cannot be pushed + to the chain API. + In "irreversible" mode database + contains changes done up to the last + irreversible block and transactions + cannot be pushed to the chain API. + + --validation-mode arg (=full) Chain validation mode ("full" or + "light"). + In "full" mode all incoming blocks will + be fully validated. + In "light" mode all incoming blocks + headers will be fully validated; + transactions in those validated blocks + will be trusted + + --disable-ram-billing-notify-checks Disable the check which subjectively + fails a transaction if a contract bills + more RAM to another account within the + context of a notification handler (i.e. + when the receiver is not the code of + the action). + --trusted-producer arg Indicate a producer whose blocks + headers signed by it will be fully + validated, but transactions in those + validated blocks will be trusted. + --database-map-mode arg (=mapped) Database map mode ("mapped", "heap", or + "locked"). + In "mapped" mode database is memory + mapped as a file. + In "heap" mode database is preloaded in + to swappable memory. + In "locked" mode database is preloaded, + locked in to memory, and optionally can + use huge pages. 
+ + --database-hugepage-path arg Optional path for database hugepages + when in "locked" mode (may specify + multiple times) +``` + +## Dependencies + +None diff --git a/docs/01_nodeos/03_plugins/db_size_api_plugin/index.md b/docs/01_nodeos/03_plugins/db_size_api_plugin/index.md new file mode 100644 index 00000000000..357ab5fb749 --- /dev/null +++ b/docs/01_nodeos/03_plugins/db_size_api_plugin/index.md @@ -0,0 +1,39 @@ +# db_size_api_plugin + +## Description + +The `db_size_api_plugin` retrieves analytics about the blockchain. + +* free_bytes +* used_bytes +* size +* indices + +## Usage + +```console +# Not available +``` + +## Options + +None + +## Dependencies + +* [`chain_plugin`](../chain_plugin/index.md) +* [`http_plugin`](../http_plugin/index.md) + +### Load Dependency Examples + +```sh +# config.ini +plugin = eosio::chain_plugin +[options] +plugin = eosio::http_plugin +[options] + +# command-line +$ nodeos ... --plugin eosio::chain_plugin [operations] [options] \ + --plugin eosio::http_plugin [options] +``` diff --git a/docs/01_nodeos/03_plugins/faucet_testnet_plugin/index.md b/docs/01_nodeos/03_plugins/faucet_testnet_plugin/index.md new file mode 100644 index 00000000000..42de7572def --- /dev/null +++ b/docs/01_nodeos/03_plugins/faucet_testnet_plugin/index.md @@ -0,0 +1,23 @@ +# faucet_testnet_plugin + +## Description + +The `faucet_testnet_plugin` provides an interface that assists in the automation of distributing tokens on an EOSIO testnet. + +## Usage + +```sh +# config.ini +plugin = eosio::faucet_testnet_plugin + +# command-line +$ nodeos ... 
--plugin eosio::faucet_testnet_plugin +``` + +## Options + +None + +## Dependencies + +* [`http_plugin`](../http_plugin/index.md) diff --git a/docs/01_nodeos/03_plugins/history_api_plugin/index.md b/docs/01_nodeos/03_plugins/history_api_plugin/index.md new file mode 100644 index 00000000000..3adbc66f7a8 --- /dev/null +++ b/docs/01_nodeos/03_plugins/history_api_plugin/index.md @@ -0,0 +1,62 @@ +# history_api_plugin + +[[warning | Deprecation Notice]] +| The `history_plugin` that the `history_api_plugin` depends upon is deprecated and will no longer be maintained. Please use the [`state_history_plugin`](../state_history_plugin/index.md) instead. + +## Description + +The `history_api_plugin` exposes functionality from the [`history_plugin`](../history_plugin/index.md) to the RPC API interface managed by the [`http_plugin`](../http_plugin/index.md), providing read-only access to blockchain data. + +It provides four RPC API endpoints: + +* get_actions +* get_transaction +* get_key_accounts +* get_controlled_accounts + +[[info | More Info]] +| See HISTORY section of [RPC API](https://developers.eos.io/eosio-nodeos/reference). + +The four actions listed above are used by the following Cleos commands (matching order): + +* get actions +* get transaction +* get accounts +* get servants + +## Usage + +```sh +# config.ini +plugin = eosio::history_api_plugin + +# command-line +$ nodeos ... --plugin eosio::history_api_plugin +``` + +## Options + +None + +## Dependencies + +* [`history_plugin`](../history_plugin/index.md) +* [`chain_plugin`](../chain_plugin/index.md) +* [`http_plugin`](../http_plugin/index.md) + +### Load Dependency Examples + +```sh +# config.ini +plugin = eosio::history_plugin +[options] +plugin = eosio::chain_plugin +[options] +plugin = eosio::http_plugin +[options] + +# command-line +$ nodeos ... 
--plugin eosio::history_plugin [options] \ + --plugin eosio::chain_plugin [operations] [options] \ + --plugin eosio::http_plugin [options] +``` diff --git a/docs/01_nodeos/03_plugins/history_plugin/index.md b/docs/01_nodeos/03_plugins/history_plugin/index.md new file mode 100644 index 00000000000..c8fbc7137eb --- /dev/null +++ b/docs/01_nodeos/03_plugins/history_plugin/index.md @@ -0,0 +1,42 @@ +# history_plugin + +[[warning | Deprecation Notice]] +| The `history_plugin` is deprecated and will no longer be maintained. Please use the [`state_history_plugin`](../state_history_plugin/index.md) instead. + +## Description + +The `history_plugin` provides a cache layer to obtain historical data about the blockchain objects. It depends on [`chain_plugin`](../chain_plugin/index.md) for the data. + +## Usage + +```sh +# config.ini +plugin = eosio::history_plugin +[options] + +# command-line +$ nodeos ... --plugin eosio::history_plugin [options] +``` + +## Options + +These can be specified from both the `nodeos` command-line or the `config.ini` file: + +```console +Config Options for eosio::history_plugin: + -f [ --filter-on ] arg Track actions which match + receiver:action:actor. Actor may be + blank to include all. Action and Actor + both blank allows all from Recieiver. + Receiver may not be blank. + -F [ --filter-out ] arg Do not track actions which match + receiver:action:actor. Action and Actor + both blank excludes all from Reciever. + Actor blank excludes all from + reciever:action. Receiver may not be + blank. 
+```
+
+## Dependencies
+
+* [`chain_plugin`](../chain_plugin/index.md)
diff --git a/docs/01_nodeos/03_plugins/http_client_plugin/index.md b/docs/01_nodeos/03_plugins/http_client_plugin/index.md
new file mode 100644
index 00000000000..2a92af9f597
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/http_client_plugin/index.md
@@ -0,0 +1,40 @@
+# http_client_plugin
+
+## Description
+
+The `http_client_plugin` is an internal utility plugin, providing the `producer_plugin` the ability to use securely an external `keosd` instance as its block signer. It can only be used when the `producer_plugin` is configured to produce blocks.
+
+## Usage
+
+```sh
+# config.ini
+plugin = eosio::http_client_plugin
+https-client-root-cert = "path/to/my/certificate.pem"
+https-client-validate-peers = 1
+
+# command-line
+$ nodeos ... --plugin eosio::http_client_plugin \
+  --https-client-root-cert "path/to/my/certificate.pem" \
+  --https-client-validate-peers 1
+```
+
+## Options
+
+These can be specified from both the `nodeos` command-line or the `config.ini` file:
+
+```console
+Config Options for eosio::http_client_plugin:
+  --https-client-root-cert arg          PEM encoded trusted root certificate
+                                        (or path to file containing one) used
+                                        to validate any TLS connections made.
+                                        (may specify multiple times)
+
+  --https-client-validate-peers arg (=1)
+                                        true: validate that the peer
+                                        certificates are valid and trusted,
+                                        false: ignore cert errors
+```
+
+## Dependencies
+
+* [`producer_plugin`](../producer_plugin/index.md)
diff --git a/docs/01_nodeos/03_plugins/http_plugin/index.md b/docs/01_nodeos/03_plugins/http_plugin/index.md
new file mode 100644
index 00000000000..922f94b00c4
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/http_plugin/index.md
@@ -0,0 +1,72 @@
+# http_plugin
+
+## Description
+
+The `http_plugin` is a core plugin supported by both `nodeos` and `keosd`.
The plugin is required to enable any RPC API functionality provided by a `nodeos` or `keosd` instance. + +## Usage + +```sh +# config.ini +plugin = eosio::http_plugin +[options] + +# command-line +$ nodeos ... --plugin eosio::http_plugin [options] +$ keosd ... --plugin eosio::http_plugin [options] +``` + +## Options + +These can be specified from both the command-line or the `config.ini` file: + +```console +Config Options for eosio::http_plugin: + --unix-socket-path arg The filename (relative to data-dir) to + create a unix socket for HTTP RPC; set + blank to disable (=keosd.sock for keosd) + --http-server-address arg (=127.0.0.1:8888 for nodeos) + The local IP and port to listen for + incoming http connections; set blank to + disable. + --https-server-address arg The local IP and port to listen for + incoming https connections; leave blank + to disable. + --https-certificate-chain-file arg Filename with the certificate chain to + present on https connections. PEM + format. Required for https. + --https-private-key-file arg Filename with https private key in PEM + format. Required for https + --https-ecdh-curve arg (=secp384r1) Configure https ECDH curve to use: + secp384r1 or prime256v1 + --access-control-allow-origin arg Specify the Access-Control-Allow-Origin + to be returned on each request. + --access-control-allow-headers arg Specify the Access-Control-Allow-Header + s to be returned on each request. + --access-control-max-age arg Specify the Access-Control-Max-Age to + be returned on each request. + --access-control-allow-credentials Specify if Access-Control-Allow-Credent + ials: true should be returned on each + request. + --max-body-size arg (=1048576) The maximum body size in bytes allowed + for incoming RPC requests + --http-max-bytes-in-flight-mb arg (=500) + Maximum size in megabytes http_plugin + should use for processing http + requests. 503 error response when + exceeded. 
+ --verbose-http-errors Append the error log to HTTP responses + --http-validate-host arg (=1) If set to false, then any incoming + "Host" header is considered valid + --http-alias arg Additionaly acceptable values for the + "Host" header of incoming HTTP + requests, can be specified multiple + times. Includes http/s_server_address + by default. + --http-threads arg (=2) Number of worker threads in http thread + pool +``` + +## Dependencies + +None diff --git a/docs/01_nodeos/03_plugins/index.md b/docs/01_nodeos/03_plugins/index.md new file mode 100644 index 00000000000..4e0c09fd92f --- /dev/null +++ b/docs/01_nodeos/03_plugins/index.md @@ -0,0 +1,29 @@ +--- +content_title: Nodeos Plugins +--- + +## Overview + +Plugins extend the core functionality implemented in `nodeos`. Some plugins are mandatory, such as `chain_plugin`, `net_plugin`, and `producer_plugin`, which reflect the modular design of `nodeos`. The other plugins are optional as they provide nice to have features, but non-essential for the nodes operation. 
+ +For information on specific plugins, just select from the list below: + +* [`chain_api_plugin`](chain_api_plugin/index.md) +* [`chain_plugin`](chain_plugin/index.md) +* [`db_size_api_plugin`](db_size_api_plugin/index.md) +* [`faucet_testnet_plugin`](faucet_testnet_plugin/index.md) +* [`history_api_plugin`](history_api_plugin/index.md) +* [`history_plugin`](history_plugin/index.md) +* [`http_client_plugin`](http_client_plugin/index.md) +* [`http_plugin`](http_plugin/index.md) +* [`login_plugin`](login_plugin/index.md) +* [`net_api_plugin`](net_api_plugin/index.md) +* [`net_plugin`](net_plugin/index.md) +* [`producer_plugin`](producer_plugin/index.md) +* [`state_history_plugin`](state_history_plugin/index.md) +* [`test_control_api_plugin`](test_control_api_plugin/index.md) +* [`test_control_plugin`](test_control_plugin/index.md) +* [`txn_test_gen_plugin`](txn_test_gen_plugin/index.md) + +[[info | Nodeos is modular]] +| Plugins add incremental functionality to `nodeos`. Unlike runtime plugins, `nodeos` plugins are built at compile-time. diff --git a/docs/01_nodeos/03_plugins/login_plugin/index.md b/docs/01_nodeos/03_plugins/login_plugin/index.md new file mode 100644 index 00000000000..8f2ba95c311 --- /dev/null +++ b/docs/01_nodeos/03_plugins/login_plugin/index.md @@ -0,0 +1,47 @@ +# login_plugin + +## Description + +The `login_plugin` supports the concept of applications authenticating with the EOSIO blockchain. The `login_plugin` API allows an application to verify whether an account is allowed to sign in order to satisfy a specified authority. + +## Usage + +```sh +# config.ini +plugin = eosio::login_plugin +[options] + +# command-line +$ nodeos ... 
--plugin eosio::login_plugin [options] +``` + +## Options + +These can be specified from both the `nodeos` command-line or the `config.ini` file: + +```console +Config Options for eosio::login_plugin: + --max-login-requests arg (=1000000) The maximum number of pending login + requests + --max-login-timeout arg (=60) The maximum timeout for pending login + requests (in seconds) +``` + +## Dependencies + +* [`chain_plugin`](../chain_plugin/index.md) +* [`http_plugin`](../http_plugin/index.md) + +### Load Dependency Examples + +```sh +# config.ini +plugin = eosio::chain_plugin +[options] +plugin = eosio::http_plugin +[options] + +# command-line +$ nodeos ... --plugin eosio::chain_plugin [options] \ + --plugin eosio::http_plugin [options] +``` diff --git a/docs/01_nodeos/03_plugins/mongo_db_plugin/index.md b/docs/01_nodeos/03_plugins/mongo_db_plugin/index.md new file mode 100644 index 00000000000..c5018089968 --- /dev/null +++ b/docs/01_nodeos/03_plugins/mongo_db_plugin/index.md @@ -0,0 +1,155 @@ +# mongo_db_plugin + +[[warning | Deprecation Notice]] +| The `mongo_db_plugin` is deprecated and will no longer be maintained. Please refer to the [`state_history_plugin`](../state_history_plugin/index.md) and the [`history-tools`](../state_history_plugin/index.md#history-tools) for better options to archive blockchain data. + +## Description + +The optional `eosio::mongo_db_plugin` provides archiving of blockchain data into a MongoDB. It is recommended that the plugin be added to a non-producing node as it is designed to shut down on any failed insert into the MongoDB and it is resource intensive. For best results dedicate a `nodeos` instance to running this one plugin. The rationale behind this shutdown on error is so that any issues with connectivity or the mongo database can be fixed and `nodeos` can be restarted without having to resync or replay. 
+ +## Important Notes + +* Documents stored in mongo by `mongo_db_plugin` which contain empty field/struct names will be stored with the field/struct name of `empty_field_name` / `empty_struct_name`. +* Action data is stored on chain as raw bytes. This plugin attempts to use associated ABI on accounts to deserialize the raw bytes into expanded `abi_def` form for storage into mongo. Note that invalid or missing ABI on a contract will result in the action data being stored as raw bytes. For example the EOSIO system contract does not provide ABI for the `onblock` action so it is stored as raw bytes. +* The `mongo_db_plugin` does slow down replay/resync as the conversion of block data to JSON and insertion into MongoDB is resource intensive. The plugin does use a worker thread for processing the block data, but this does not help much when replaying/resyncing. + +## Recommendations + +* It is recommended that a large `--abi-serializer-max-time-ms` value be passed into the `nodeos` running the `mongo_db_plugin` as the default ABI serializer time limit is not large enough to serialize large blocks. +* Read-only mode should be used to avoid speculative execution. See [Nodeos Read Modes](../../02_usage/05_nodeos-implementation.md#nodeos-read-modes). Forked data is still recorded (data that never becomes irreversible) but speculative transaction processing and signaling is avoided, minimizing the transaction_traces/action_traces stored. + +## Options + +These can be specified from both the command-line or the `config.ini` file: + +```console + -q [ --mongodb-queue-size ] arg (=256) + The target queue size between nodeos + and MongoDB plugin thread. + --mongodb-abi-cache-size The maximum size of the abi cache for + serializing data. + --mongodb-wipe Required with --replay-blockchain, + --hard-replay-blockchain, or + --delete-all-blocks to wipe mongo + db.This option required to prevent + accidental wipe of mongo db. + Defaults to false. 
+ --mongodb-block-start arg (=0) If specified then only abi data pushed + to mongodb until specified block is + reached. + -m [ --mongodb-uri ] arg MongoDB URI connection string, see: + https://docs.mongodb.com/master/referen + ce/connection-string/. If not specified + then plugin is disabled. Default + database 'EOS' is used if not specified + in URI. Example: mongodb://127.0.0.1:27 + 017/EOS + --mongodb-update-via-block-num arg (=0) + Update blocks/block_state with latest + via block number so that duplicates are + overwritten. + --mongodb-store-blocks Enables storing blocks in mongodb. + Defaults to true. + --mongodb-store-block-states Enables storing block state in mongodb. + Defaults to true. + --mongodb-store-transactions Enables storing transactions in mongodb. + Defaults to true. + --mongodb-store-transaction-traces Enables storing transaction traces in mongodb. + Defaults to true. + --mongodb-store-action-traces Enables storing action traces in mongodb. + Defaults to true. + --mongodb-filter-on Mongodb: Track actions which match + receiver:action:actor. Actor may be blank + to include all. Receiver and Action may + not be blank. Default is * include + everything. + --mongodb-filter-out Mongodb: Do not track actions which match + receiver:action:actor. Action and Actor + both blank excludes all from reciever. Actor blank excludes all from + reciever:action. Receiver may not be + blank. +``` + +## Notes + +* `--mongodb-store-*` options all default to true. +* `--mongodb-filter-*` options currently only applies to the `action_traces` collection. + +## Example Filters + +```console +mongodb-filter-out = eosio:onblock: +mongodb-filter-out = gu2tembqgage:: +mongodb-filter-out = blocktwitter:: +``` + +[[warning | Warning]] +| When the `mongo_db_plugin` is turned on, the target mongodb instance may take a lot of storage space. With all collections enabled and no filters applied, the mongodb data folder can easily occupy hundreds of GBs of data. 
It is recommended that you tailor the options and utilize the filters as you need in order to maximize storage efficiency. + +## Collections + +* `accounts` - created on applied transaction. Always updated even if `mongodb-store-action-traces=false`. + * Currently limited to just name and ABI if contract abi on account + * Mostly for internal use as the stored ABI is used to convert action data into JSON for storage as associated actions on contract are processed. + * Invalid ABI on account will prevent conversion of action data into JSON for storage resulting in just the action data being stored as hex. For example, the original eosio.system contract did not provide ABI for the `onblock` action and therefore all `onblock` action data is stored as hex until the time `onblock` ABI is added to the eosio.system contract. + +* `action_traces` - created on applied transaction + * `receipt` - action_trace action_receipt - see `eosio::chain::action_receipt` + * `trx_id` - transaction id + * `act` - action - see `eosio::chain::action` + * `elapsed` - time in microseconds to execute action + * `console` - console output of action. Always empty unless `contracts-console = true` option specified. + +* `block_states` - created on accepted block + * `block_num` + * `block_id` + * `block_header_state` - see `eosio::chain::block_header_state` + * `validated` + * `in_current_chain` + +* `blocks` - created on accepted block + * `block_num` + * `block_id` + * `block` - signed block - see `eosio::chain::signed_block` + * `validated` - added on irreversible block + * `in_current_chain` - added on irreversible block + * `irreversible=true` - added on irreversible block + +* `transaction_traces` - created on applied transaction + * see `chain::eosio::transaction_trace` + +* `transactions` - created on accepted transaction - does not include inline actions + * see `eosio::chain::signed_transaction`. In addition to signed_transaction data the following are also stored. 
+
+  * `trx_id` - transaction id
+  * `irreversible=true` - added on irreversible block
+  * `block_id` - added on irreversible block
+  * `block_num` - added on irreversible block
+  * `signing_keys`
+  * `accepted`
+  * `implicit`
+  * `scheduled`
+
+* `account_controls` - created on applied transaction. Always updated even if `mongodb-store-action-traces=false`.
+  * `controlled_account`
+  * `controlling_permission`
+  * `controlling_account`
+
+The equivalent of `/v1/history/get_controlled_accounts` with mongo: `db.account_controls.find({"controlling_account":"hellozhangyi"}).pretty()`
+
+* `pub_keys` - created on applied transaction. Always updated even if `mongodb-store-action-traces=false`.
+  * `account`
+  * `permission`
+  * `public_key`
+
+## Examples
+
+The mongodb equivalent of `/v1/history/get_key_accounts` RPC API endpoint:
+
+```console
+db.pub_keys.find({"public_key":"EOS7EarnUhcyYqmdnPon8rm7mBCTnBoot6o7fE2WzjvEX2TdggbL3"}).pretty()
+```
+
+## Dependencies
+
+* [`chain_plugin`](../chain_plugin/index.md)
+* [`history_plugin`](../history_plugin/index.md)
diff --git a/docs/01_nodeos/03_plugins/net_api_plugin/index.md b/docs/01_nodeos/03_plugins/net_api_plugin/index.md
new file mode 100644
index 00000000000..af4bf4dd371
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/net_api_plugin/index.md
@@ -0,0 +1,50 @@
+# net_api_plugin
+
+## Description
+
+The `net_api_plugin` exposes functionality from the `net_plugin` to the RPC API interface managed by the `http_plugin`.
+
+The `net_api_plugin` provides four RPC API endpoints:
+
+* connect
+* disconnect
+* connections
+* status
+
+See [Net section of RPC API](https://developers.eos.io/eosio-nodeos/reference).
+
+[[caution | Caution]]
+| This plugin exposes endpoints that allow management of p2p connections. Running this plugin on a publicly accessible node is not recommended as it can be exploited.
+
+## Usage
+
+```sh
+# config.ini
+plugin = eosio::net_api_plugin
+
+# command-line
+$ nodeos ... 
--plugin eosio::net_api_plugin +``` + +## Options + +None + +## Dependencies + +* [`net_plugin`](../net_plugin/index.md) +* [`http_plugin`](../http_plugin/index.md) + +### Load Dependency Examples + +```sh +# config.ini +plugin = eosio::net_plugin +[options] +plugin = eosio::http_plugin +[options] + +# command-line +$ nodeos ... --plugin eosio::net_plugin [options] \ + --plugin eosio::http_plugin [options] +``` diff --git a/docs/01_nodeos/03_plugins/net_plugin/index.md b/docs/01_nodeos/03_plugins/net_plugin/index.md new file mode 100644 index 00000000000..84e7811b4dd --- /dev/null +++ b/docs/01_nodeos/03_plugins/net_plugin/index.md @@ -0,0 +1,91 @@ +# net_plugin + +## Description + +The `net_plugin` provides an authenticated p2p protocol to persistently synchronize nodes. + +## Usage + +```sh +# config.ini +plugin = eosio::net_plugin +[options] + +# command-line +$ nodeos ... --plugin eosio::net_plugin [options] +``` + +## Options + +These can be specified from both the `nodeos` command-line or the `config.ini` file: + +```console +Config Options for eosio::net_plugin: + + --p2p-listen-endpoint arg (=0.0.0.0:9876) + The actual host:port used to listen for + incoming p2p connections. + --p2p-server-address arg An externally accessible host:port for + identifying this node. Defaults to + p2p-listen-endpoint. + --p2p-peer-address arg The public endpoint of a peer node to + connect to. Use multiple + p2p-peer-address options as needed to + compose a network. + --p2p-max-nodes-per-host arg (=1) Maximum number of client nodes from any + single IP address + --agent-name arg (="EOS Test Agent") The name supplied to identify this node + amongst the peers. + --allowed-connection arg (=any) Can be 'any' or 'producers' or + 'specified' or 'none'. If 'specified', + peer-key must be specified at least + once. If only 'producers', peer-key is + not required. 'producers' and + 'specified' may be combined. + --peer-key arg Optional public key of peer allowed to + connect. 
May be used multiple times. + --peer-private-key arg Tuple of [PublicKey, WIF private key] + (may specify multiple times) + --max-clients arg (=25) Maximum number of clients from which + connections are accepted, use 0 for no + limit + --connection-cleanup-period arg (=30) number of seconds to wait before + cleaning up dead connections + --max-cleanup-time-msec arg (=10) max connection cleanup time per cleanup + call in millisec + --network-version-match arg (=0) True to require exact match of peer + network version. + --net-threads arg (=1) Number of worker threads in net_plugin + thread pool + --sync-fetch-span arg (=100) number of blocks to retrieve in a chunk + from any individual peer during + synchronization + --use-socket-read-watermark arg (=0) Enable expirimental socket read + watermark optimization + --peer-log-format arg (=["${_name}" ${_ip}:${_port}]) + The string used to format peers when + logging messages about them. Variables + are escaped with ${}. + Available Variables: + _name self-reported name + + _id self-reported ID (64 hex + characters) + + _sid first 8 characters of + _peer.id + + _ip remote IP address of peer + + _port remote port number of peer + + _lip local IP address connected to + peer + + _lport local port number connected + to peer +``` + +## Dependencies + +None diff --git a/docs/01_nodeos/03_plugins/producer_api_plugin/index.md b/docs/01_nodeos/03_plugins/producer_api_plugin/index.md new file mode 100644 index 00000000000..c43a9645f86 --- /dev/null +++ b/docs/01_nodeos/03_plugins/producer_api_plugin/index.md @@ -0,0 +1,42 @@ +# producer_api_plugin + +## Description + +The `producer_api_plugin` exposes a number of endpoints for the [`producer_plugin`](../producer_plugin/index.md) to the RPC API interface managed by the [`http_plugin`](../http_plugin/index.md). + +## Usage + +```sh +# config.ini +plugin = eosio::producer_api_plugin + +# nodeos startup params +$ nodeos ... 
--plugin eosio::producer_api_plugin +``` + +## Options + +None + +## Dependencies + +* [`producer_plugin`](../producer_plugin/index.md) +* [`chain_plugin`](../chain_plugin/index.md) +* [`http_plugin`](../http_plugin/index.md) + +### Load Dependency Examples + +```sh +# config.ini +plugin = eosio::producer_plugin +[options] +plugin = eosio::chain_plugin +[options] +plugin = eosio::http_plugin +[options] + +# command-line +$ nodeos ... --plugin eosio::producer_plugin [options] \ + --plugin eosio::chain_plugin [operations] [options] \ + --plugin eosio::http_plugin [options] +``` diff --git a/docs/01_nodeos/03_plugins/producer_plugin/index.md b/docs/01_nodeos/03_plugins/producer_plugin/index.md new file mode 100644 index 00000000000..022a473d36b --- /dev/null +++ b/docs/01_nodeos/03_plugins/producer_plugin/index.md @@ -0,0 +1,116 @@ +# producer_plugin + +## Description + +The `producer_plugin` loads functionality required for a node to produce blocks. + +[[info]] +| Additional configuration is required to produce blocks. Please read [Configuring Block Producing Node](https://developers.eos.io/eosio-nodeos/docs/environment-producing-node). + +## Usage + +```sh +# config.ini +plugin = eosio::producer_plugin [options] + +# nodeos startup params +$ nodeos ... -- plugin eosio::producer_plugin [options] +``` + +## Options + +These can be specified from both the `nodeos` command-line or the `config.ini` file: + +```console +Config Options for eosio::producer_plugin: + + -e [ --enable-stale-production ] Enable block production, even if the + chain is stale. 
+ -x [ --pause-on-startup ] Start this node in a state where + production is paused + --max-transaction-time arg (=30) Limits the maximum time (in + milliseconds) that is allowed a pushed + transaction's code to execute before + being considered invalid + --max-irreversible-block-age arg (=-1) + Limits the maximum age (in seconds) of + the DPOS Irreversible Block for a chain + this node will produce blocks on (use + negative value to indicate unlimited) + -p [ --producer-name ] arg ID of producer controlled by this node + (e.g. inita; may specify multiple + times) + --private-key arg (DEPRECATED - Use signature-provider + instead) Tuple of [public key, WIF + private key] (may specify multiple + times) + --signature-provider arg (=EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3) + Key=Value pairs in the form + = + Where: + is a string form of + a vaild EOSIO public + key + + is a string in the + form + : + + is KEY, or KEOSD + + KEY: is a string form of + a valid EOSIO + private key which + maps to the provided + public key + + KEOSD: is the URL where + keosd is available + and the approptiate + wallet(s) are + unlocked + --keosd-provider-timeout arg (=5) Limits the maximum time (in + milliseconds) that is allowed for + sending blocks to a keosd provider for + signing + --greylist-account arg account that can not access to extended + CPU/NET virtual resources + --produce-time-offset-us arg (=0) offset of non last block producing time + in microseconds. Negative number + results in blocks to go out sooner, and + positive number results in blocks to go + out later + --last-block-time-offset-us arg (=0) offset of last block producing time in + microseconds. 
Negative number results + in blocks to go out sooner, and + positive number results in blocks to go + out later + --max-scheduled-transaction-time-per-block-ms arg (=100) + Maximum wall-clock time, in + milliseconds, spent retiring scheduled + transactions in any block before + returning to normal transaction + processing. + --incoming-defer-ratio arg (=1) ratio between incoming transations and + deferred transactions when both are + exhausted + --producer-threads arg (=2) Number of worker threads in producer + thread pool + --snapshots-dir arg (="snapshots") the location of the snapshots directory + (absolute path or relative to + application data dir) +``` + +## Dependencies + +* [`chain_plugin`](../chain_plugin/index.md) + +### Load Dependency Examples + +```sh +# config.ini +plugin = eosio::chain_plugin [operations] [options] + +# command-line +$ nodeos ... --plugin eosio::chain_plugin [operations] [options] +``` diff --git a/docs/01_nodeos/03_plugins/state_history_plugin/how-to-create-snapshot-with-full-history.md b/docs/01_nodeos/03_plugins/state_history_plugin/how-to-create-snapshot-with-full-history.md new file mode 100644 index 00000000000..c20cfe2d943 --- /dev/null +++ b/docs/01_nodeos/03_plugins/state_history_plugin/how-to-create-snapshot-with-full-history.md @@ -0,0 +1,39 @@ + + +## Goal + +This procedure creates a database containing the chain state, with full history since genesis state. + +## Before you begin + +* Make sure [EOSIO is installed](../../../00_install/index.md). +* Learn about [Using Nodeos](../../02_usage/index.md). +* Get familiar with [state_history](../../03_plugins/state_history_plugin/index.md) plugin. + +## Steps + +1. Enable the `producer_api_plugin` on a node with full state-history. + +[[caution | Caution when using `producer_api_plugin`]] +| Either use a firewall to block access to `http-server-address`, or change it to `localhost:8888` to disable remote access. + +2. 
Create a portable snapshot: +```sh +$ curl http://127.0.0.1:8888/v1/producer/create_snapshot | json_pp +``` + +3. Wait for `nodeos` to process several blocks after the snapshot completed. The goal is for the state-history files to contain at least 1 more block than the portable snapshot has, and for the `blocks.log` file to contain the block after it has become irreversible. + +[[info | Note]] +| If the block included in the portable snapshot is forked out, then the snapshot will be invalid. Repeat this process if this happens. + +4. Stop `nodeos`. + +5. Make backups of: + * The newly-created portable snapshot (`data/snapshots/snapshot-xxxxxxx.bin`) + * The contents of `data/state-history`: + * `chain_state_history.log` + * `trace_history.log` + * `chain_state_history.index`: optional. Restoring will take longer without this file. + * `trace_history.index`: optional. Restoring will take longer without this file. + * Optional: the contents of `data/blocks`, but excluding `data/blocks/reversible`. diff --git a/docs/01_nodeos/03_plugins/state_history_plugin/how-to-fast-start-without-old-history.md b/docs/01_nodeos/03_plugins/state_history_plugin/how-to-fast-start-without-old-history.md new file mode 100644 index 00000000000..b7f5d43b56d --- /dev/null +++ b/docs/01_nodeos/03_plugins/state_history_plugin/how-to-fast-start-without-old-history.md @@ -0,0 +1,40 @@ + + +## Goal + +This procedure records the current chain state and future history, without previous historical data on the local chain. + +## Before you begin + +* Make sure [EOSIO is installed](../../../00_install/index.md). +* Learn about [Using Nodeos](../../02_usage/index.md). +* Get familiar with [state_history](../../03_plugins/state_history_plugin/index.md) plugin. + +## Steps + +1. Get the following: + * A portable snapshot (`data/snapshots/snapshot-xxxxxxx.bin`) + * Optional: a block log which includes the block the snapshot was taken at + +2. Make sure `data/state` does not exist + +3. 
Start `nodeos` with the `--snapshot` option, and the options listed in the [`state_history_plugin`](index.md).
+
+4. Look for `Placing initial state in block n` in the log, where n is the start block number.
+
+5. Start a filler with `--fpg-create` (if PostgreSQL), `--fill-skip-to n`, and `--fill-trim`. Replace `n` with the value above.
+
+6. Do not stop `nodeos` until it has received at least 1 block from the network, or it won't be able to restart.
+
+## Remarks
+
+If `nodeos` fails to receive blocks from the network, then try the above using `net_api_plugin`. Use `cleos net disconnect` and `cleos net connect` to reconnect nodes which timed out.
+
+[[caution | Caution when using `net_api_plugin`]]
+| Either use a firewall to block access to your `http-server-address`, or change it to `localhost:8888` to disable remote access.
+
+[[info]]
+| Whenever you run a filler after this point, use the `--fill-trim` option. Only use `--fpg-create` and `--fill-skip-to` the first time.
+
+[[info]]
+| On large chains, this procedure creates a delta record that is too large for javascript processes to handle. 64-bit C++ processes can handle the large record. `fill-pg` and `fill-lmdb` break up the large record into smaller records when filling databases.
diff --git a/docs/01_nodeos/03_plugins/state_history_plugin/how-to-replay-or-resync-wth-full-history.md b/docs/01_nodeos/03_plugins/state_history_plugin/how-to-replay-or-resync-wth-full-history.md
new file mode 100644
index 00000000000..de7d0e89ae2
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/state_history_plugin/how-to-replay-or-resync-wth-full-history.md
@@ -0,0 +1,19 @@
+
+
+## Goal
+
+This procedure records the entire chain history.
+
+## Before you begin
+
+* Make sure [EOSIO is installed](../../../00_install/index.md).
+* Learn about [Using Nodeos](../../02_usage/index.md).
+* Get familiar with [state_history](../../03_plugins/state_history_plugin/index.md) plugin.
+
+## Steps
+
+1. 
Get a block log and place it in `data/blocks`, or get a genesis file and use the `--genesis-json` option
+
+2. Make sure `data/state` does not exist, or use the `--replay-blockchain` option
+
+3. Start `nodeos` with the `--snapshot` option, and the options listed in the [`state_history_plugin`](index.md)
diff --git a/docs/01_nodeos/03_plugins/state_history_plugin/how-to-restore-snapshot-with-full-history.md b/docs/01_nodeos/03_plugins/state_history_plugin/how-to-restore-snapshot-with-full-history.md
new file mode 100644
index 00000000000..98cf666d88c
--- /dev/null
+++ b/docs/01_nodeos/03_plugins/state_history_plugin/how-to-restore-snapshot-with-full-history.md
@@ -0,0 +1,31 @@
+
+
+## Goal
+
+This procedure restores an existing snapshot with full history, so the node can become active in the blockchain.
+
+## Before you begin
+
+* Make sure [EOSIO is installed](../../../00_install/index.md).
+* Learn about [Using Nodeos](../../02_usage/index.md).
+* Get familiar with [state_history](../../03_plugins/state_history_plugin/index.md) plugin.
+
+## Steps
+
+1. Get the following:
+  * A portable snapshot (`data/snapshots/snapshot-xxxxxxx.bin`)
+  * The contents of `data/state-history`
+  * Optional: a block log which includes the block the snapshot was taken at. Do not include `data/blocks/reversible`.
+
+2. Make sure `data/state` does not exist
+
+3. Start `nodeos` with the `--snapshot` option, and the options listed in the [`state_history_plugin`](index.md).
+
+4. Do not stop `nodeos` until it has received at least 1 block from the network, or it won't be able to restart.
+
+## Remarks
+
+If `nodeos` fails to receive blocks from the network, then try the above using `net_api_plugin`. Use `cleos net disconnect` and `cleos net connect` to reconnect nodes which timed out.
+
+[[caution | Caution when using `net_api_plugin`]]
+| Either use a firewall to block access to `http-server-address`, or change it to `localhost:8888` to disable remote access. 
diff --git a/docs/01_nodeos/03_plugins/state_history_plugin/index.md b/docs/01_nodeos/03_plugins/state_history_plugin/index.md new file mode 100644 index 00000000000..e24fc265af8 --- /dev/null +++ b/docs/01_nodeos/03_plugins/state_history_plugin/index.md @@ -0,0 +1,80 @@ +# state_history_plugin + +## Description + +The `state_history_plugin` is useful for capturing historical data about the blockchain state. The plugin receives blockchain data from other connected nodes and caches the data into files. The plugin listens on a socket for applications to connect and sends blockchain data back based on the plugin options specified when starting `nodeos`. + +## Usage + +```sh +# config.ini +plugin = eosio::state_history_plugin +[options] + +# command-line +$ nodeos ... --plugin eosio::state_history_plugin [operations] [options] +``` + +## Operations + +These can only be specified from the `nodeos` command-line: + +```console +Command Line Options for eosio::state_history_plugin: + + --delete-state-history clear state history files +``` + +## Options + +These can be specified from both the `nodeos` command-line or the `config.ini` file: + +```console +Config Options for eosio::state_history_plugin: + + --state-history-dir arg (="state-history") + the location of the state-history + directory (absolute path or relative to + application data dir) + --trace-history enable trace history + --chain-state-history enable chain state history + --state-history-endpoint arg (=127.0.0.1:8080) + the endpoint upon which to listen for + incoming connections. Caution: only + expose this port to your internal + network. 
+ --trace-history-debug-mode enable debug mode for trace history +``` + +## Examples + +### JavaScript Example + + * [Source code](https://github.com/EOSIO/eos/blob/state-history-docs/docs/state-history-plugin/js-example.md) + +### history-tools + + * [Source code](https://github.com/EOSIO/history-tools/) + * [Documentation](https://eosio.github.io/history-tools/) + * [Protocol](https://github.com/EOSIO/eos/blob/state-history-docs/docs/state-history-plugin/protocol.md) + +## Dependencies + +* [`chain_plugin`](../chain_plugin/index.md) + +### Load Dependency Examples + +```sh +# config.ini +plugin = eosio::chain_plugin --disable-replay-opts + +# command-line +$ nodeos ... --plugin eosio::chain_plugin --disable-replay-opts +``` + +## How-To Guides + +* [How to fast start without history on existing chains](how-to-fast-start-without-old-history.md) +* [How to create a portable snapshot with full state history](how-to-create-snapshot-with-full-history.md) +* [How to restore a portable snapshot with full state history](how-to-restore-snapshot-with-full-history.md) +* [How to replay or resync with full history](how-to-replay-or-resync-wth-full-history.md) diff --git a/docs/01_nodeos/03_plugins/test_control_api_plugin/index.md b/docs/01_nodeos/03_plugins/test_control_api_plugin/index.md new file mode 100644 index 00000000000..d3636f6fdf0 --- /dev/null +++ b/docs/01_nodeos/03_plugins/test_control_api_plugin/index.md @@ -0,0 +1,45 @@ +# test_control_api_plugin + +## Description + +The `test_control_api_plugin` allows to send a control message to the [test_control_plugin](../test_control_plugin/index.md) telling the plugin to shut down the `nodeos` instance when reaching a particular block. It is intended for testing. + +## Usage + +```sh +# config.ini +plugin = eosio::test_control_api_plugin + +# command-line +$ nodeos ... 
--plugin eosio::test_control_api_plugin +``` + +## Options + +None + +## Usage Example + +```sh +curl %s/v1/test_control/kill_node_on_producer -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }' -X POST -H \"Content-Type: application/json\"" % +``` + +## Dependencies + +* [`test_control_plugin`](../test_control_plugin/index.md) +* [`chain_plugin`](../chain_plugin/index.md) +* [`http_plugin`](../http_plugin/index.md) + +### Load Dependency Examples + +```sh +# config.ini +plugin = eosio::chain_plugin +[options] +plugin = eosio::http_plugin +[options] + +# command-line +$ nodeos ... --plugin eosio::chain_plugin [operations] [options] \ + --plugin eosio::http_plugin [options] +``` diff --git a/docs/01_nodeos/03_plugins/test_control_plugin/index.md b/docs/01_nodeos/03_plugins/test_control_plugin/index.md new file mode 100644 index 00000000000..1a6c63c152e --- /dev/null +++ b/docs/01_nodeos/03_plugins/test_control_plugin/index.md @@ -0,0 +1,36 @@ +# test_control_plugin + +## Description + +The `test_control_plugin` is designed to cause a graceful shutdown when reaching a particular block in a sequence of blocks produced by a specific block producer. It can be invoked to either shutdown on the **head block** or the **last irreversible block**. + +This is intended for testing, to determine exactly when a nodeos instance will shutdown. + +## Usage + +```sh +# config.ini +plugin = eosio::test_control_plugin + +# command-line +$ nodeos ... --plugin eosio::test_control_plugin +``` + +## Options + +None + +## Dependencies + +* [`chain_plugin`](../chain_plugin/index.md) + +### Load Dependency Examples + +```sh +# config.ini +plugin = eosio::chain_plugin +[options] + +# command-line +$ nodeos ... 
--plugin eosio::chain_plugin [operations] [options] +``` diff --git a/docs/01_nodeos/03_plugins/txn_test_gen_plugin/index.md b/docs/01_nodeos/03_plugins/txn_test_gen_plugin/index.md new file mode 100644 index 00000000000..0adf0744acc --- /dev/null +++ b/docs/01_nodeos/03_plugins/txn_test_gen_plugin/index.md @@ -0,0 +1,40 @@ +# txn_test_gen_plugin + +## Description + +The `txn_test_gen_plugin` is used for transaction test purposes. + +[[info | For More Information]] +For more information, check the [txn_test_gen_plugin/README.md](https://github.com/EOSIO/eos/blob/develop/plugins/txn_test_gen_plugin/README.md) on the EOSIO/eos repository. + +## Usage + +```sh +# config.ini +plugin = eosio::txn_test_gen_plugin +[options] + +# command-line +$ nodeos ... --plugin eosio::txn_test_gen_plugin [options] +``` + +## Options + +These can be specified from both the `nodeos` command-line or the `config.ini` file: + +```console +Config Options for eosio::txn_test_gen_plugin: + --txn-reference-block-lag arg (=0) Lag in number of blocks from the head + block when selecting the reference + block for transactions (-1 means Last + Irreversible Block) + --txn-test-gen-threads arg (=2) Number of worker threads in + txn_test_gen thread pool + --txn-test-gen-account-prefix arg (=txn.test.) + Prefix to use for accounts generated + and used by this plugin +``` + +## Dependencies + +None diff --git a/docs/01_nodeos/04_replays/how-to-generate-a-blocks.log.md b/docs/01_nodeos/04_replays/how-to-generate-a-blocks.log.md new file mode 100644 index 00000000000..3d6398082be --- /dev/null +++ b/docs/01_nodeos/04_replays/how-to-generate-a-blocks.log.md @@ -0,0 +1,8 @@ +--- +content_title: How to generate a blocks.log file +--- + +The `blocks.log` file is used by `nodeos` to persist irreversible blocks. This is the actual local copy of the immutable blockchain maintained by the node. The default location of the `blocks.log` file is in the `data/blocks` directory. 
However the default data directory can be overridden with the `-d [ --data-dir ]` option on the `nodeos` command line. + +[[info | Other `blocks.log` files]] +| You can also download a `blocks.log` file from third party providers. diff --git a/docs/01_nodeos/04_replays/how-to-generate-a-snapshot.md b/docs/01_nodeos/04_replays/how-to-generate-a-snapshot.md new file mode 100644 index 00000000000..972f7c329f7 --- /dev/null +++ b/docs/01_nodeos/04_replays/how-to-generate-a-snapshot.md @@ -0,0 +1,19 @@ +--- +content_title: How to generate a snapshot +--- + +You can force a running `nodeos` instance to create a snapshot by using the `create_snapshot` RPC API call supported by the `producer_api_plugin`. This will create a snapshot file in the `data/snapshots` directory. Snapshot files are written to disk with the name pattern `*snapshot-\.bin*`. + +[[info | Snapshots Location]] +| By default, snapshots are written to the `data/snapshots` directory relative to your `nodeos` data directory. See the `-d [ --data-dir ]` option. + +If your `nodeos` instance is running locally, the below command will request `nodeos` to create a snapshot: + +```sh +$ curl --request POST \ + --url http://127.0.0.1:8888/v1/producer/create_snapshot \ + --header 'content-type: application/x-www-form-urlencoded; charset=UTF-8' +``` + +[[info | Other `blocks.log` files]] +| You can also download a `blocks.log` file from third party providers. 
 diff --git a/docs/01_nodeos/04_replays/how-to-replay-from-a-blocks.log.md b/docs/01_nodeos/04_replays/how-to-replay-from-a-blocks.log.md
new file mode 100644
index 00000000000..6489ed570f8
--- /dev/null
+++ b/docs/01_nodeos/04_replays/how-to-replay-from-a-blocks.log.md
@@ -0,0 +1,26 @@
+---
+content_title: How to replay from a blocks.log file
+---
+
+Once you have obtained a copy of the `blocks.log` file which you wish to replay the blockchain from, copy it to your `data/blocks` directory, backing up any existing contents if you wish to keep them, and remove the `blocks.index`, `forkdb.dat`, `shared_memory.bin`, and `shared_memory.meta`.
+
+The table below summarizes the actions you should take for each of the files enumerated above:
+
+Folder name             | File name          | Action
+----------------------- | ------------------ | ------
+data/blocks             | blocks.index       | Remove
+data/blocks             | blocks.log         | Replace this file with the `blocks.log` you want to replay
+data/blocks/reversible  | forkdb.dat         | Remove
+data/blocks/reversible  | shared_memory.bin  | Remove
+data/blocks/reversible  | shared_memory.meta | Remove
+
+You can use `blocks-dir = "blocks"` in the `config.ini` file, or use the `--blocks-dir` command line option, to specify where to find the `blocks.log` file to replay. 
+
+```sh
+$ nodeos --replay-blockchain \
+  -e -p eosio \
+  --plugin eosio::producer_plugin \
+  --plugin eosio::chain_api_plugin \
+  --plugin eosio::http_plugin \
+  >> nodeos.log 2>&1 &
+```
diff --git a/docs/01_nodeos/04_replays/how-to-replay-from-a-snapshot.md b/docs/01_nodeos/04_replays/how-to-replay-from-a-snapshot.md
new file mode 100644
index 00000000000..e94d0aba45b
--- /dev/null
+++ b/docs/01_nodeos/04_replays/how-to-replay-from-a-snapshot.md
@@ -0,0 +1,31 @@
+---
+content_title: How to replay from a snapshot
+---
+
+Once you have obtained a copy of a valid snapshot file from which you wish to create a valid chain state, copy it to your data/snapshots directory, backing up (if you wish to keep them) and removing any existing contents of the data directory.
+
+location          | name                       | action
+----------------- | -------------------------- | ------------
+data/snapshots    | .bin                       | place the snapshot file you want to replay here
+data/             | *                          | remove
+
+You can use `snapshots-dir = "snapshots" ` in the configuration file or use the `--snapshots-dir` command line option, to specify where to find the snapshot to replay; use `--snapshot` to specify the name of the snapshot to replay.
+
+```sh
+$ nodeos --snapshot yoursnapshot.name \
+  -e -p eosio \
+  --plugin eosio::producer_plugin \
+  --plugin eosio::chain_api_plugin \
+  --plugin eosio::http_plugin \
+  >> nodeos.log 2>&1 &
+```
+
+When replaying from a snapshot file it is recommended that all existing data is removed, however if a blocks.log file is provided it *must* at least contain blocks up to the snapshotted block and *may* contain additional blocks that will be applied as part of startup. If a blocks.log file exists, but does not contain blocks up to and/or after the snapshotted block then replaying from a snapshot will create an exception. Any available reversible blocks will also be applied. 
+ +blocks.log | snapshot | action +------------------------ | --------------------------- | ------ +no blocks.log | for irreversible block 2000 | ok +contains blocks 1 - 1999 | for irreversible block 2000 | exception +contains blocks 1 - 2001 | for irreversible block 2000 | ok - will recreate from snapshot and 'play' block 2001 + +When instantiating a node from a snapshot file, it is illegal to pass in the `--genesis-json` or `--genesis-timestamp` arguments to `nodeos` as that information is loaded from the snapshot file. If a `blocks.log` file exists, the genesis information it contains will be validated against the genesis data in the snapshot. The replay will fail with an error if the genesis data is not consistent, i.e. it checks that the blocks.log file and the snapshot file are for the same blockchain. diff --git a/docs/01_nodeos/04_replays/index.md b/docs/01_nodeos/04_replays/index.md new file mode 100644 index 00000000000..340a9fa317f --- /dev/null +++ b/docs/01_nodeos/04_replays/index.md @@ -0,0 +1,52 @@ +--- +content_title: Nodeos Replays +--- + +Nodeos provides various options for replaying blockchain blocks. This can be useful if, for example, a node has downloaded a `blocks.log` file from the internet (as a faster alternative to synchronizing from the p2p network) and the node wants to use it to quickly catch up with the network, or if you want to know the chain state at specified points in a blockchain's life. + +Replaying data can be done in two ways: + +- From a **`blocks.log` file**: +The `blocks.log` file contains all irreversible transactions on the blockchain. All instances of `nodeos` write irreversible blocks to the `blocks.log` file, which is located at the `data/blocks` directory relative to the `nodeos` directory. Using a `blocks.log` file to replay will allow you to start a `nodeos` instance, which recreates the entire history of the blockchain locally, without adding unnecessary load to the network. 
+ +- From a **snapshot file**: +Snapshot files can be created from a running `nodeos` instance. The snapshot contains the chain state for the block referenced when created. It is recommended to use snapshot files created from blocks that are irreversible. Using a snapshot file to replay allows you to quickly start a `nodeos` instance which has a full and correct chain state at a specified block number, but not a full history of transactions up to that block number. From that point on the `nodeos` instance will operate in the configured manner. + +## Replay How-Tos + +* [How To Generate a Blocks Log](how-to-generate-a-blocks.log.md) +* [How To Generate a Snapshot](how-to-generate-a-snapshot.md) +* [How To Replay from a Blocks Log](how-to-replay-from-a-blocks.log.md) +* [How to Replay from a Snapshot](how-to-replay-from-a-snapshot.md) + +## Replay Snapshot-specific Options + +Typing `$ nodeos --help` on the command line will show you all the options available for running `nodeos`. The snapshot and replay specific options are: + + - **--force-all-checks** +The node operator may not trust the source of the `blocks.log` file and may want to run `nodeos` with `--replay-blockchain --force-all-checks` the first time to make sure the blocks are good. The `--force-all-checks` flag can be passed into `nodeos` to tell it to not skip any checks during replay. + + - **--disable-replay-opts** +By default, during replay, `nodeos` does not create a stack of chain state deltas (this stack is used to enable rollback of state for reversible blocks.) This is a replay performance optimization. Using this option turns off this replay optimization and creates a stack of chain state deltas. If you are using the state history plugin you must use this option. + + - **--replay-blockchain** +This option tells `nodeos` to replay from the `blocks.log` file located in the data/blocks directory. `nodeos` will clear the chain state and replay all blocks. 
+ + - **--hard-replay-blockchain** +This option tells `nodeos` to replay from the `blocks.log` file located in the data/blocks directory. `nodeos` makes a backup of the existing `blocks.log` file and will then clear the chain state and replay all blocks. This option assumes that the backup `blocks.log` file may contain corrupted blocks, so `nodeos` replays as many blocks as possible from the backup block log. When `nodeos` finds the first corrupted block while replaying from the backup `blocks.log` it will synchronize the rest of the blockchain from the p2p network. + + - **--delete-all-blocks** +This tells `nodeos` to clear the local chain state and the local `blocks.log` file. If you intend to then synchronize from the p2p network you would need to provide a correct `genesis.json` file. This option is not recommended. + + - **--truncate-at-block** +Default argument (=0), only used if the given value is non-zero. +Using this option when replaying the blockchain will force the replay to stop at the specified block number. This option will only work if replaying with the `--hard-replay-blockchain` option, or, when not replaying, the `--fix-reversible-blocks` option. The local `nodeos` process will contain the chain state for that block. This option may be useful for checking blockchain state at specific points in time. It is intended for testing/validation and is not intended to be used when creating a local `nodeos` instance which is synchronized with the network. + + - **--snapshot** +Use this option to specify which snapshot file to use to recreate the chain state from a snapshot file. This option will not replay the `blocks.log` file. The `nodeos` instance will not know the full transaction history of the blockchain. + + - **--snapshots-dir** +You can use this to specify the location of the snapshot file directory (absolute path or relative to application data dir.) 
+ + - **--blocks-dir** +You can use this to specify the location of the `blocks.log` file directory (absolute path or relative to application data dir) diff --git a/docs/01_nodeos/06_logging/00_setup-logging.json.md b/docs/01_nodeos/06_logging/00_setup-logging.json.md new file mode 100644 index 00000000000..12bc116c791 --- /dev/null +++ b/docs/01_nodeos/06_logging/00_setup-logging.json.md @@ -0,0 +1,15 @@ +--- +content_title: Setup logging.json +--- + +The `logging.json` file is usually located in the specified `--config-dir`, the same directory as the `config.ini` file. This path can be explicitly defined using the `-l` or `--logconf` options when starting `nodeos`. + +```sh +./nodeos --help + ... + Application Command Line Options: + ... + --config-dir arg Directory containing configuration files such as config.ini + -l [ --logconf ] arg (=logging.json) Logging configuration file name/path for library users + +``` diff --git a/docs/01_nodeos/06_logging/01_logging-levels.md b/docs/01_nodeos/06_logging/01_logging-levels.md new file mode 100644 index 00000000000..bb68ceeeb1d --- /dev/null +++ b/docs/01_nodeos/06_logging/01_logging-levels.md @@ -0,0 +1,66 @@ +--- +content_title: Logging Levels +--- + +There are six available logging levels: +- all +- debug +- info +- warn +- error +- off + +Sample `logging.json`: + +``` +{ + "includes": [], + "appenders": [{ + "name": "consoleout", + "type": "console", + "args": { + "stream": "std_out", + "level_colors": [{ + "level": "debug", + "color": "green" + },{ + "level": "warn", + "color": "brown" + },{ + "level": "error", + "color": "red" + } + ] + }, + "enabled": true + },{ + "name": "net", + "type": "gelf", + "args": { + "endpoint": "10.10.10.10", + "host": "test" + }, + "enabled": true + } + ], + "loggers": [{ + "name": "default", + "level": "info", + "enabled": true, + "additivity": false, + "appenders": [ + "consoleout", + "net" + ] + },{ + "name": "net_plugin_impl", + "level": "debug", + "enabled": true, + 
"additivity": false, + "appenders": [ + "net" + ] + } + ] +} +``` diff --git a/docs/01_nodeos/06_logging/index.md b/docs/01_nodeos/06_logging/index.md new file mode 100644 index 00000000000..8ad1157688f --- /dev/null +++ b/docs/01_nodeos/06_logging/index.md @@ -0,0 +1,108 @@ +--- +content_title: Nodeos Logging +--- + +Logging for `nodeos` is controlled by the `logging.json` file. CLI options can be passed to `nodeos` to [setup `logging.json`](00_setup-logging.json.md). The logging configuration file can be used to define [appenders](#appenders) and tie them to [loggers](#loggers) and [logging levels](01_logging-levels.md). + +## Appenders + +The logging library built into EOSIO supports two appender types: + +- [Console](#console) +- [GELF](#gelf) (Graylog Extended Log Format) + +### Console + +This will output log messages to the screen. The configuration options are: + +- `name` - arbitrary name to identify instance for use in loggers +- `type` - "console" +- `stream` - "std_out" or "std_err" +- `level_colors` - maps a log level to a colour + - level - see [logging levels](01_logging-levels.md) + - color - may be one of ("red", "green", "brown", "blue", "magenta", "cyan", "white", "console_default") +- `enabled` - bool value to enable/disable the appender. + +Example: + +```json +{ + "name": "consoleout", + "type": "console", + "args": { + "stream": "std_out", + + "level_colors": [{ + "level": "debug", + "color": "green" + },{ + "level": "warn", + "color": "brown" + },{ + "level": "error", + "color": "red" + } + ] + }, + "enabled": true +} +``` + +### GELF + +This sends the log messages to `Graylog`. `Graylog` is a fully integrated platform for collecting, indexing, and analyzing log messages. The configuration options are: + + - `name` - arbitrary name to identify instance for use in loggers + - `type` - "gelf" + - `endpoint` - ip address and port number + - `host` - Graylog hostname, identifies you to Graylog. 
+ - `enabled` - bool value to enable/disable the appender. + +Example: + +```json +{ + "name": "net", + "type": "gelf", + "args": { + "endpoint": "104.198.210.18:12202", + "host": "test" + }, + "enabled": true +} +``` + +## Loggers + +The logging library built into EOSIO currently supports five loggers: + +- `default` - the default logger, always enabled. +- `net_plugin_impl` - detailed logging for the net plugin. +- `bnet_plugin` - detailed logging for the bnet plugin. +- `producer_plugin` - detailed logging for the producer plugin. +- `transaction_tracing` - detailed log that emits verdicts from relay nodes on the P2P network. + +The configuration options are: + + - `name` - must match one of the names described above. + - `level` - see logging levels below. + - `enabled` - bool value to enable/disable the logger. + - `additivity` - true or false + - `appenders` - list of appenders by name (name in the appender configuration) + +Example: + +```json +{ + "name": "net_plugin_impl", + "level": "debug", + "enabled": true, + "additivity": false, + "appenders": [ + "net" + ] +} +``` + +[[info]] +| `net_plugin_impl`, `bnet_plugin`, `producer_plugin`, `transaction_tracing` are not enabled unless explicitly enabled in the `logging.json` diff --git a/docs/01_nodeos/07_upgrade-guides/1.8-upgrade-guide.md b/docs/01_nodeos/07_upgrade-guides/1.8-upgrade-guide.md new file mode 100644 index 00000000000..f51102a26c1 --- /dev/null +++ b/docs/01_nodeos/07_upgrade-guides/1.8-upgrade-guide.md @@ -0,0 +1,132 @@ +--- +content_title: EOSIO 1.8+ Consensus Protocol Upgrade Process +--- + +This guide is intended to instruct node operators on the steps needed to successfully transition an EOSIO network through a consensus protocol upgrade (also known as a "hard fork") with minimal disruption to users. + +## Test networks + +Before deploying the upgrade to any non-test networks, protocol upgrades should be deployed and verified on test networks. 
The version of nodeos supporting the initial set of protocol upgrades is [v1.8.1](https://github.com/EOSIO/eos/releases/tag/v1.8.1). Existing EOSIO-based test networks can use this version of nodeos to carry out and test the upgrade process. + +This test upgrade process can give block producers of their respective EOSIO blockchain networks practice with carrying out the steps necessary to successfully coordinate the activation of the first consensus protocol upgrade feature (or just protocol feature for short), which will fork out any nodes that have not yet updated to the new version of nodeos by the time of activation. The process will also inform block producers of the requirements for nodes to upgrade nodeos to v1.8 from v1.7 and earlier, and it can help them decide an appropriate deadline to be given as notice to the community for when the first protocol feature will be activated. + +Testing the upgrade process on test networks will also allow block explorers and other applications interacting with the blockchain to test the transition and the behavior of their applications under the new rules after activation of the individual protocol features. Some of the protocol features (`PREACTIVATE_FEATURE` and `NO_DUPLICATE_DEFERRED_ID` as examples) make slight changes to the block and transaction data structures, and therefore force applications that are reliant on the old structure to migrate. One of the protocol features (`RESTRICT_ACTION_TO_SELF`) restricts an existing authorization bypass (which has been deprecated since the v1.5.1 release of EOSIO) and could potentially break smart contracts that continue to rely on that authorization bypass. 
+ +## Upgrade process for all EOSIO networks (including test networks) + +Because these steps require replay from genesis, after the release of [v1.8.1](https://github.com/EOSIO/eos/releases/tag/v1.8.1) of nodeos which supports the initial set of consensus protocol upgrades, all node operators should take the following steps as soon as possible. These steps should be followed on an additional node that they can afford to be taken offline for an extended period of time: + +1. Ensure that their existing node is running the most recent stable release (1.7) of nodeos and then shut down nodeos. +2. Make a backup and delete the `blocks/reversible` directory, `state-history` directory, and `state` directory within the data directory. +3. Replace their old version of nodeos with the new release. +4. Start the new 1.8 release of nodeos and let it complete replay from genesis and catch up with syncing with the network. The node should receive blocks and LIB should advance. Nodes running v1.8 and v1.7 will continue to coexist in the same network prior to the activation of the first protocol upgrade feature. + +A replay from genesis is required when upgrading nodeos from v1.7 to v1.8. Afterward, the v1.8 node can, as usual, start and stop quickly without requiring replays. The state directory generated by a v1.7 node will not be compatible with v1.8 of nodeos. Version 1 portable snapshots (generated by v1.7) will not be compatible with v1.8 which require the version 2 portable snapshots. + +Due to the long amount of time it will take to replay from genesis (even longer if running with plugins that track history), block producers of the network are suggested to provide sufficient time to the community to upgrade their nodes prior to activating the first protocol upgrade feature. 
+ +Nodes that wish to make the transition but are not interested in tracking the history of the chain from genesis have an option to speed things up by using a version 2 portable snapshot that can be generated by synced v1.8 nodes. Since the portable snapshots are generated in a deterministic and portable manner, users can simply compare the hash of the snapshot files they downloaded from an arbitrary source to the hashes published by a variety of trusted sources, but only if they correspond to snapshots taken at the same block ID. + +### Special notes to block producers + +Block producers will obviously need to run the replay of nodeos on a separate machine that is not producing blocks. This machine will have to be production ready so that they can switch block production over to it when it has finished replaying and syncing. Alternatively, they can take a portable snapshot on the replay machine and move it to yet another machine which is production ready, then activate the switch over from their currently producing v1.7 BP node to the v1.8 node. + +Nearly all of the protocol upgrade features introduced in v1.8 first require a special protocol feature (codename `PREACTIVATE_FEATURE`) to be activated and for an updated version of the system contract that utilizes the functionality introduced by that feature to be deployed. Block producers should be aware that as soon as the `PREACTIVATE_FEATURE` protocol feature is activated by the BPs, all nodes still on v1.7 will be unable to continue syncing normally and their last irreversible block will stop advancing. For this reason, it is important to coordinate when the activation happens and announce the expected activation date with sufficient time provided to the community to upgrade their nodes in time. + +After activation of the `PREACTIVATE_FEATURE` and deployment of the updated system contract, block producers will be able to more easily coordinate activation of further protocol features. 
For the remaining protocol features in the v1.8 release, they can activate the features at any time and no preparation time needs to be given to the community since anyone synced up with the blockchain at that time will necessarily be on a version of nodeos that is at least v1.8 and therefore will support the entire initial set of protocol features. Furthermore, due to the `PREACTIVATE_FEATURE` protocol feature, they can activate the other remaining protocol features with an `eosio.msig` proposed transaction using the `activate` action in the new system contract and no replay is required. + +The activation of the first protocol feature, `PREACTIVATE_FEATURE`, however cannot be done with an `eosio.msig` proposed transaction. It will require more coordination and manual action by the block producers. First, block producers should come to an agreement on the earliest time that they are willing to activate the first protocol feature. + +The BPs should then set this chosen time in the configuration JSON file for the `PREACTIVATE_FEATURE` protocol upgrade of their v1.8 node. Specifically, they should modify the value for the `earliest_allowed_activation_time` field in the `protocol_features/BUILTIN-PREACTIVATE_FEATURE.json` file located in the config directory. + +It is important that this configuration change happens prior to allowing that node to produce blocks on the network. As long as more than two-thirds of the active block producers have set the same future time in the configuration file for the `PREACTIVATE_FEATURE` on their BP nodes, the network will be safe from any attempts at premature activation by some other active BP. + +After the agreed upon time has passed, any of the active block producers can activate the `PREACTIVATE_FEATURE` protocol feature with a simple request sent to the [`producer_api_plugin`](../03_plugins/producer_api_plugin/index.md) of their BP node. 
+ +To determine the specific format of the request, the digest of the `PREACTIVATE_FEATURE` protocol feature must first be determined. This can be found by looking at nodeos startup logs, or by sending a request to the `get_supported_protocol_features` endpoint provided by the [`producer_api_plugin`](../03_plugins/producer_api_plugin/index.md). + +Send a request to the endpoint locally: + +``` +curl -X POST http://127.0.0.1:8888/v1/producer/get_supported_protocol_features -d '{}' | jq +``` + +In the returned array, find an object that references the `PREACTIVATE_FEATURE` codename, for example: + +``` +... +{ + "feature_digest": "0ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd", + "subjective_restrictions": { + "enabled": true, + "preactivation_required": false, + "earliest_allowed_activation_time": "1970-01-01T00:00:00.000" + }, + "description_digest": "64fe7df32e9b86be2b296b3f81dfd527f84e82b98e363bc97e40bc7a83733310", + "dependencies": [], + "protocol_feature_type": "builtin", + "specification": [ + { + "name": "builtin_feature_codename", + "value": "PREACTIVATE_FEATURE" + } + ] +}, +... +``` + +In this case, the digest of the `PREACTIVATE_FEATURE` protocol feature is `0ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd` (note that the values may be different depending on the local changes made to the configuration of the protocol features that are specific to the blockchain network). + +Then, the local block producing nodeos instance can be requested to activate the `PREACTIVATE_FEATURE` protocol at its earliest opportunity (i.e. 
the next time that node produces a block) using the following command: + +``` +curl -X POST http://127.0.0.1:8888/v1/producer/schedule_protocol_feature_activations -d '{"protocol_features_to_activate": ["0ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd"]}' | jq +``` + +The above command should only be used after the time has passed the agreed upon `earliest_allowed_activation_time` for the `PREACTIVATE_FEATURE` protocol feature. + +Any synced v1.8.x nodes can be used to check which protocol features have been activated using the following command: + +``` +curl -X POST http://127.0.0.1:8888/v1/chain/get_activated_protocol_features -d '{}' | jq +``` + +For example, if the `PREACTIVATE_FEATURE` protocol feature is activated, that command may return a result such as (specific values, especially the `activation_block_num`, may vary): + +``` +{ + "activated_protocol_features": [ + { + "feature_digest": "0ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd", + "activation_ordinal": 0, + "activation_block_num": 348, + "description_digest": "64fe7df32e9b86be2b296b3f81dfd527f84e82b98e363bc97e40bc7a83733310", + "dependencies": [], + "protocol_feature_type": "builtin", + "specification": [ + { + "name": "builtin_feature_codename", + "value": "PREACTIVATE_FEATURE" + } + ] + } + ] +} +``` + +Once the `PREACTIVATE_FEATURE` protocol feature has been activated, the [new system contract](https://github.com/EOSIO/eosio.contracts/releases/tag/v1.7.0) with the `activate` action can be deployed. + +## Notes for block explorers, exchanges, and applications + +Block explorers, exchanges, and applications building on the blockchain can all follow the four-step processes described above to upgrade their nodes in time and ensure their services continue when the first protocol upgrade is activated. 
However, they should also be aware that certain protocol features change the behavior of existing operations on the blockchain, and in some cases also slightly change the structure of blocks and transactions. + + +**First**, v1.8 changes the structure of transaction traces, even prior to the activation of any protocol features. Clients consuming transaction and action traces made available through [`history_plugin`](../03_plugins/history_plugin/index.md), [`mongo_db_plugin`](../03_plugins/mongo_db_plugin/index.md), or [`state_history_plugin`](../03_plugins/state_history_plugin/index.md) should be aware of the changes made to the trace structure (see details at [#7044](https://github.com/EOSIO/eos/pull/7044) and [#7108](https://github.com/EOSIO/eos/pull/7108)). Clients consuming the trace output of the `push_transaction` RPC from the chain API should not need to do anything since the output of that RPC should be backwards compatible. However, they are encouraged to replace usage of `push_transaction` with the new RPC [`send_transaction`](https://developers.eos.io/eosio-nodeos/reference#send_transaction) which uses the new flat structure to store the action traces. + +The [`state_history_plugin`](../03_plugins/state_history_plugin/index.md) has also changed its API and the structure of the files it stores on disk in a backwards incompatible way in v1.8. These changes reflect, among other things, the transaction trace structural changes and the data structure changes made within the chain state database to support the new protocol features. Consumers of the [`state_history_plugin`](../03_plugins/state_history_plugin/index.md) will need to be updated to work with the new changes in v1.8. + +**Second**, all protocol features are activated by signaling their 256-bit digest through a block. 
The block producer is able to place the digest of a protocol feature in a special section of the block header (called the block header extensions) that, under the original rules of v1.7, is expected to be empty. This change may especially be relevant to block explorers which need to ensure that their tools will not break because of the extra data included in the block header and ideally will update their block explorers to reflect the new information. The first time block explorers or other consumers of the blockchain data will encounter a non-empty block header extension is during the activation of the `PREACTIVATE_FEATURE` protocol feature. + +**Third**, upon activation of the `NO_DUPLICATE_DEFERRED_ID` protocol feature, contract-generated deferred transactions will include a non-empty `transaction_extensions` field. While block explorers may be interested in exposing the contents of this field in a user-friendly way, clients are free to ignore it. However, for code dealing with the binary serialized form of these transactions directly, they must be capable of successfully deserializing the transaction with the extension data present. Note that this also applies to smart contract code that may be reading the deferred transaction that caused it to execute, whether it is because it is executing an action within the deferred transaction or executing the `eosio::onerror` notification handler of the contract that sent the (failed) deferred transaction. + +**Fourth**, activation of the `RESTRICT_ACTION_TO_SELF` protocol feature will remove the authorization bypass that is available when a contract sends an inline action to itself (this authorization bypass was deprecated in the v1.5.1 release of EOSIO). Smart contract developers should ensure their contracts do not rely on this authorization bypass prior to the time the block producers activate the `RESTRICT_ACTION_TO_SELF` protocol feature, otherwise, their contracts may stop functioning correctly. 
diff --git a/docs/01_nodeos/07_upgrade-guides/index.md b/docs/01_nodeos/07_upgrade-guides/index.md new file mode 100644 index 00000000000..a1d178dd8fb --- /dev/null +++ b/docs/01_nodeos/07_upgrade-guides/index.md @@ -0,0 +1,7 @@ +--- +content_title: Nodeos Upgrade Guides +--- + +This section contains important instructions for node operators and other EOSIO stakeholders to transition an EOSIO network successfully through an EOSIO version or protocol upgrade. + +* [1.8 Upgrade Guide](1.8-upgrade-guide.md) diff --git a/docs/01_nodeos/08_troubleshooting/index.md b/docs/01_nodeos/08_troubleshooting/index.md new file mode 100644 index 00000000000..c991d5b9812 --- /dev/null +++ b/docs/01_nodeos/08_troubleshooting/index.md @@ -0,0 +1,45 @@ +--- +content_title: Nodeos Troubleshooting +--- + +### "Database dirty flag set (likely due to unclean shutdown): replay required" + +`nodeos` needs to be shut down cleanly. To ensure this is done, send a `SIGTERM`, `SIGQUIT` or `SIGINT` and wait for the process to shut down. Failing to do this will result in this error. If you get this error, your only recourse is to replay by starting `nodeos` with `--replay-blockchain` + +### "Memory does not match data" Error at Restart + +If you get an error such as `St9exception: content of memory does not match data expected by executable` when trying to start `nodeos`, try restarting `nodeos` with one of the following options (you can use `nodeos --help` to get a full listing of these). 
+ +``` +Command Line Options for eosio::chain_plugin: + --fix-reversible-blocks recovers reversible block database if + that database is in a bad state + --force-all-checks do not skip any checks that can be + skipped while replaying irreversible + blocks + --replay-blockchain clear chain state database and replay + all blocks + --hard-replay-blockchain clear chain state database, recover as + many blocks as possible from the block + log, and then replay those blocks + --delete-all-blocks clear chain state database and block + log +``` + +### "Could not grow database file to requested size." Error + +Start `nodeos` with `--shared-memory-size-mb 1024`. A 1 GB shared memory file allows approximately half a million transactions. + +### What version of EOSIO am I running/connecting to? + +If defaults can be used, then `cleos get info` will output a block that contains a field called `server_version`. If your `nodeos` is not using the defaults, then you need to know the URL of the `nodeos`. In that case, use the following with your `nodeos` URL: + +```sh +$ cleos --url http://localhost:8888 get info +``` + +To focus only on the version line within the block: + +```sh +$ cleos --url http://localhost:8888 get info | grep server_version +``` diff --git a/docs/01_nodeos/index.md b/docs/01_nodeos/index.md new file mode 100644 index 00000000000..030ffc71ca5 --- /dev/null +++ b/docs/01_nodeos/index.md @@ -0,0 +1,25 @@ +--- +content_title: Nodeos +--- + +## Introduction + +`nodeos` is the core service daemon that runs on every EOSIO node. It can be configured to process smart contracts, validate transactions, produce blocks containing valid transactions, and confirm blocks to record them on the blockchain. + +## Installation + +`nodeos` is distributed as part of the [EOSIO software suite](https://github.com/EOSIO/eos/blob/master/README.md). To install `nodeos`, visit the [EOSIO Software Installation](../00_install/index.md) section. 
+ +## Explore + +Navigate the sections below to configure and use `nodeos`. + +* [Usage](02_usage/index.md) - Configuring and using `nodeos`, node setups/environments. +* [Plugins](03_plugins/index.md) - Using plugins, plugin options, mandatory vs. optional. +* [Replays](04_replays/index.md) - Replaying the chain from a snapshot or a blocks.log file. +* [Logging](06_logging/index.md) - Logging config/usage, loggers, appenders, logging levels. +* [Upgrade Guides](07_upgrade-guides/index.md) - EOSIO version/consensus upgrade guides. +* [Troubleshooting](08_troubleshooting/index.md) - Common `nodeos` troubleshooting questions. + +[[info | Access Node]] +| A local or remote EOSIO access node running `nodeos` is required for a client application or smart contract to interact with the blockchain. diff --git a/docs/02_cleos/02_how-to-guides/how-to-buy-ram.md b/docs/02_cleos/02_how-to-guides/how-to-buy-ram.md new file mode 100644 index 00000000000..9c9942c05f6 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-buy-ram.md @@ -0,0 +1,23 @@ +## Goal + +Buy RAM for an account + +## Before you begin + +* You have an account + +* Ensure the reference system contracts from the `eosio.contracts` repository are deployed and used to manage system resources + +* You have sufficient tokens allocated to your account + +* Install the currently supported version of cleos + +* Unlock your wallet + +## Steps + +Buy RAM in the value of 0.1 SYS tokens for account `alice`: + +```shell +cleos system buyram alice alice "0.1 SYS" -p alice@active +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-config-a-multisig-account.md b/docs/02_cleos/02_how-to-guides/how-to-config-a-multisig-account.md new file mode 100644 index 00000000000..f587d484bd2 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-config-a-multisig-account.md @@ -0,0 +1,22 @@ +## Goal + +Set up an account that requires multiple signatures for 
signing a transaction + +## Before you begin + +* You have an account + +* You have enough resources allocated to execute the transaction + +* Install the currently supported version of cleos + +* Understand the following: + * What is an account + * What is a transaction + + +## Steps + +```shell +cleos set account permission multisig active '{"threshold" : 1, "accounts" :[{"permission":{"actor":"eosio","permission":"active"},"weight":1},{"permission":{"actor":"customera","permission":"active"},"weight":1}]}' owner -p multisig@owner +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-connect-a-specific-network.md b/docs/02_cleos/02_how-to-guides/how-to-connect-a-specific-network.md new file mode 100644 index 00000000000..d5478ef9669 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-connect-a-specific-network.md @@ -0,0 +1,25 @@ +## Goal + +Connect to a specific `nodeos` or `keosd` host + +`cleos` and `keosd` can connect to a specific node by using the `-H, --host` and `-p, --port` optional arguments. + +[[info]] +| If no optional arguments are used (i.e. 
-H and -p), `cleos` automatically tries to connect to a locally running `nodeos` and `keosd` node on the default port + +## Before you begin + +* Install the currently supported version of cleos + +## Steps +### Connecting to Nodeos + +```bash + cleos --url http://nodeos-host:8888 ${subcommand} +``` + +### Connecting to Keosd + +```bash + cleos --wallet-url http://keosd-host:8888 ${subcommand} +``` diff --git a/docs/02_cleos/02_how-to-guides/how-to-create-a-wallet.md b/docs/02_cleos/02_how-to-guides/how-to-create-a-wallet.md new file mode 100644 index 00000000000..90044ccbd4c --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-create-a-wallet.md @@ -0,0 +1,43 @@ +## Goal + +Create a keosd wallet + +## Before you begin + +* Install the currently supported version of cleos + +* Understand the following: + * What is an account + * What is a public and private key pair + +## Steps + +Create a wallet and save the password to a file: + +```shell +cleos wallet create --file password.pwd +``` + +You should see something like below. Note here the wallet is named as `default` + +```shell +Creating wallet: default +Save password to use in the future to unlock this wallet. +Without password imported keys will not be retrievable. +saving password to password.pwd +``` + +Alternatively, you can name a wallet with `-n` option: + +```shell +cleos wallet create -n named_wallet -f passwd +``` + +You will see something like below: + +```shell +Creating wallet: named_wallet +Save password to use in the future to unlock this wallet. +Without password imported keys will not be retrievable. 
+saving password to passwd +``` diff --git a/docs/02_cleos/02_how-to-guides/how-to-create-an-account.md b/docs/02_cleos/02_how-to-guides/how-to-create-an-account.md new file mode 100644 index 00000000000..398ceb7c6e2 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-create-an-account.md @@ -0,0 +1,21 @@ +## Goal + +## Before you begin + +* Install the currently supported version of cleos + +* Understand the following: + * What is an account + * What is a public and private key pair + +* Created an Owner and an Active key pair +* Imported a key pair which can authorize on behalf of a creator account + +## Steps + +```shell +cleos create account creator name OwnerKey [ActiveKey] +``` + +[[info]] +| ActiveKey is optional but recommended to provide. diff --git a/docs/02_cleos/02_how-to-guides/how-to-create-key-pairs.md b/docs/02_cleos/02_how-to-guides/how-to-create-key-pairs.md new file mode 100644 index 00000000000..a6e09b44c86 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-create-key-pairs.md @@ -0,0 +1,22 @@ +## Goal + +## Before you begin + +* Install the currently supported version of cleos + +* Understand the following: + * What is a public and private key pair + +## Steps + +To output the key pair to the console + +```shell +cleos create key --to-console +``` + +To save the key pair to a file + +```shell +cleos create key --file FILE_TO_SAVEKEY +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-delegate-CPU-resource.md b/docs/02_cleos/02_how-to-guides/how-to-delegate-CPU-resource.md new file mode 100644 index 00000000000..c18835426bc --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-delegate-CPU-resource.md @@ -0,0 +1,34 @@ +## Goal + +Delegate resource for an account or application + +## Before you begin + +* Install the currently supported version of cleos + +* Ensure the reference system contracts from `eosio.contracts` repository is deployed and used to manage system resources + +* Understand the 
following: + * What is an account + * What is network bandwidth + * What is CPU bandwidth + + + +## Steps + +Delegate 0.01 SYS CPU bandwidth from `bob` to `alice` + +```shell +cleos system delegatebw bob alice "0.01 SYS" "0 SYS" +``` + +You should see something below: + +```shell +executed transaction: 5487afafd67bf459a20fcc2dbc5d0c2f0d1f10e33123eaaa07088046fd18e3ae 192 bytes 503 us +# eosio <= eosio::delegatebw {"from":"bob","receiver":"alice","stake_net_quantity":"0.0000 SYS","stake_cpu_quanti... +# eosio.token <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 EOS","memo":"stake bandwidth"} +# bob <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 SYS","memo":"stake bandwidth"} +# eosio.stake <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 SYS","memo":"stake bandwidth"} +``` diff --git a/docs/02_cleos/02_how-to-guides/how-to-delegate-net-resource.md b/docs/02_cleos/02_how-to-guides/how-to-delegate-net-resource.md new file mode 100644 index 00000000000..42f2f2017f0 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-delegate-net-resource.md @@ -0,0 +1,34 @@ +## Goal + +Delegate resource for an account or application + +## Before you begin + +* Install the currently supported version of cleos + +* Ensure the reference system contracts from `eosio.contracts` repository is deployed and used to manage system resources + +* Understand the following: + * What is an account + * What is network bandwidth + * What is CPU bandwidth + + + +## Steps + +Delegate 0.01 SYS network bandwidth from `bob` to `alice` + +```shell +cleos system delegatebw bob alice "0 SYS" "0.01 SYS" +``` + +You should see something below: + +```shell +executed transaction: 5487afafd67bf459a20fcc2dbc5d0c2f0d1f10e33123eaaa07088046fd18e3ae 192 bytes 503 us +# eosio <= eosio::delegatebw {"from":"bob","receiver":"alice","stake_net_quantity":"0.0100 SYS","stake_cpu_quanti... 
+# eosio.token <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 SYS","memo":"stake bandwidth"} +# bob <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 SYS","memo":"stake bandwidth"} +# eosio.stake <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 SYS","memo":"stake bandwidth"} +``` diff --git a/docs/02_cleos/02_how-to-guides/how-to-deploy-a-smart-contract.md b/docs/02_cleos/02_how-to-guides/how-to-deploy-a-smart-contract.md new file mode 100644 index 00000000000..355d671ea6e --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-deploy-a-smart-contract.md @@ -0,0 +1,19 @@ +## Goal + +Deploy an EOSIO contract + +## Before you begin + +* Install the currently supported version of cleos + +* Unlock the wallet which contains the private key of the contract account + +## Steps + +Execute: + +```shell +cleos set contract contract_account contract_directory_path +``` + +Replace the `contract_directory_path` with the path points to your contract folder diff --git a/docs/02_cleos/02_how-to-guides/how-to-get-account-information.md b/docs/02_cleos/02_how-to-guides/how-to-get-account-information.md new file mode 100644 index 00000000000..039e94fbaf8 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-get-account-information.md @@ -0,0 +1,43 @@ +## Goal + +Query infomation of an EOSIO account + +## Before you begin + +* Install the currently supported version of cleos + +* Understand the following: + * What is an account + +## Steps + +Execute the command below: + +```shell +cleos get account ACCOUNT_NAME +``` + +You should be see something like below: + +```shell +created: 2018-06-01T12:00:00.000 +privileged: true +permissions: + owner 1: 1 EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV + active 1: 1 EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV +memory: + quota: unlimited used: 3.004 KiB + +net bandwidth: + used: unlimited + available: unlimited + limit: unlimited + +cpu 
bandwidth: + used: unlimited + available: unlimited + limit: unlimited +``` + +[[info]] +| Depending on the EOSIO network you are connected to, you might see different fields associated with an account. That depends on which system contract has been deployed to the network. \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-get-block-information.md b/docs/02_cleos/02_how-to-guides/how-to-get-block-information.md new file mode 100644 index 00000000000..1010f807b27 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-get-block-information.md @@ -0,0 +1,18 @@ +## Goal + +Query information of a block + +## Before you begin + +* Install the currently supported version of cleos + +* Understand the following: + * What is a block + +## Steps + +Execute the command below: + +```shell +cleos get block BLOCK_NUMBER +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-get-tables-information.md b/docs/02_cleos/02_how-to-guides/how-to-get-tables-information.md new file mode 100644 index 00000000000..7686ac5a8c6 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-get-tables-information.md @@ -0,0 +1,18 @@ +## Goal + +Query information of a table + +## Before you begin + +* Install the currently supported version of cleos + +* Understand the following: + * What is an account + * What is a table + * What is a scope of table + +## Steps + +```shell +cleos get table ACCOUNT SCOPE TABLE +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-get-transaction-information.md b/docs/02_cleos/02_how-to-guides/how-to-get-transaction-information.md new file mode 100644 index 00000000000..0ab255fe9c8 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-get-transaction-information.md @@ -0,0 +1,20 @@ +## Goal + +Query information of a transaction + +## Before you begin + +* Install the currently supported version of cleos + +* Understand the following: + + * What is a transaction + +## Steps + 
+```shell +cleos get transaction id +``` + +[[info]] +| Beware that you need to connect to a nodeos that enables the history API plugin to query transaction information \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-import-a-key.md b/docs/02_cleos/02_how-to-guides/how-to-import-a-key.md new file mode 100644 index 00000000000..6e1faf530a7 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-import-a-key.md @@ -0,0 +1,22 @@ +## Goal + +Import a key pair + +## Before you begin + +* Install the currently supported version of cleos + +* Understand the following: + * What is a public and private key pair + +## Steps + +```bash +cleos wallet import +``` + +Type your private key. You should see something like below: + +```shell +private key: imported private key for: EOS8FBXJUfbANf3xeDWPoJxnip3Ych9HjzLBr1VaXRQFdkVAxwLE7 +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-link-permission.md b/docs/02_cleos/02_how-to-guides/how-to-link-permission.md new file mode 100644 index 00000000000..57ecab69046 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-link-permission.md @@ -0,0 +1,20 @@ +## Goal + +Link a permission to an action of a contract + +## Before you begin + +* Install the currently supported version of cleos + +* Understand the following: + * What is an account + * What is permission level + * What is an action + +## Steps + +Link a permission level `permlvl` to the action `transfer` of contract `hodlcontract` + +```shell +cleos set action permission alice hodlcontract transfer permlvl +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-list-all-key-pair.md b/docs/02_cleos/02_how-to-guides/how-to-list-all-key-pair.md new file mode 100644 index 00000000000..576e4935bd4 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-list-all-key-pair.md @@ -0,0 +1,34 @@ +## Goal + +List all key pairs + +## Before you begin + +* Install the currently supported version of 
cleos + +* Understand the following: + * What is a public and private key pair + +## Steps + +Unlock your wallet + +```bash +cleos wallet unlock +``` + +List all public keys: + +```bash +cleos wallet keys +``` + +List all private keys: + +```bash +cleos wallet private_keys + +``` + +[[warning]] +| Be careful to never reveal your private keys in a production environment diff --git a/docs/02_cleos/02_how-to-guides/how-to-stake-resource.md b/docs/02_cleos/02_how-to-guides/how-to-stake-resource.md new file mode 100644 index 00000000000..83f3a8da756 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-stake-resource.md @@ -0,0 +1,28 @@ +## Goal + +Stake resource for your account + +## Before you begin + +* Install the currently supported version of cleos + +* Ensure the reference system contracts from `eosio.contracts` repository is deployed and used to manage system resources + +* Understand the following: + * What is an account + * What is network bandwidth + * What is CPU bandwidth + +## Steps + +Stake 0.01 SYS network bandwidth for `alice` + +```shell +cleos system delegatebw alice alice "0 SYS" "0.01 SYS" +``` + +Stake 0.01 SYS CPU bandwidth for `alice`: + +```shell +cleos system delegatebw alice alice "0.01 SYS" "0 SYS" +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-submit-a-transaction.md b/docs/02_cleos/02_how-to-guides/how-to-submit-a-transaction.md new file mode 100644 index 00000000000..419876d805f --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-submit-a-transaction.md @@ -0,0 +1,91 @@ +## Goal + +Push a transaction + +## Before you begin + +* Install the currently supported version of cleos + +* Understand the following: + * What is a transaction + * How to generate a valid transaction JSON + +## Steps + +* Create a JSON snippet that contains a valid transaction such as the following: + +```JSON +{ + "expiration": "2019-08-01T07:15:49", + "ref_block_num": 34881, + "ref_block_prefix": 2972818865, + 
"max_net_usage_words": 0, + "max_cpu_usage_ms": 0, + "delay_sec": 0, + "context_free_actions": [], + "actions": [{ + "account": "eosio.token", + "name": "transfer", + "authorization": [{ + "actor": "han", + "permission": "active" + } + ], + "data": "000000000000a6690000000000ea305501000000000000000453595300000000016d" + } + ], + "transaction_extensions": [], + "context_free_data": [] +} +``` + +* You can also create a JSON snippet that uses clear text JSON for `data` field. + +[[info]] +| Be aware that if a clear text `data` field is used, cleos need to fetch copies of required ABIs using `nodeos` API. That operation has a performance overhead on `nodeos` + +```JSON +{ + "expiration": "2019-08-01T07:15:49", + "ref_block_num": 34881, + "ref_block_prefix": 2972818865, + "max_net_usage_words": 0, + "max_cpu_usage_ms": 0, + "delay_sec": 0, + "context_free_actions": [], + "actions": [{ + "account": "eosio.token", + "name": "transfer", + "authorization": [{ + "actor": "han", + "permission": "active" + } + ], + "data": { + "from": "han", + "to": "eosio", + "quantity": "0.0001 SYS", + "memo": "m" + } + } + ], + "transaction_extensions": [], + "context_free_data": [] +} +``` + +* Execute the following command: + +```shell +cleos push transaction TRX_FILE.json +``` + +* Submit a transction from a JSON: + +```shell +cleos push transaction JSON +``` + + \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-transfer-an-eosio.token-token.md b/docs/02_cleos/02_how-to-guides/how-to-transfer-an-eosio.token-token.md new file mode 100644 index 00000000000..794621a445a --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-transfer-an-eosio.token-token.md @@ -0,0 +1,21 @@ +## Goal + +Transfer token created by eosio.token contract + +## Before you begin + +* Install the currently supported version of cleos + +* You are going to transfer a token created by eosio.token contract and eosio.token contract has been deployed on the network which you are connected 
to + +* Understand the following: + * What is a transaction + * Token transfers are irrevertable + +## Steps + +Assume you would like to transfer `0.0001 SYS` token to an account called `bob` from an account called `alice`, execute the following: + +```shell +cleos transfer alice bob "0.0001 SYS" "Hodl!" -p alice@active +``` diff --git a/docs/02_cleos/02_how-to-guides/how-to-undelegate-CPU.md b/docs/02_cleos/02_how-to-guides/how-to-undelegate-CPU.md new file mode 100644 index 00000000000..dc9ca010ab5 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-undelegate-CPU.md @@ -0,0 +1,34 @@ +## Goal + +Undelegate resource for an account or application + +Beware that only the account which originally delegated resource can undelegate + +## Before you begin + +* Install the currently supported version of cleos + +* Ensure the reference system contracts from `eosio.contracts` repository is deployed and used to manage system resources + +* Understand the following: + * What is an account + * What is network bandwidth + * What is CPU bandwidth + +## Steps + +Undelegate 0.01 SYS CPU bandwidth form from account `alice` back to account `bob`: + +```shell +cleos system undelegatebw bob alice "0 SYS" "0.01 SYS" +``` + +You should see something below: + +```shell + +node1:~ han$ cleos system delegatebw bob alice "0 SYS" "0.01 SYS" +executed transaction: e7e7edb6c5556de933f9d663fea8b4a9cd56ece6ff2cebf056ddd0835efa6606 184 bytes 452 us +# eosio <= eosio::undelegatebw {"from":"alice","receiver":"bob","unstake_net_quantity":"0.0000 EOS","unstake_cpu_qu... 
+warning: transaction executed locally, but may not be confirmed by the network yet ] +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-undelegate-NET.md b/docs/02_cleos/02_how-to-guides/how-to-undelegate-NET.md new file mode 100644 index 00000000000..09996586b0d --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-undelegate-NET.md @@ -0,0 +1,34 @@ +## Goal + +Undelegate resource for an account or application + +Beware that only the account which originally delegated resource can undelegate + +## Before you begin + +* Install the currently supported version of cleos + +* Ensure the reference system contracts from `eosio.contracts` repository is deployed and used to manage system resources + +* Understand the following: + * What is an account + * What is network bandwidth + * What is CPU bandwidth + +## Steps + +Undelegate 0.01 SYS network bandwidth from account `alice` back to account `bob`: + +```shell +cleos system undelegatebw bob alice "0 SYS" "0.01 SYS" +``` + +You should see something below: + +```shell + +node1:~ han$ cleos system delegatebw bob alice "0 SYS" "0.01 SYS" +executed transaction: e7e7edb6c5556de933f9d663fea8b4a9cd56ece6ff2cebf056ddd0835efa6606 184 bytes 452 us +# eosio <= eosio::undelegatebw {"from":"alice","receiver":"bob","unstake_net_quantity":"0.01 EOS","unstake_cpu_qu... 
+warning: transaction executed locally, but may not be confirmed by the network yet ] +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-unlink-permission.md b/docs/02_cleos/02_how-to-guides/how-to-unlink-permission.md new file mode 100644 index 00000000000..a7dff1fb9b0 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-unlink-permission.md @@ -0,0 +1,20 @@ +## Goal + +Unlink a linked permission level + +## Before you begin + +* Install the current supported version of cleos + +* Understand the following: + * What is an account + * What is permission level + * What is an action + +## Steps + +Remove a linked permission level from an action `transfer` of contract `hodlcontract` + +```shell +cleos set action permission alice hodlcontract transfer NULL +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-unstake-CPU.md b/docs/02_cleos/02_how-to-guides/how-to-unstake-CPU.md new file mode 100644 index 00000000000..b824a244dfe --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-unstake-CPU.md @@ -0,0 +1,32 @@ +## Goal + +Unstake resource from your account + +Beware that only the account which originally delegated resource can undelegate + +## Before you begin + +* Install the currently supported version of cleos + +* Ensure the reference system contracts from `eosio.contracts` repository is deployed and used to manage system resources + +* Understand the following: + * What is an account + * What is network bandwidth + * What is CPU bandwidth + +## Steps + +Unstake 0.01 SYS CPU bandwidth form account `alice`: + +```shell +cleos system undelegatebw alice alice "0.01 SYS" "0 SYS" +``` + +You should see something below: + +```shell +executed transaction: e7e7edb6c5556de933f9d663fea8b4a9cd56ece6ff2cebf056ddd0835efa6606 184 bytes 452 us +# eosio <= eosio::undelegatebw {"from":"alice","receiver":"alice","unstake_net_quantity":"0.0000 EOS","unstake_cpu_qu... 
+warning: transaction executed locally, but may not be confirmed by the network yet ] +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-unstake-NET.md b/docs/02_cleos/02_how-to-guides/how-to-unstake-NET.md new file mode 100644 index 00000000000..0d8c698d353 --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-unstake-NET.md @@ -0,0 +1,32 @@ +## Goal + +Unstake resource from your account + +Beware that only the account which originally delegated resource can undelegate + +## Before you begin + +* Install the currently supported version of cleos + +* Ensure the reference system contracts from `eosio.contracts` repository is deployed and used to manage system resources + +* Understand the following: + * What is an account + * What is network bandwidth + * What is CPU bandwidth + +## Steps + +Unstake 0.01 SYS network bandwidth from account `alice`: + +```shell +cleos system undelegatebw alice alice "0 SYS" "0.01 SYS" +``` + +You should see something below: + +```shell +executed transaction: e7e7edb6c5556de933f9d663fea8b4a9cd56ece6ff2cebf056ddd0835efa6606 184 bytes 452 us +# eosio <= eosio::undelegatebw {"from":"alice","receiver":"alice","unstake_net_quantity":"0.01 EOS","unstake_cpu_qu... 
+warning: transaction executed locally, but may not be confirmed by the network yet ] +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/how-to-vote.md b/docs/02_cleos/02_how-to-guides/how-to-vote.md new file mode 100644 index 00000000000..34dd01c586b --- /dev/null +++ b/docs/02_cleos/02_how-to-guides/how-to-vote.md @@ -0,0 +1,31 @@ +## Goal + +Vote for a block producer + +## Before you begin + +* Install the current supported version of cleos + +* Ensure the reference system contracts from `eosio.contracts` repository is deployed and used to manage system resources + +* Understand the following: + * What is a block producer + * How does voting works + +* Unlock your wallet + +## Steps + +Assume you are going to vote for blockproducer1 and blockproducer2 from an account called `eosiotestts2`, execute the following: + +```bash +cleos system voteproducer prods eosiotestts2 blockproducer1 blockproducer2 +``` + +You should see something like below: + + +```shell +executed transaction: 2d8b58f7387aef52a1746d7a22d304bbbe0304481d7751fc4a50b619df62676d 128 bytes 374 us +# eosio <= eosio::voteproducer {"voter":"eosiotestts2","proxy":"","producers":["blockproducer1","blockproducer2"]} +``` \ No newline at end of file diff --git a/docs/02_cleos/02_how-to-guides/index.md b/docs/02_cleos/02_how-to-guides/index.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docs/02_cleos/03_command-reference/convert/index.md b/docs/02_cleos/03_command-reference/convert/index.md new file mode 100755 index 00000000000..d4ae096130d --- /dev/null +++ b/docs/02_cleos/03_command-reference/convert/index.md @@ -0,0 +1,8 @@ +## Description +Pack and unpack transactions + +## subcommands +- [pack_transaction](pack_transaction) - From plain signed json to packed form +- [unpack_transaction](unpack_transaction) - From packed to plain signed json form +- [pack_action_data](pack_action_data) - From json action data to packed form +- 
[unpack_action_data](unpack_action_data) - From packed to json action data form \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/convert/pack_action_data.md b/docs/02_cleos/03_command-reference/convert/pack_action_data.md new file mode 100755 index 00000000000..b84dc7e4f68 --- /dev/null +++ b/docs/02_cleos/03_command-reference/convert/pack_action_data.md @@ -0,0 +1,23 @@ +## Description +From json action data to packed form + +## Positionals +- `account` _TEXT_ - The name of the account that hosts the contract +- `name` _TEXT_ - The name of the function that's called by this action +- `unpacked_action_data` _TEXT_ - The action data expressed as json + +## Options + +- `-h,--help` - Print this help message and exit + +## Usage +```shell + cleos convert pack_action_data eosio unlinkauth '{"account":"test1", "code":"test2", "type":"eosioeosio"}' +``` + +## Output + + +```text +000000008090b1ca000000000091b1ca000075982aea3055 +``` diff --git a/docs/02_cleos/03_command-reference/convert/pack_transaction.md b/docs/02_cleos/03_command-reference/convert/pack_transaction.md new file mode 100755 index 00000000000..db2b60a6980 --- /dev/null +++ b/docs/02_cleos/03_command-reference/convert/pack_transaction.md @@ -0,0 +1,51 @@ +## Description + +From plain signed json to packed form + +## Positionals + +- `transaction` _TEXT_ - The plain signed json (string) + +## Options + +- `-h,--help` - Print this help message and exit +- `--pack-action-data` - Pack all action data within transaction, needs interaction with nodeos + +## Usage + + +```shell +cleos convert pack_transaction '{ + "expiration": "2018-08-02T20:24:36", + "ref_block_num": 14207, + "ref_block_prefix": 1438248607, + "max_net_usage_words": 0, + "max_cpu_usage_ms": 0, + "delay_sec": 0, + "context_free_actions": [], + "actions": [{ + "account": "eosio", + "name": "newaccount", + "authorization": [{ + "actor": "eosio", + "permission": "active" + } + ], + "data": 
"0000000000ea305500a6823403ea30550100000001000240cc0bf90a5656c8bb81f0eb86f49f89613c5cd988c018715d4646c6bd0ad3d8010000000100000001000240cc0bf90a5656c8bb81f0eb86f49f89613c5cd988c018715d4646c6bd0ad3d801000000" + } + ], + "transaction_extensions": [] +}' +``` + +## Output + + +```shell +{ + "signatures": [], + "compression": "none", + "packed_context_free_data": "", + "packed_trx": "8468635b7f379feeb95500000000010000000000ea305500409e9a2264b89a010000000000ea305500000000a8ed3232660000000000ea305500a6823403ea30550100000001000240cc0bf90a5656c8bb81f0eb86f49f89613c5cd988c018715d4646c6bd0ad3d8010000000100000001000240cc0bf90a5656c8bb81f0eb86f49f89613c5cd988c018715d4646c6bd0ad3d80100000000" +} +``` diff --git a/docs/02_cleos/03_command-reference/convert/unpack_action_data.md b/docs/02_cleos/03_command-reference/convert/unpack_action_data.md new file mode 100755 index 00000000000..c1f5810acb1 --- /dev/null +++ b/docs/02_cleos/03_command-reference/convert/unpack_action_data.md @@ -0,0 +1,28 @@ +## Description +From packed to json action data form + +## Positionals +- `account` _TEXT_ - The name of the account that hosts the contract +- `name` _TEXT_ - The name of the function that's called by this action +- `packed_action_data` _TEXT_ - The action data expressed as packed hex string +## Options + +- `-h,--help` - Print this help message and exit + +## Usage + + +```text + cleos convert unpack_action_data eosio unlinkauth 000000008090b1ca000000000091b1ca000075982aea3055 +``` + +## Output + + +```json +{ + "account": "test1", + "code": "test2", + "type": "eosioeosio" +} +``` diff --git a/docs/02_cleos/03_command-reference/convert/unpack_transaction.md b/docs/02_cleos/03_command-reference/convert/unpack_transaction.md new file mode 100755 index 00000000000..29d9f43c2c6 --- /dev/null +++ b/docs/02_cleos/03_command-reference/convert/unpack_transaction.md @@ -0,0 +1,57 @@ +## Description + +From packed to plain signed json form + +## Positionals + +- `transaction` _TEXT_ - The packed 
transaction json (string containing packed_trx and optionally compression fields.) + +## Options + +- `-h,--help` - Print this help message and exit +- `--unpack-action-data` - Unpack all action data within transaction, needs interaction with nodeos + +## Usage + +```text +cleos convert unpack_transaction '{ + "signatures": [ + "SIG_K1_KmRbWahefwxs6uyCGNR6wNRjw7cntEeFQhNCbyg8S92Kbp7zdSSVGTD2QS7pNVWgcU126zpxaBp9CwUxFpRwSnfkjd46bS" + ], + "compression": "none", + "packed_context_free_data": "", + "packed_trx": "8468635b7f379feeb95500000000010000000000ea305500409e9a2264b89a010000000000ea305500000000a8ed3232660000000000ea305500a6823403ea30550100000001000240cc0bf90a5656c8bb81f0eb86f49f89613c5cd988c018715d4646c6bd0ad3d8010000000100000001000240cc0bf90a5656c8bb81f0eb86f49f89613c5cd988c018715d4646c6bd0ad3d80100000000" +}' +``` + +## Output + + +```text +{ + "expiration": "2018-08-02T20:24:36", + "ref_block_num": 14207, + "ref_block_prefix": 1438248607, + "max_net_usage_words": 0, + "max_cpu_usage_ms": 0, + "delay_sec": 0, + "context_free_actions": [], + "actions": [{ + "account": "eosio", + "name": "newaccount", + "authorization": [{ + "actor": "eosio", + "permission": "active" + } + ], + "data": "0000000000ea305500a6823403ea30550100000001000240cc0bf90a5656c8bb81f0eb86f49f89613c5cd988c018715d4646c6bd0ad3d8010000000100000001000240cc0bf90a5656c8bb81f0eb86f49f89613c5cd988c018715d4646c6bd0ad3d801000000" + } + ], + "transaction_extensions": [], + "signatures": [ + "SIG_K1_KmRbWahefwxs6uyCGNR6wNRjw7cntEeFQhNCbyg8S92Kbp7zdSSVGTD2QS7pNVWgcU126zpxaBp9CwUxFpRwSnfkjd46bS" + ], + "context_free_data": [] +} + +``` diff --git a/docs/02_cleos/03_command-reference/create/account.md b/docs/02_cleos/03_command-reference/create/account.md new file mode 100755 index 00000000000..f7b26d3e5f4 --- /dev/null +++ b/docs/02_cleos/03_command-reference/create/account.md @@ -0,0 +1,72 @@ +## Description +Creates a new account on the blockchain + +## Usage + + +```shell +Usage: cleos create account 
[OPTIONS] creator name OwnerKey [ActiveKey] + +Positionals: + creator TEXT The name of the account creating the new account (required) + name TEXT The name of the new account (required) + OwnerKey TEXT The owner public key or permission level for the new account (required) + ActiveKey TEXT The active public key or permission level for the new account + +Options: + -h,--help Print this help message and exit + -x,--expiration set the time in seconds before a transaction expires, defaults to 30s + -f,--force-unique force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times + -s,--skip-sign Specify if unlocked wallet keys should be used to sign transaction + -j,--json print result as json + -d,--dont-broadcast don't broadcast transaction to the network (just print to stdout) + --return-packed used in conjunction with --dont-broadcast to get the packed transaction + -r,--ref-block TEXT set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) + -p,--permission TEXT ... An account and permission level to authorize, as in 'account@permission' (defaults to 'creator@active') + --max-cpu-usage-ms UINT set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) + --max-net-usage UINT set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) + --delay-sec UINT set the delay_sec seconds, defaults to 0s +``` + +## Command +A set of EOS keys is required to create an account. A set of EOS keys can be generated by using `./cleos create key`. 
+ +```shell +$ ./cleos create account inita tester EOS4toFS3YXEQCkuuw1aqDLrtHim86Gz9u3hBdcBw5KNPZcursVHq EOS7d9A3uLe6As66jzN8j44TXJUqJSK3bFjjEEqR4oTvNAB3iM9SA +``` + +## Output + + +```shell +{ + "transaction_id": "6acd2ece68c4b86c1fa209c3989235063384020781f2c67bbb80bc8d540ca120", + "processed": { + "refBlockNum": "25217", + "refBlockPrefix": "2095475630", + "expiration": "2017-07-25T17:54:55", + "scope": [ + "eos", + "inita" + ], + "signatures": [], + "messages": [{ + "code": "eos", + "type": "newaccount", + "authorization": [{ + "account": "inita", + "permission": "active" + } + ], + "data": "c9251a0000000000b44c5a2400000000010000000102bcca6347d828d4e1868b7dfa91692a16d5b20d0ee3d16a7ca2ddcc7f6dd03344010000010000000102bcca6347d828d4e1868b7dfa91692a16d5b20d0ee3d16a7ca2ddcc7f6dd03344010000010000000001c9251a000000000061d0640b000000000100010000000000000008454f5300000000" + } + ], + "output": [{ + "notify": [], + "sync_transactions": [], + "async_transactions": [] + } + ] + } +} +``` diff --git a/docs/02_cleos/03_command-reference/create/index.md b/docs/02_cleos/03_command-reference/create/index.md new file mode 100755 index 00000000000..37d0be3b232 --- /dev/null +++ b/docs/02_cleos/03_command-reference/create/index.md @@ -0,0 +1,15 @@ +## Description +Create various items, on and off the blockchain + +## Subcommands +- [key](key) - Create a new keypair and print the public and private keys +- [account](account) - Create a new account on the blockchain + +```text +Create various items, on and off the blockchain +Usage: ./cleos create SUBCOMMAND + +Subcommands: + key Create a new keypair and print the public and private keys + account Create a new account on the blockchain +``` diff --git a/docs/02_cleos/03_command-reference/create/key.md b/docs/02_cleos/03_command-reference/create/key.md new file mode 100755 index 00000000000..fa6036705c5 --- /dev/null +++ b/docs/02_cleos/03_command-reference/create/key.md @@ -0,0 +1,28 @@ +## Description + +Creates a new keypair and 
prints the public and private keys + +## Usage + +```shell +Usage: cleos create key [OPTIONS] + +Options: + -h,--help Print this help message and exit + --r1 Generate a key using the R1 curve (iPhone), instead of the K1 curve (Bitcoin) + -f,--file TEXT Name of file to write private/public key output to. (Must be set, unless "--to-console" is passed + --to-console Print private/public keys to console. +``` + +## Command + +```shell +$ ./cleos create key -f passwd +``` + +## Output + +```shell +Private key: 5KCkcSxYKZfh5Cr8CCunS2PiUKzNZLhtfBjudaUnad3PDargFQo +Public key: EOS5uHeBsURAT6bBXNtvwKtWaiDSDJSdSmc96rHVws5M1qqVCkAm6 +``` diff --git a/docs/02_cleos/03_command-reference/get/abi.md b/docs/02_cleos/03_command-reference/get/abi.md new file mode 100644 index 00000000000..a2f9cf64458 --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/abi.md @@ -0,0 +1,17 @@ +## Description +Retrieves the ABI for an account + +## Positional Parameters +- `name` _TEXT_ - The name of the account whose abi should be retrieved + +## Options +- `-f,--file` _TEXT_ - The name of the file to save the contract .abi to instead of writing to console + +## Examples +Retrieve and save abi for eosio.token contract + +```shell +$ cleos get abi eosio.token -f eosio.token.abi + +saving abi to eosio.token.abi +``` diff --git a/docs/02_cleos/03_command-reference/get/account.md b/docs/02_cleos/03_command-reference/get/account.md new file mode 100755 index 00000000000..cd3597160e4 --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/account.md @@ -0,0 +1,103 @@ +## Description +Retrieves an account from the blockchain + +## Positional Parameters +- `name` _TEXT_ - The name of the account to retrieve +- `core-symbol` _TEXT_ - The expected core symbol of the chain you are querying + +## Options +- `-j,--json` - Output in JSON format + +## Examples + +### Get formatted data for user **eosio** + +```shell +$ cleos get account eosio +privileged: true +permissions: + owner 1: 1 
EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV + active 1: 1 EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV +memory: + quota: -1 bytes used: 1.22 Mb + +net bandwidth: (averaged over 3 days) + used: -1 bytes + available: -1 bytes + limit: -1 bytes + +cpu bandwidth: (averaged over 3 days) + used: -1 us + available: -1 us + limit: -1 us + +producers: +``` +### Get JSON data for user **eosio** + +```shell +$ cleos get account eosio --json +{ + "account_name": "eosio", + "privileged": true, + "last_code_update": "2018-05-23T18:00:25.500", + "created": "2018-03-02T12:00:00.000", + "ram_quota": -1, + "net_weight": -1, + "cpu_weight": -1, + "net_limit": { + "used": -1, + "available": -1, + "max": -1 + }, + "cpu_limit": { + "used": -1, + "available": -1, + "max": -1 + }, + "ram_usage": 1279625, + "permissions": [{ + "perm_name": "active", + "parent": "owner", + "required_auth": { + "threshold": 1, + "keys": [{ + "key": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "weight": 1 + } + ], + "accounts": [], + "waits": [] + } + },{ + "perm_name": "owner", + "parent": "", + "required_auth": { + "threshold": 1, + "keys": [{ + "key": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "weight": 1 + } + ], + "accounts": [], + "waits": [] + } + } + ], + "total_resources": null, + "delegated_bandwidth": null, + "voter_info": { + "owner": "eosio", + "proxy": "", + "producers": [], + "staked": 0, + "last_vote_weight": "0.00000000000000000", + "proxied_vote_weight": "0.00000000000000000", + "is_proxy": 0, + "deferred_trx_id": 0, + "last_unstake_time": "1970-01-01T00:00:00", + "unstaking": "0.0000 SYS" + } +} + +``` diff --git a/docs/02_cleos/03_command-reference/get/accounts.md b/docs/02_cleos/03_command-reference/get/accounts.md new file mode 100755 index 00000000000..68a5f084512 --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/accounts.md @@ -0,0 +1,21 @@ +## Description +Retrieves all accounts associated with a defined public key + +[[info]] 
+|This command will not return privileged accounts. + +## Positional Parameters +`public_key` _TEXT_ - The public key to retrieve accounts for +## Options +- `-j,--json` - Output in JSON format +## Example + + +```shell +$ cleos get accounts EOS8mUftJXepGzdQ2TaCduNuSPAfXJHf22uex4u41ab1EVv9EAhWt +{ + "account_names": [ + "testaccount" + ] +} +``` diff --git a/docs/02_cleos/03_command-reference/get/actions.md b/docs/02_cleos/03_command-reference/get/actions.md new file mode 100644 index 00000000000..de14e3bd8a4 --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/actions.md @@ -0,0 +1,31 @@ +## Description +Retrieve all actions with specific account name referenced in authorization or receiver + +## Positional Parameters +- `account_name` _TEXT_ - name of account to query on (required) +- `pos` _INT_ - sequence number of action for this account, -1 for last (optional) +- `offset` _INT_ - get actions [pos, pos + offset] for positive offset or [pos - offset, pos] for negative offset + +## Options + +- `-j,--json` - print full json +- `--full` - don't truncate action output +- `--pretty` - pretty print full action json +- `--console` - print console output generated by action + +## Examples +Retrieve all actions referencing the eosio.token account + +```shell +$ cleos get actions eosio.token + +# seq when contract::action => receiver trx id... args +================================================================================================================ +# 976 2018-06-01T19:54:05.000 eosio.token::transfer => eosio.token 1d1fe154... {"from":"userae","to":"useraa","quantity":"0.000... +# 977 2018-06-01T19:54:05.000 eosio.token::transfer => eosio.token a0c9e5bc... {"from":"userab","to":"useraa","quantity":"0.000... +# 978 2018-06-01T19:54:05.000 eosio.token::transfer => eosio.token 3749d0d1... {"from":"userab","to":"userah","quantity":"0.000... +# 979 2018-06-01T19:54:05.000 eosio.token::transfer => eosio.token dda205b0... 
{"from":"userai","to":"useraj","quantity":"0.000... +# 980 2018-06-01T19:54:05.000 eosio.token::transfer => eosio.token 14089e9b... {"from":"userab","to":"userae","quantity":"0.000... +# 981 2018-06-01T19:54:05.000 eosio.token::transfer => eosio.token 6882cefc... {"from":"useraj","to":"userab","quantity":"0.000... +... +``` diff --git a/docs/02_cleos/03_command-reference/get/block.md b/docs/02_cleos/03_command-reference/get/block.md new file mode 100755 index 00000000000..a6e625f9a30 --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/block.md @@ -0,0 +1,35 @@ +## Description +Retrieves a full block from the blockchain + +## Positional Parameters +- `block` _TEXT_ - The number **or** ID of the block to retrieve +## Options +- `--header-state` - Get block header state from fork database instead +## Example + + +```shell +$ ./cleos get block 1 +$ ./cleos get block 0000000130d70e94e0022fd2fa035cabb9e542c34ea27f572ac90b5a7aa3d891 +``` +This will output a block object similar to the following + +```json +{ + "timestamp": "2018-03-02T12:00:00.000", + "producer": "", + "confirmed": 1, + "previous": "0000000000000000000000000000000000000000000000000000000000000000", + "transaction_mroot": "0000000000000000000000000000000000000000000000000000000000000000", + "action_mroot": "0000000000000000000000000000000000000000000000000000000000000000", + "schedule_version": 0, + "new_producers": null, + "header_extensions": [], + "producer_signature": "SIG_K1_111111111111111111111111111111111111111111111111111111111111111116uk5ne", + "transactions": [], + "block_extensions": [], + "id": "0000000130d70e94e0022fd2fa035cabb9e542c34ea27f572ac90b5a7aa3d891", + "block_num": 1, + "ref_block_prefix": 3526296288 +} +``` diff --git a/docs/02_cleos/03_command-reference/get/code.md b/docs/02_cleos/03_command-reference/get/code.md new file mode 100755 index 00000000000..7bdeca67e60 --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/code.md @@ -0,0 +1,30 @@ +## Description +Retrieves 
the code and ABI for an account + +## Positional Parameters +- `name` _TEXT_ - The name of the account whose code should be retrieved +## Options +- `-c,--code` _TEXT_ - The name of the file to save the contract _.wast_ to +- `-a,--abi` _TEXT_ - The name of the file to save the contract _.abi_ to +- `--wasm` Save contract as wasm +## Examples +Simply output the hash of eosio.token contract + +```shell +$ cleos get code eosio.token +code hash: f675e7aeffbf562c033acfaf33eadff255dacb90d002db51c7ad7cbf057eb791 +``` +Retrieve and save abi for eosio.token contract + +```shell +$ cleos get code eosio.token -a eosio.token.abi +code hash: f675e7aeffbf562c033acfaf33eadff255dacb90d002db51c7ad7cbf057eb791 +saving abi to eosio.token.abi +``` +Retrieve and save wast code for eosio.token contract + +```shell +$ cleos get code eosio.token -c eosio.token.wast +code hash: f675e7aeffbf562c033acfaf33eadff255dacb90d002db51c7ad7cbf057eb791 +saving wast to eosio.token.wast +``` diff --git a/docs/02_cleos/03_command-reference/get/currency-balance.md b/docs/02_cleos/03_command-reference/get/currency-balance.md new file mode 100755 index 00000000000..53d9ab96e88 --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/currency-balance.md @@ -0,0 +1,21 @@ +## Description + +Retrieve the balance of an account for a given currency + +## Positional Parameters +`contract` _TEXT_ - The contract that operates the currency + +`account` _TEXT_ - The account to query balances for + +`symbol` _TEXT_ - The symbol for the currency if the contract operates multiple currencies + +## Options +There are no options for this subcommand + +## Example +Get balance of eosio from eosio.token contract for SYS symbol. 
+ +```text +$ cleos get currency balance eosio.token eosio SYS +999999920.0000 SYS +``` diff --git a/docs/02_cleos/03_command-reference/get/currency-stats.md b/docs/02_cleos/03_command-reference/get/currency-stats.md new file mode 100755 index 00000000000..f959bf5b81d --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/currency-stats.md @@ -0,0 +1,22 @@ +## Description +Retrieve the stats for a given currency + +## Positional Parameters +`contract` _TEXT_ - The contract that operates the currency + +`symbol` _TEXT_ - The symbol for the currency if the contract operates multiple currencies +## Options +There are no options for this subcommand +## Example +Get stats of the SYS token from the eosio.token contract. + +```text +$ cleos get currency stats eosio.token SYS +{ + "SYS": { + "supply": "1000000000.0000 SYS", + "max_supply": "10000000000.0000 SYS", + "issuer": "eosio" + } +} +``` diff --git a/docs/02_cleos/03_command-reference/get/currency.md b/docs/02_cleos/03_command-reference/get/currency.md new file mode 100755 index 00000000000..0baa9aebb22 --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/currency.md @@ -0,0 +1,8 @@ +## Description + +Exposes subcommands for accessing standard currencies + +## Subcommands +[currency balance](currency-balance.md) - Retrieve the balance of an account for a given currency + +[currency stats](currency-stats.md) - Retrieve the stats for a given currency \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/get/index.md b/docs/02_cleos/03_command-reference/get/index.md new file mode 100755 index 00000000000..594852b921f --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/index.md @@ -0,0 +1,19 @@ +## Description + +Retrieves various items and information from the blockchain + +## Subcommands +- [info](info.md) - Get current blockchain information +- [block](block.md) - Retrieve a full block from the blockchain +- [account](account.md) - Retrieve an account from the blockchain 
+- [code](code.md) - Retrieve the code and ABI for an account +- [abi](abi.md) - Retrieve the ABI for an account +- [table](table.md) - Retrieve the contents of a database table +- [scope](scope.md) - Retrieve a list of scopes and tables owned by a contract +- [currency](currency.md) - Retrieve information related to standard currencies +- [accounts](accounts.md) - Retrieve accounts associated with a public key +- [servants](servants.md) - Retrieve accounts which are servants of a given account +- [transaction](transaction.md) - Retrieve a transaction from the blockchain +- [actions](actions.md) - Retrieve all actions with specific account name referenced in authorization or receiver +- [schedule](schedule.md) - Retrieve the producer schedule +- [transaction_id](transaction_id.md) - Get transaction id given transaction object diff --git a/docs/02_cleos/03_command-reference/get/info.md b/docs/02_cleos/03_command-reference/get/info.md new file mode 100755 index 00000000000..4d1933875f2 --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/info.md @@ -0,0 +1,28 @@ +## Description + +Gets current blockchain information + +## Positional Parameters +This command does not accept any parameters. +## Options +- `-h` - --help Print this help message and exit +## Example + + +```shell +$ ./cleos get info +``` +This command simply returns the current blockchain state information. 
+ +```shell +{ + "server_version": "7451e092", + "head_block_num": 6980, + "last_irreversible_block_num": 6963, + "head_block_id": "00001b4490e32b84861230871bb1c25fb8ee777153f4f82c5f3e4ca2b9877712", + "head_block_time": "2017-12-07T09:18:48", + "head_block_producer": "initp", + "recent_slots": "1111111111111111111111111111111111111111111111111111111111111111", + "participation_rate": "1.00000000000000000" +} +``` diff --git a/docs/02_cleos/03_command-reference/get/schedule.md b/docs/02_cleos/03_command-reference/get/schedule.md new file mode 100755 index 00000000000..35286153d2d --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/schedule.md @@ -0,0 +1,28 @@ +## Description + +Retrieve the producer schedule + + +## Options +- `-h` - --help Print this help message and exit + +- `-j`- --json Output in JSON format + + +## Example + +```shell +$ ./cleos get schedule +``` +This command simply returns the current producer schedule. + +```shell +active schedule version 0 + Producer Producer key + ============= ================== + eosio EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV + +pending schedule empty + +proposed schedule empty +``` diff --git a/docs/02_cleos/03_command-reference/get/scope.md b/docs/02_cleos/03_command-reference/get/scope.md new file mode 100644 index 00000000000..b7a85088b67 --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/scope.md @@ -0,0 +1,12 @@ +## Description +Retrieves a list of scopes and tables owned by a contract + +## Positional Parameters +- `contract` _TEXT_ - The contract who owns the table + +## Options +- `-t,--table` _TEXT_ - The name of the table as filter +- `-l,--limit` _UINT_ - The maximum number of rows to return +- `-L,--lower` _TEXT_ - lower bound of scope +- `-U,--upper` _TEXT_ - upper bound of scope +- `-r,--reverse` - Iterate in reverse order diff --git a/docs/02_cleos/03_command-reference/get/servants.md b/docs/02_cleos/03_command-reference/get/servants.md new file mode 100755 index 
00000000000..873dbc45df0 --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/servants.md @@ -0,0 +1,36 @@ +## Description +Retrieve accounts which are servants of a given account + +## Info + +**Command** + +```shell +$ ./cleos get servants +``` +**Output** + +```shell +Usage: ./cleos get servants account + +Positionals: + account TEXT The name of the controlling account +``` + +## Command + + +```shell +$ ./cleos get servants inita +``` + +## Output + + +```shell +{ + "controlled_accounts": [ + "tester" + ] +} +``` diff --git a/docs/02_cleos/03_command-reference/get/table.md b/docs/02_cleos/03_command-reference/get/table.md new file mode 100755 index 00000000000..17d0183a924 --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/table.md @@ -0,0 +1,36 @@ +## Description + +Retrieves the contents of a database table + +## Positional Parameters +`contract` _TEXT_ - The contract who owns the table + +`scope` _TEXT_ - The scope within the contract in which the table is found + +`table` _TEXT_ - The name of the table as specified by the contract abi + +## Options +`-b,--binary` _UINT_ - Return the value as BINARY rather than using abi to interpret as JSON + +`-l,--limit` _UINT_ - The maximum number of rows to return + +`-k,--key` _TEXT_ - The name of the key to index by as defined by the abi, defaults to primary key + +`-L,--lower` _TEXT_ - JSON representation of lower bound value of key, defaults to first + +`-U,--upper` _TEXT_ - JSON representation of upper bound value of key, defaults to last + +## Example +Get the data from the accounts table for the eosio.token contract, for user eosio. + +```shell +$ cleos get table eosio.token eosio accounts +{ + "rows": [{ + "balance": "999999920.0000 SYS" + } + ], + "more": false +} + +``` diff --git a/docs/02_cleos/03_command-reference/get/transaction.md b/docs/02_cleos/03_command-reference/get/transaction.md new file mode 100755 index 00000000000..c23c67f82d0 --- /dev/null +++ 
b/docs/02_cleos/03_command-reference/get/transaction.md @@ -0,0 +1,79 @@ +## Description +Retrieves a transaction from the blockchain + +## Positional Parameters +`id`` _TEXT_ - ID of the transaction to retrieve + +## Options +` -b,--block-hint` UINT the block number this transaction may be in + +**Command** + +```shell +$ ./cleos get transaction +``` +**Output** + +## Example + + +```shell +$ ./cleos get transaction eb4b94b72718a369af09eb2e7885b3f494dd1d8a20278a6634611d5edd76b703 +{ + "transaction_id": "eb4b94b72718a369af09eb2e7885b3f494dd1d8a20278a6634611d5edd76b703", + "processed": { + "refBlockNum": 2206, + "refBlockPrefix": 221394282, + "expiration": "2017-09-05T08:03:58", + "scope": [ + "inita", + "tester" + ], + "signatures": [ + "1f22e64240e1e479eee6ccbbd79a29f1a6eb6020384b4cca1a958e7c708d3e562009ae6e60afac96f9a3b89d729a50cd5a7b5a7a647540ba1678831bf970e83312" + ], + "messages": [{ + "code": "eos", + "type": "transfer", + "authorization": [{ + "account": "inita", + "permission": "active" + } + ], + "data": { + "from": "inita", + "to": "tester", + "amount": 1000, + "memo": "" + }, + "hex_data": "000000008040934b00000000c84267a1e80300000000000000" + } + ], + "output": [{ + "notify": [{ + "name": "tester", + "output": { + "notify": [], + "sync_transactions": [], + "async_transactions": [] + } + },{ + "name": "inita", + "output": { + "notify": [], + "sync_transactions": [], + "async_transactions": [] + } + } + ], + "sync_transactions": [], + "async_transactions": [] + } + ] + } +} +``` + +[[info]] +|Important Note +The above transaction id will not exist on your blockchain diff --git a/docs/02_cleos/03_command-reference/get/transaction_id.md b/docs/02_cleos/03_command-reference/get/transaction_id.md new file mode 100755 index 00000000000..8118e8391fe --- /dev/null +++ b/docs/02_cleos/03_command-reference/get/transaction_id.md @@ -0,0 +1,10 @@ +## Description +Get transaction id given transaction object + +## Positional Parameters + +- `transaction` _TEXT_ - The 
JSON string or filename defining the transaction which transaction id we want to retrieve (required) + +## Options + + `-h,--help` Print this help message and exit \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/multisig/index.md b/docs/02_cleos/03_command-reference/multisig/index.md new file mode 100755 index 00000000000..2d7cf90942c --- /dev/null +++ b/docs/02_cleos/03_command-reference/multisig/index.md @@ -0,0 +1,12 @@ +## Description +Multisig contract commands + +## Subcommands +- [multisig propose](multisig-propose.md) - Propose transaction +- [multisig review](multisig-review.md) - Review transaction +- [multisig approve](multisig-approve.md) - Approve proposed transaction +- [multisig unapprove](multisig-unapprove.md) - Unapprove proposed transaction + +- [multisig invalidate](multisig-invalidate.md) - Invalidate all multisig approvals of an account +- [multisig cancel](multisig-cancel.md) - Cancel proposed transaction +- [multisig exec](multisig-exec.md) - Execute proposed transaction \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/multisig/multisig-approve.md b/docs/02_cleos/03_command-reference/multisig/multisig-approve.md new file mode 100755 index 00000000000..e5b0a94e110 --- /dev/null +++ b/docs/02_cleos/03_command-reference/multisig/multisig-approve.md @@ -0,0 +1,23 @@ +## Description +Approve proposed transaction + +## Positional Arguments +- `proposer` _TEXT_ - Proposer name (string) +- `proposal_name` _TEXT_ - Proposal name (string) +- `permissions` _TEXT_ - The JSON string or filename defining approving permissions +- `proposal_hash` _TEXT_ - Hash of proposed transaction (i.e. transaction ID) to optionally enforce as a condition of the approval + +## Options + +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-j,--json` print result as json +- `-d,--dont-broadcast` Don't broadcast transaction to the network (just print to stdout) +- `--return-packed` used in conjunction with --dont-broadcast to get the packed transaction +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/multisig/multisig-cancel.md b/docs/02_cleos/03_command-reference/multisig/multisig-cancel.md new file mode 100755 index 00000000000..12cd1cb3945 --- /dev/null +++ b/docs/02_cleos/03_command-reference/multisig/multisig-cancel.md @@ -0,0 +1,18 @@ +## Description +Cancel proposed transaction + +## Positional Arguments +- `proposer` _TEXT_ - Proposer name (string) +- `proposal_name` _TEXT_ - Proposal name (string) +- `canceler` _TEXT_ - Canceler name (string) +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/multisig/multisig-exec.md b/docs/02_cleos/03_command-reference/multisig/multisig-exec.md new file mode 100755 index 00000000000..ff760d43b1e --- /dev/null +++ b/docs/02_cleos/03_command-reference/multisig/multisig-exec.md @@ -0,0 +1,19 @@ +## Description + +Execute proposed transaction + +## Positional Arguments +- `proposer` _TEXT_ - Proposer name (string) +- `proposal_name` _TEXT_ - Proposal name (string) +- `executer` _TEXT_ - Account paying for execution (string) +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/multisig/multisig-invalidate.md b/docs/02_cleos/03_command-reference/multisig/multisig-invalidate.md new file mode 100755 index 00000000000..adcaed2e432 --- /dev/null +++ b/docs/02_cleos/03_command-reference/multisig/multisig-invalidate.md @@ -0,0 +1,19 @@ +## Description +Invalidate all multisig approvals of an account + +## Positionals: + invalidator TEXT invalidator name (string) (required) + +## Options: + - `-h,--help` Print this help message and exit + - `-x,--expiration` set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times + - `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction + - `-j,--json` print result as json + - `-d,--dont-broadcast` don't broadcast transaction to the network (just print to stdout) +- `--return-packed` used in conjunction with --dont-broadcast to get the packed transaction + - `-r,--ref-block` TEXT set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) + - `-p,--permission` TEXT ... An account and permission level to authorize, as in 'account@permission' (defaults to 'invalidator@active') + - `--max-cpu-usage-ms` UINT set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) + - `--max-net-usage` UINT set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) + - `--delay-sec` UINT set the delay_sec seconds, defaults to 0s \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/multisig/multisig-propose.md b/docs/02_cleos/03_command-reference/multisig/multisig-propose.md new file mode 100755 index 00000000000..978f288952d --- /dev/null +++ b/docs/02_cleos/03_command-reference/multisig/multisig-propose.md @@ -0,0 +1,23 @@ +## Description +Propose action + +## Positional Arguments +`proposal_name` _TEXT_ - Proposal name (string) +`requested_permissions` _TEXT_ - The JSON string or filename defining requested permissions +`trx_permissions` _TEXT_ - The JSON string or filename defining transaction permissions +`contract` _TEXT_ - Contract to wich deferred transaction should be delivered +`action` _TEXT_ - Action of deferred transaction +`data` _TEXT_ - The JSON string or filename defining the action to propose +`proposer` _TEXT_ - Account proposing the transaction +`proposal_expiration` _UINT_ - Proposal expiration interval in hours 
+## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/multisig/multisig-propose_trx.md b/docs/02_cleos/03_command-reference/multisig/multisig-propose_trx.md new file mode 100755 index 00000000000..843a5cea8bd --- /dev/null +++ b/docs/02_cleos/03_command-reference/multisig/multisig-propose_trx.md @@ -0,0 +1,26 @@ +##Description +Propose transaction + +## Positional Arguments + +`proposal_name` _TEXT_ - Proposal name (string) +`requested_permissions` _TEXT_ - The JSON string or filename defining requested permissions +`trx_permissions` _TEXT_ - The JSON string or filename defining transaction permissions +`contract` _TEXT_ - Contract to wich deferred transaction should be delivered +`action` _TEXT_ - Action of deferred transaction +`data` _TEXT_ - The JSON string or filename defining the action to propose +`proposer` 
_TEXT_ - Account proposing the transaction +`proposal_expiration` _UINT_ - Proposal expiration interval in hours + +## Options + +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/multisig/multisig-review.md b/docs/02_cleos/03_command-reference/multisig/multisig-review.md new file mode 100755 index 00000000000..6c3dd8dece6 --- /dev/null +++ b/docs/02_cleos/03_command-reference/multisig/multisig-review.md @@ -0,0 +1,17 @@ +## Description +Review transaction + +## Positional Arguments +- `proposer` _TEXT_ - Proposer name (string) +- `proposal_name` _TEXT_ - Proposal name (string) +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +## Examples diff --git a/docs/02_cleos/03_command-reference/multisig/multisig-unapprove.md b/docs/02_cleos/03_command-reference/multisig/multisig-unapprove.md new file mode 100755 index 00000000000..f2dda4b9e94 --- /dev/null +++ b/docs/02_cleos/03_command-reference/multisig/multisig-unapprove.md @@ -0,0 +1,20 @@ +## Description +Unapprove proposed transaction + +--- +## Positional Arguments +- `proposer` _TEXT_ - Proposer name (string) +- `proposal_name` _TEXT_ - Proposal name (string) +- `permissions` _TEXT_ - The JSON string of filename defining approving permissions +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `--return-packed` used in conjunction with --dont-broadcast to get the packed transaction +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/net/connect.md b/docs/02_cleos/03_command-reference/net/connect.md new file mode 100755 index 00000000000..3bbd9278b0b --- /dev/null +++ b/docs/02_cleos/03_command-reference/net/connect.md @@ -0,0 +1,18 @@ +## Description +Start a new connection to a peer + +**Command** + +```shell +./cleos net connect +``` + +**Output** + +```shell +start a new connection to a peer +Usage: ./cleos net connect host + +Positionals: + host TEXT The hostname:port to connect to. 
+``` diff --git a/docs/02_cleos/03_command-reference/net/disconnect.md b/docs/02_cleos/03_command-reference/net/disconnect.md new file mode 100755 index 00000000000..a605422d5bb --- /dev/null +++ b/docs/02_cleos/03_command-reference/net/disconnect.md @@ -0,0 +1,18 @@ +## Description +close an existing connection + +**Command** + +```shell +$ ./cleos net disconnect +``` + +**Output** + +```shell +close an existing connection +Usage: ./cleos net disconnect host + +Positionals: + host TEXT The hostname:port to disconnect from. +``` diff --git a/docs/02_cleos/03_command-reference/net/index.md b/docs/02_cleos/03_command-reference/net/index.md new file mode 100755 index 00000000000..bbfcdf583df --- /dev/null +++ b/docs/02_cleos/03_command-reference/net/index.md @@ -0,0 +1,19 @@ +## Description +Interact with local p2p network connections. + +## Command + +```shell +Interact with local p2p network connections +Usage: ./cleos net SUBCOMMAND +``` + +## Subcommands + + - [cleos net connect](connect) start a new connection to a peer + + - [cleos net disconnect](disconnect) ` close an existing connection + + - [cleos net status](status) status of existing connection + + - [cleos net peers](peers) status of all existing peers \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/net/peers.md b/docs/02_cleos/03_command-reference/net/peers.md new file mode 100755 index 00000000000..49ae89de590 --- /dev/null +++ b/docs/02_cleos/03_command-reference/net/peers.md @@ -0,0 +1,18 @@ +## Description +status of all existing peers + +**Command** + +```shell +$ ./cleos net status +``` +**Output** + +```shell +ERROR: RequiredError: host +status of existing connection +Usage: ./cleos net status host + +Positionals: + host TEXT The hostname:port to query status of connection +``` diff --git a/docs/02_cleos/03_command-reference/net/status.md b/docs/02_cleos/03_command-reference/net/status.md new file mode 100755 index 00000000000..fb7ef0d307d --- /dev/null +++ 
b/docs/02_cleos/03_command-reference/net/status.md @@ -0,0 +1,16 @@ +## Description +status of existing connection + +**Command** + +```shell +$ ./cleos net status +``` +**Output** + +```shell +Usage: ./cleos net status host + +Positionals: + host TEXT The hostname:port to query status of connection +``` diff --git a/docs/02_cleos/03_command-reference/push/index.md b/docs/02_cleos/03_command-reference/push/index.md new file mode 100755 index 00000000000..11f6233d9ca --- /dev/null +++ b/docs/02_cleos/03_command-reference/push/index.md @@ -0,0 +1,10 @@ +## Description +Pushes arbitrary transactions to the blockchain. + +## Commands + +[push action](push-action.md) Push a transaction with a single action + +[push transaction](push-transaction.md) Push an arbitrary JSON transaction + +[push transactions](push-transactions.md) Push an array of arbitrary JSON transactions \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/push/push-action.md b/docs/02_cleos/03_command-reference/push/push-action.md new file mode 100755 index 00000000000..82e8f657274 --- /dev/null +++ b/docs/02_cleos/03_command-reference/push/push-action.md @@ -0,0 +1,33 @@ +## Description +Push a transaction with a single action + +## Positionals + `contract` _Type: Text_ - The account providing the contract to execute + + `action` _Type: Text_ - The action to execute on the contract + + `data` _Type: Text_ - The arguments to the contract + +**Output** + +## Options + + ` -h,--help` - Print this help message and exit + + `-x,--expiration` - set the time in seconds before a transaction expires, defaults to 30s + + `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times + +` -s,--skip-sign` - Specify if unlocked wallet keys should be used to sign transaction + +`-j,--json` - print result as json + +`-d,--dont-broadcast` - don't broadcast transaction to the network (just print to stdout) + +`-p,--permission` _Type: Text_ - An account and permission level to authorize, as in 'account@permission' + +`--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) + +`--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) + +`--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s diff --git a/docs/02_cleos/03_command-reference/push/push-transaction.md b/docs/02_cleos/03_command-reference/push/push-transaction.md new file mode 100755 index 00000000000..97533cc51be --- /dev/null +++ b/docs/02_cleos/03_command-reference/push/push-transaction.md @@ -0,0 +1,36 @@ +## Description + +Push an arbitrary JSON transaction + +## Positional Parameters +- `transaction` (text) The JSON of the transaction to push, or the name of a JSON file containing the transaction + +## Options +This command has no options + +` -h,--help` - Print this help message and exit + +`-x,--expiration` - set the time in seconds before a transaction expires, defaults to 30s + +`-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times + +` -s,--skip-sign` - Specify if unlocked wallet keys should be used to sign transaction + +`-j,--json` - print result as json + +`-d,--dont-broadcast` - don't broadcast transaction to the network (just print to stdout) + +`-p,--permission` _Type: Text_ - An account and permission level to authorize, as in 'account@permission' + +`--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) + +`--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) + +`--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s + +## Example + + +```text +$ push transaction {} +``` diff --git a/docs/02_cleos/03_command-reference/push/push-transactions.md b/docs/02_cleos/03_command-reference/push/push-transactions.md new file mode 100755 index 00000000000..b2ea4aed0a4 --- /dev/null +++ b/docs/02_cleos/03_command-reference/push/push-transactions.md @@ -0,0 +1,8 @@ +## Description +Push an array of arbitrary JSON transactions + +## Positional Arguments +Pushes an array of arbitrary JSON transactions. +## Options +This command has no options +## Examples diff --git a/docs/02_cleos/03_command-reference/set/index.md b/docs/02_cleos/03_command-reference/set/index.md new file mode 100755 index 00000000000..0484ab9ff38 --- /dev/null +++ b/docs/02_cleos/03_command-reference/set/index.md @@ -0,0 +1,21 @@ +## Description +Sets or updates the blockchain state. 
+## Commands + +Set or update blockchain state + +```shell +Usage: ./cleos set [OPTIONS] SUBCOMMAND + +Options: + -h,--help Print this help message and exit +``` + +## Subcommands + +- [code](set-code) Create or update the code on an account +- [abi](set-abi) Create or update the abi on an account +- [contract](set-contract) Create or update the contract on an account +- [account](set-account) set or update blockchain account state +- [action](set-action) set or update blockchain action state + diff --git a/docs/02_cleos/03_command-reference/set/set-abi.md b/docs/02_cleos/03_command-reference/set/set-abi.md new file mode 100755 index 00000000000..3890ceacf4d --- /dev/null +++ b/docs/02_cleos/03_command-reference/set/set-abi.md @@ -0,0 +1,24 @@ +## Description +Create or update the abi on an account + +## Positionals +* `account` _TEXT_ - The account to set the ABI for (required) +* `abi-file` _TEXT_ - The fullpath containing the ABI for the contract (required) + +## Options +* `-h,--help` Print this help message and exit +* `-a,--abi` _TEXT_ - The ABI for the contract +* `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +* `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +* `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +* `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +* `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +* `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +* `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +* `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +* `--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s +## Usage + +```shell +cleos set abi someaccount1 ./path/to/abi.abi +``` diff --git a/docs/02_cleos/03_command-reference/set/set-account.md b/docs/02_cleos/03_command-reference/set/set-account.md new file mode 100755 index 00000000000..a0df2f42772 --- /dev/null +++ b/docs/02_cleos/03_command-reference/set/set-account.md @@ -0,0 +1,84 @@ +## Description +set parameters dealing with account permissions + +## Positionals +- `account` _TEXT_ - The account to set/delete a permission authority for +- `permission` _TEXT_ - The permission name to set/delete an authority for +- `authority` _TEXT_ - [delete] NULL, [create/update] public key, JSON string, or filename defining the authority +- `parent` _TEXT_ - [create] The permission name of this parents permission (Defaults to: "Active") +## Options + +`-h,--help` Print this help message and exit + +`--add-code` [code] add 'eosio.code' permission to specified permission authority + +`--remove-code` [code] remove 'eosio.code' permission from specified permission authority + +`-x,--expiration` _TEXT_ - set the time 
in seconds before a transaction expires, defaults to 30s + +`-f,--force-unique` - force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times + +`-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction + +`-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) + +`-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) + +`-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') + +`--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) + +`--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) + +`--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s + +## Command +To modify the permissions of an account, you must have the authority over the account and the permission of which you are modifying. The set account permission command is subject to change so it's associated Class is not fully documented. + +The first example associates a new key to the active permissions of an account. + +```shell +$ ./cleos set account permission test active '{"threshold" : 1, "keys" : [{"permission":{"key":"EOS8X7Mp7apQWtL6T2sfSZzBcQNUqZB7tARFEm9gA9Tn9nbMdsvBB","permission":"active"},"weight":1}], "accounts" : [{"permission":{"account":"acc2","permission":"active"},"weight":50}]}' owner +``` +This second example modifies the same account permission, but removes the key set in the last example, and grants active authority of the @test account to another account. 
+ +```shell +$ ./cleos set account permission test active '{"threshold" : 1, "keys" : [], "accounts" : [{"permission":{"account":"sandwich","permission":"active"},"weight":1},{"permission":{"account":"acc1","permission":"active"},"weight":50}]}' owner +``` +The third example demonstrates how to setup permissions for multisig. + +```shell +$ ./cleos set account permission test active '{"threshold" : 100, "keys" : [{"permission":{"key":"EOS8X7Mp7apQWtL6T2sfSZzBcQNUqZB7tARFEm9gA9Tn9nbMdsvBB","permission":"active"},"weight":25}], "accounts" : [{"permission":{"account":"@sandwich","permission":"active"},"weight":75}]}' owner +``` +The JSON object used in this command is actually composed of two different types of objects + +The authority JSON object ... + +```json +{ + "threshold" : 100, /*An integer that defines cumulative signature weight required for authorization*/ + "keys" : [], /*An array made up of individual permissions defined with an EOS PUBLIC KEY*/ + "accounts" : [] /*An array made up of individual permissions defined with an EOS ACCOUNT*/ +} +``` +...which includes one or more permissions objects. + +```json +/*Set Permission with Key*/ +{ + "permission" : { + "key" : "EOS8X7Mp7apQWtL6T2sfSZzBcQNUqZB7tARFEm9gA9Tn9nbMdsvBB", + "permission" : "active" + }, + weight : 25 /*Set the weight of a signature from this permission*/ +} + +/*Set Permission with Account*/ +{ + "permission" : { + "account" : "sandwich", + "permission" : "active" + }, + weight : 75 /*Set the weight of a signature from this permission*/ +} +``` diff --git a/docs/02_cleos/03_command-reference/set/set-action.md b/docs/02_cleos/03_command-reference/set/set-action.md new file mode 100755 index 00000000000..f19d5525165 --- /dev/null +++ b/docs/02_cleos/03_command-reference/set/set-action.md @@ -0,0 +1,72 @@ +## Description +Sets or updates an action's state on the blockchain. 
+ +**Command** + +```shell +$ ./cleos set action +``` +**Output** + +```shell +ERROR: RequiredError: Subcommand required +set or update blockchain action state +Usage: ./cleos set action [OPTIONS] SUBCOMMAND + +Options: + -h,--help Print this help message and exit + +Subcommands: + permission set parmaters dealing with account permissions +``` +**Command** + +```shell +$ ./cleos set action permission +``` + +## Positionals + +`account` TEXT The account to set/delete a permission authority for (required) + +`code` _Text_ The account that owns the code for the action + +`type` _Type_ the type of the action + +`requirement` _Type_ The permission name required for executing the given action + +## Options +`-h,--help` Print this help message and exit + +`-x,--expiration` _Type:Text_ - set the time in seconds before a transaction expires, defaults to 30s + +`-f,--force-unique` - force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidentally issuing the same transaction multiple times + +`-s,--skip-sign` Specify if unlocked wallet keys +should be used to sign transaction + +`-j,--json` print result as json + +`-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) + +`--return-packed` used in conjunction with --dont-broadcast to get the packed transaction + +`-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) + +`-p,--permission` _Type:Text_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') + +`--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) + +`--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) + +`--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s + +## Usage + 
+```shell +#Link a `voteproducer` action to the 'voting' permissions +cleos set action permission sandwichfarm eosio.system voteproducer voting -p sandwichfarm@voting + +#Now can execute the transaction with the previously set permissions. +cleos system voteproducer approve sandwichfarm someproducer -p sandwichfarm@voting +``` diff --git a/docs/02_cleos/03_command-reference/set/set-code.md b/docs/02_cleos/03_command-reference/set/set-code.md new file mode 100755 index 00000000000..538f8e7a16c --- /dev/null +++ b/docs/02_cleos/03_command-reference/set/set-code.md @@ -0,0 +1,42 @@ +## Description +Sets or updates an account's code on the blockchain. + +## Positionals + +* `account` _TEXT_ - The account to set code for (required) +* `code-file` _TEXT_ - The fullpath containing the contract WAST or WASM (required) + +## Options + +`-h,--help` Print this help message and exit +`-a,--abi` _TEXT_ - The ABI for the contract + +`-c,--clear` Remove contract on an account + +`--suppress-duplicate-check` Don't check for duplicate + +`-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s + +`-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times + +`-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction + +`-j,--json` print result as json + +`-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) + +`--return-packed` used in conjunction with --dont-broadcast to get the packed transaction + +`-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) + +`-p,--permission` _Type:Text_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') + +* `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +* `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +* `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +* `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +* `--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s + +```shell +cleos set code someaccount1 ./path/to/wasm +``` diff --git a/docs/02_cleos/03_command-reference/set/set-contract.md b/docs/02_cleos/03_command-reference/set/set-contract.md new file mode 100755 index 00000000000..b05bd349881 --- /dev/null +++ b/docs/02_cleos/03_command-reference/set/set-contract.md @@ -0,0 +1,79 @@ +## Description +Creates or updates the contract on an account. 
+## Positional Parameters + `account` _TEXT_ - The account to publish a contract for + `wast-file` _TEXT_ - The file containing the contract WAST or WASM + `abi-file` _TEXT_ - The ABI for the contract +## Options + +`-h,--help` Print this help message and exit + +`-a,--abi` _TEXT_ - The ABI for the contract + +`-c,--clear` Remove contract on an account + +`--suppress-duplicate-check` Don't check for duplicate + +`-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s + +`-f,--force-unique` - force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times + +`-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction + +`-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) + +`--return-packed` used in conjunction with --dont-broadcast to get the packed transaction + +`-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) + +`-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') + +`--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) + +`--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) + +`--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s + +## Example +Here we deploy the standard currency contract. + +```shell +$ ./cleos set contract currency ../../../contracts/currency/currency.wast ../../../contracts/currency/currency.abi +``` +This will output something similar to... + +```shell +Reading WAST... +Assembling WASM... +Publishing contract... 
+{ + "transaction_id": "9990306e13f630a9c5436a5a0b6fb8fe2c7f3da2f342b4898a39c4a2c17dcdb3", + "processed": { + "refBlockNum": 1208, + "refBlockPrefix": 3058534156, + "expiration": "2017-08-24T18:29:52", + "scope": [ + "currency", + "eos" + ], + "signatures": [], + "messages": [{ + "code": "eos", + "type": "setcode", + "authorization": [{ + "account": "currency", + "permission": "active" + } + ], + "data": "00000079b822651d0000e8150061736d0100000001390a60017e0060037e7e7f017f60047e7e7f7f017f60017f0060057e7e7e7f7f017f60027f7f0060027f7f017f60027e7f0060000060027e7e00029d010a03656e7606617373657274000503656e76086c6f61645f693634000403656e76067072696e7469000003656e76067072696e746e000003656e76067072696e7473000303656e760b726561644d657373616765000603656e760a72656d6f76655f693634000103656e760b7265717569726541757468000003656e760d726571756972654e6f74696365000003656e760973746f72655f6936340002030706000007030809040401700000050301000107cc0107066d656d6f72790200205f5a4e33656f733133726571756972654e6f74696365454e535f344e616d6545000a1e5f5a4e33656f7331317265717569726541757468454e535f344e616d6545000b345f5a4e3863757272656e6379313273746f72654163636f756e74454e33656f73344e616d6545524b4e535f374163636f756e7445000c355f5a4e3863757272656e637932336170706c795f63757272656e63795f7472616e7366657245524b4e535f385472616e7366657245000d04696e6974000e056170706c79000f0a9d0d060600200010080b0600200010070b3400024020012903084200510d0020004280808080a8d7bee3082001411010091a0f0b20004280808080a8d7bee308200110061a0b8a0604017e027f047e017f4100410028020441206b2208360204200029030821052000290300210720002903102104411010042004100241c000100442808080c887d7c8b21d100341d00010042007100341e000100420051003200029030021052000290308100820051008200029030010072000290300210142002105423b210441f00021034200210603400240024002400240024020054206560d0020032c00002202419f7f6a41ff017141194b0d01200241a0016a21020c020b420021072005420b580d020c030b200241ea016a41002002414f6a41ff01714105491b21020b2002ad42388642388721070b2007421f83200442ffffffff0f838621070b200
341016a2103200542017c2105200720068421062004427b7c2204427a520d000b420021052008420037031820082006370310200142808080c887d7c8b21d4280808080a8d7bee308200841106a411010011a200041086a2903002101423b210441f00021034200210603400240024002400240024020054206560d0020032c00002202419f7f6a41ff017141194b0d01200241a0016a21020c020b420021072005420b580d020c030b200241ea016a41002002414f6a41ff01714105491b21020b2002ad42388642388721070b2007421f83200442ffffffff0f838621070b200341016a2103200542017c2105200720068421062004427b7c2204427a520d000b2008200637030020084200370308200142808080c887d7c8b21d4280808080a8d7bee3082008411010011a200841186a2203290300200041106a22022903005a418001100020032003290300200229030022057d370300200520082903087c20055a41b00110002008200829030820022903007c370308200029030021050240024020032903004200510d0020054280808080a8d7bee308200841106a411010091a0c010b20054280808080a8d7bee308200841106a10061a0b200041086a290300210502400240200841086a2903004200510d0020054280808080a8d7bee3082008411010091a0c010b20054280808080a8d7bee308200810061a0b4100200841206a3602040b980303027f057e017f4100410028020441106b220736020442002103423b210241e00121014200210403400240024002400240024020034207560d0020012c00002200419f7f6a41ff017141194b0d01200041a0016a21000c020b420021052003420b580d020c030b200041ea016a41002000414f6a41ff01714105491b21000b2000ad42388642388721050b2005421f83200242ffffffff0f838621050b200141016a2101200342017c2103200520048421042002427b7c2202427a520d000b42002103423b210241f00021014200210603400240024002400240024020034206560d0020012c00002200419f7f6a41ff017141194b0d01200041a0016a21000c020b420021052003420b580d020c030b200041ea016a41002000414f6a41ff01714105491b21000b2000ad42388642388721050b2005421f83200242ffffffff0f838621050b200141016a2101200342017c2103200520068421062002427b7c2202427a520d000b2007428094ebdc033703082007200637030020044280808080a8d7bee3082007411010091a4100200741106a3602040bb10303027f047e017f4100410028020441206b220836020442002105423b210441e00121034200210603400240024002400240024020054207560d0020032c00002202419
f7f6a41ff017141194b0d01200241a0016a21020c020b420021072005420b580d020c030b200241ea016a41002002414f6a41ff01714105491b21020b2002ad42388642388721070b2007421f83200442ffffffff0f838621070b200341016a2103200542017c2105200720068421062004427b7c2204427a520d000b024020062000520d0042002105423b210441f00121034200210603400240024002400240024020054207560d0020032c00002202419f7f6a41ff017141194b0d01200241a0016a21020c020b420021072005420b580d020c030b200241ea016a41002002414f6a41ff01714105491b21020b2002ad42388642388721070b2007421f83200442ffffffff0f838621070b200341016a2103200542017c2105200720068421062004427b7c2204427a520d000b20062001520d00200842003703102008420037030820084200370318200841086a4118100541174b4180021000200841086a100d0b4100200841206a3602040b0bff010b0041040b04200500000041100b2254686973206170706561727320746f2062652061207472616e73666572206f6620000041c0000b0220000041d0000b072066726f6d20000041e0000b0520746f20000041f0000b086163636f756e7400004180010b2c696e746567657220756e646572666c6f77207375627472616374696e6720746f6b656e2062616c616e6365000041b0010b26696e7465676572206f766572666c6f7720616464696e6720746f6b656e2062616c616e6365000041e0010b0963757272656e6379000041f0010b097472616e7366657200004180020b1e6d6573736167652073686f72746572207468616e2065787065637465640000fd02046e616d651006617373657274020000086c6f61645f693634050000000000067072696e74690100067072696e746e0100067072696e747301000b726561644d6573736167650200000a72656d6f76655f693634030000000b726571756972654175746801000d726571756972654e6f7469636501000973746f72655f6936340400000000205f5a4e33656f733133726571756972654e6f74696365454e535f344e616d65450101301e5f5a4e33656f7331317265717569726541757468454e535f344e616d6545010130345f5a4e3863757272656e6379313273746f72654163636f756e74454e33656f73344e616d6545524b4e535f374163636f756e74450201300131355f5a4e3863757272656e637932336170706c795f63757272656e63795f7472616e7366657245524b4e535f385472616e73666572450901300131013201330134013501360137013804696e69740801300131013201330134013501360137056170706c79090130013101320133013
40135013601370138010b4163636f756e744e616d65044e616d6502087472616e7366657200030466726f6d0b4163636f756e744e616d6502746f0b4163636f756e744e616d65087175616e746974790655496e743634076163636f756e740002036b65790655496e7436340762616c616e63650655496e74363401000000b298e982a4087472616e736665720100000080bafac608076163636f756e74" + } + ], + "output": [{ + "notify": [], + "sync_transactions": [], + "async_transactions": [] + } + ] + } +} +``` diff --git a/docs/02_cleos/03_command-reference/sign.md b/docs/02_cleos/03_command-reference/sign.md new file mode 100644 index 00000000000..abced2d041c --- /dev/null +++ b/docs/02_cleos/03_command-reference/sign.md @@ -0,0 +1,16 @@ +## Description +Sign a transaction + +## Usage +```sh +$ cleos sign [OPTIONS] transaction +``` + +## Positional Parameters +- `transaction` _TEXT_ - The JSON string or filename defining the transaction to sign + +## Options +- `-k,--private-key` _TEXT_ - The private key that will be used to sign the transaction +- `--public-key` _TEXT_ - Ask keosd to sign with the corresponding private key of the given public key +- `-c,--chain-id` _TEXT_ - The chain id that will be used to sign the transaction +- `-p,--push-transaction` - Push transaction after signing diff --git a/docs/02_cleos/03_command-reference/system/index.md b/docs/02_cleos/03_command-reference/system/index.md new file mode 100755 index 00000000000..207da518e40 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/index.md @@ -0,0 +1,23 @@ +## Description + +Send eosio.system contract action to the blockchain. 
+ +## Subcommands + +- [system newaccount](system-newaccount.md) - Create a new account on the blockchain with initial resources +- [system regproducer](system-regproducer.md) - Register a new producer +- [system unregprod](system-unregprod.md) - Unregister an existing producer +- [system voteproducer](system-voteproducer.md) - Vote for a producer +- [system listproducers](system-listproducers.md) - List producers +- [system delegatebw](system-delegatebw.md) - Delegate bandwidth +- [system undelegatebw](system-undelegatebw.md) - Undelegate bandwidth +- [system listbw](system-listbw.md) - List delegated bandwidth +- [system bidname](system-bidname.md) - Name bidding +- [system bidnameinfo](system-bidnameinfo.md) - Get bidname info +- [system buyram](system-buyram.md) - Buy RAM +- [system sellram](system-sellram.md) - Sell RAM +- [system claimrewards](system-claimrewards.md) - Claim producer rewards +- [system regproxy](system-regproxy.md) - Register an account as a proxy (for voting) +- [system unregproxy](system-unregproxy.md) - Unregister an account as a proxy (for voting) +- [system canceldelay](system-canceldelay.md) - Cancel a delayed transaction + - [system rex](system-rex.md) - Actions related to REX (the resource exchange) diff --git a/docs/02_cleos/03_command-reference/system/system-bidname.md b/docs/02_cleos/03_command-reference/system/system-bidname.md new file mode 100755 index 00000000000..c1d03163a42 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-bidname.md @@ -0,0 +1,28 @@ +## Description + +Place a bid on "premium" usernames (account names less than 12 characters) + +## Positionals +* `bidder` _TEXT_ - The bidding account (required) +* `newname` _TEXT_ - The bidding name (required) +* `bid` _TEXT_ - The amount of CORE SYMBOL to bid (required) +## Options +* `-h,--help` - Print this help message and exit +* `-x,--expiration` - set the time in seconds before a transaction expires, defaults to 30s +* `-f,--force-unique` - force the 
transaction to be unique. this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +* `-s,--skip-sign` - Specify if unlocked wallet keys should be used to sign transaction +* `-j,--json` - print result as json +* `-d,--dont-broadcast` - don't broadcast transaction to the network (just print to stdout) +* `-r,--ref-block` _TEXT_ - set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +* `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' +* `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +* `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Usage + + +```shell +cleos system bidname accountname1 bob "10 SYS" +``` diff --git a/docs/02_cleos/03_command-reference/system/system-bidnameinfo.md b/docs/02_cleos/03_command-reference/system/system-bidnameinfo.md new file mode 100755 index 00000000000..d5541078e4b --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-bidnameinfo.md @@ -0,0 +1,16 @@ +## Description + +Returns data about a particular name and it's bidding status + +## Positionals +* `name` _TEXT_ - The name to lookup + +## Options +* `-h,--help` - Print this help message and exit +* `-j,--json` - Print result as json + +## Usage + +```shell +cleos system bidnameinfo bob +``` diff --git a/docs/02_cleos/03_command-reference/system/system-buyram.md b/docs/02_cleos/03_command-reference/system/system-buyram.md new file mode 100755 index 00000000000..028df3f8384 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-buyram.md @@ -0,0 +1,22 @@ +## Description +Buy RAM + +## Positional 
Arguments +- `payer` _TEXT_ - The account paying for RAM +- `receiver` _TEXT_ - The account receiving bought RAM +- `amount` _TEXT_ - The amount of EOS to pay for RAM + +## Options +- `-h,--help` Print this help message and exit +- `-k,--kbytes` buyram in number of kibibytes (KiB) +- `-b,--bytes` buyram in number of bytes +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidentally issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-j,--json` - print result as json +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s diff --git a/docs/02_cleos/03_command-reference/system/system-canceldelay.md b/docs/02_cleos/03_command-reference/system/system-canceldelay.md new file mode 100755 index 00000000000..12b8c89a991 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-canceldelay.md @@ -0,0 +1,20 @@ +## Description + +Cancel a delayed transaction + +## Positional Arguments +- `canceling_account` _TEXT_ - Account from authorization on the original delayed transaction +- `canceling_permission` _TEXT_ - Permission from authorization on the original
delayed transaction +- `trx_id` _TEXT_ - The transaction id of the original delayed transaction +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/system/system-claimrewards.md b/docs/02_cleos/03_command-reference/system/system-claimrewards.md new file mode 100755 index 00000000000..63c21575d1a --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-claimrewards.md @@ -0,0 +1,20 @@ +## Description + +Claim producer rewards + +## Positional Arguments +- `owner` _TEXT_ - The account to claim rewards for +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidentally issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-delegatebw.md b/docs/02_cleos/03_command-reference/system/system-delegatebw.md new file mode 100755 index 00000000000..9cd96812433 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-delegatebw.md @@ -0,0 +1,24 @@ +--- +title: "system delegatebw" +excerpt: "Delegate bandwidth" +--- +## Positional Arguments +- `from` _TEXT_ - The account delegating bandwidth +- `receiver` _TEXT_ - The account to delegate bandwidth to +- `stake_net_quantity` _TEXT_ - The amount of EOS to delegate for network bandwidth +- `stake_cpu_quantity` _TEXT_ - The amount of EOS to delegate for CPU bandwidth + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique.
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-listbw.md b/docs/02_cleos/03_command-reference/system/system-listbw.md new file mode 100755 index 00000000000..325f30efc45 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-listbw.md @@ -0,0 +1,12 @@ +## Description + +List delegated bandwidth + +## Positional Arguments +- `account` _TEXT_ - The account delegated bandwidth + +## Options +`-j,--json` - Output in JSON format +`-h,--help` - Print this help message and exit + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-listproducers.md b/docs/02_cleos/03_command-reference/system/system-listproducers.md new file mode 100755 index 00000000000..c728b0fb27b --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-listproducers.md @@ -0,0 +1,12 @@ +## Description + +List producers + +## Positional Arguments + +## Options + +`-h,--help` Print this help message and exit +`-j,--json` Output in JSON format +`-l,--limit` _UINT_ The maximum number of 
rows to return +`-L,--lower` _TEXT_ lower bound value of key, defaults to first \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/system/system-newaccount.md b/docs/02_cleos/03_command-reference/system/system-newaccount.md new file mode 100755 index 00000000000..5e0b92cb1dc --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-newaccount.md @@ -0,0 +1,28 @@ +## Description + +Create an account, buy ram, stake for bandwidth for the account + +## Positional Arguments +- `creator` _TEXT_ - The name of the account creating the new account +- `name` _TEXT_ - The name of the new account +- `OwnerKey` _TEXT_ - The owner public key for the new account +- `ActiveKey` _TEXT_ - The active public key for the new account +## Options +- `-h,--help` Print this help message and exit +- `--stake-net` _TEXT_ - The amount of EOS delegated for net bandwidth +- `--stake-cpu` _TEXT_ - The amount of EOS delegated for CPU bandwidth +- `--buy-ram-bytes` _UINT_ - The amount of RAM bytes to purchase for the new account in kilobytes KiB, default is 8 KiB +- `--buy-ram-EOS` _TEXT_ - The amount of RAM bytes to purchase for the new account in EOS +- `--transfer` - Transfer voting power and right to unstake EOS to receiver +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-regproducer.md b/docs/02_cleos/03_command-reference/system/system-regproducer.md new file mode 100755 index 00000000000..eb0914d5352 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-regproducer.md @@ -0,0 +1,22 @@ +--- +title: "system regproducer" +excerpt: "Register a new producer" +--- +## Positional Arguments +- `account` _TEXT_ - The account to register as a producer +- `producer_key` _TEXT_ - The producer's public key +- `url` _TEXT_ - URL where info about producer can be found +- `location` _UINT_ - Relative location for purpose of nearest neighbor scheduling +## Options +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-regproxy.md b/docs/02_cleos/03_command-reference/system/system-regproxy.md new file mode 100755 index 00000000000..97d53a7e0d3 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-regproxy.md @@ -0,0 +1,19 @@ +--- +title: "system regproxy" +excerpt: "Register an account as a proxy (for voting)" +--- +## Positional Arguments +`proxy` _TEXT_ - The proxy account to register +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-rex-buyrex.md b/docs/02_cleos/03_command-reference/system/system-rex-buyrex.md new file mode 100755 index 00000000000..5bae51820a7 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-buyrex.md @@ -0,0 +1,27 @@ +--- +title: "system rex buyrex" +excerpt: "Buy REX using tokens in owner's REX fund" +--- +## Positionals +- `from` _TEXT_ - Account buying REX tokens (required) +- `amount` _TEXT_ - Amount to be taken from REX fund and used in buying REX (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + + +```text +cleos system rex buyrex accountname1 "1 REX" +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-cancelrexorder.md b/docs/02_cleos/03_command-reference/system/system-rex-cancelrexorder.md new file mode 100755 index 00000000000..07e74046858 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-cancelrexorder.md @@ -0,0 +1,26 @@ +--- +title: "system rex cancelrexorder" +excerpt: "Cancel queued REX sell order if one exists" +--- +## Positionals +- `owner` _TEXT_ - Owner account of sell order (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + + +```text +cleos system rex cancelrexorder accountname1 +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-closerex.md b/docs/02_cleos/03_command-reference/system/system-rex-closerex.md new file mode 100755 index 00000000000..979b1211641 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-closerex.md @@ -0,0 +1,26 @@ +--- +title: "system rex closerex" +excerpt: "Delete unused REX-related user table entries" +--- +## Positionals +- `owner` _TEXT_ - Account which owns the REX fund (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + + +```text +cleos system rex closerex accountname1 +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-consolidate.md b/docs/02_cleos/03_command-reference/system/system-rex-consolidate.md new file mode 100755 index 00000000000..2f1f8b1f018 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-consolidate.md @@ -0,0 +1,26 @@ +--- +title: "system rex consolidate" +excerpt: "Consolidate REX maturity buckets into one that matures in 4 days" +--- +## Positionals +- `owner` _TEXT_ - Account which owns the REX fund (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + + +```text +cleos system rex consolidate accountname1 +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-defundcpuloan.md b/docs/02_cleos/03_command-reference/system/system-rex-defundcpuloan.md new file mode 100755 index 00000000000..167fa2c8f7f --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-defundcpuloan.md @@ -0,0 +1,26 @@ +--- +title: "system rex defundcpuloan" +excerpt: "Withdraw from a CPU loan fund" +--- +## Positionals +- `from` _TEXT_ - Loan owner (required) +- `loan_num` _TEXT_ - Loan ID (required) +- `payment` _TEXT_ - Amount to be deposited (required) +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + +```text +cleos system rex defundcpuloan accountname1 abc123 "1 EOS" +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-defundnetloan.md b/docs/02_cleos/03_command-reference/system/system-rex-defundnetloan.md new file mode 100755 index 00000000000..80193deb667 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-defundnetloan.md @@ -0,0 +1,29 @@ +## Description + +Withdraw from a Network loan fund + +## Positionals +- `from` _TEXT_ - Loan owner (required) +- `loan_num` _TEXT_ - Loan ID (required) +- `payment` _TEXT_ - Amount to be deposited (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-j,--json` print result as json +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s + +## Examples + + + +```text +cleos system rex defundnetloan accountname1 abc123 "1 EOS" +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-deposit.md b/docs/02_cleos/03_command-reference/system/system-rex-deposit.md new file mode 100755 index 00000000000..da277a71554 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-deposit.md @@ -0,0 +1,25 @@ +--- +title: "system rex deposit" +excerpt: "Deposit into owner's REX fund by transfering from owner's liquid token balance" +--- +## Positionals +- `owner` _TEXT_ - Account which owns the REX fund (required) +- `amount` _TEXT_ - Amount to be deposited into REX fund (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-j,--json` print result as json +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +## Examples + + +```text +cleos system rex deposit accountname1 "1 SYS" +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-fundcpuloan.md b/docs/02_cleos/03_command-reference/system/system-rex-fundcpuloan.md new file mode 100755 index 00000000000..c40c414830a --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-fundcpuloan.md @@ -0,0 +1,27 @@ +--- +title: "system rex fundcpuloan" +excerpt: "Deposit into a CPU loan fund" +--- +## Positionals + +- `from` _TEXT_ - Loan owner (required) +- `loan_num` _TEXT_ - Loan ID (required) +- `payment` _TEXT_ - Amount to be deposited (required) +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-j,--json` print result as json +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s + +## Examples + +```text +cleos system rex fundcpuloan accountname1 abc123 "1 EOS" +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-fundnetloan.md b/docs/02_cleos/03_command-reference/system/system-rex-fundnetloan.md new file mode 100755 index 00000000000..f855d313567 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-fundnetloan.md @@ -0,0 +1,26 @@ +--- +title: "system rex fundnetloan" +excerpt: "Deposit into a Network loan fund" +--- +## Positionals +- `from` _TEXT_ - Loan owner (required) +- `loan_num` _TEXT_ - Loan ID (required) +- `payment` _TEXT_ - Amount to be deposited (required) +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-j,--json` print result as json +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +## Examples + + +```text +cleos system rex fundnetloan accountname1 abc123 "1 EOS" +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-lendrex.md b/docs/02_cleos/03_command-reference/system/system-rex-lendrex.md new file mode 100755 index 00000000000..c9003931d6c --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-lendrex.md @@ -0,0 +1,27 @@ +--- +title: "system rex lendrex" +excerpt: "Deposit tokens to REX fund and use the tokens to buy REX" +--- +## Positionals +- `from` _TEXT_ - Account buying REX tokens (required) +- `amount` _TEXT_ - Amount to be taken from REX fund and used in buying REX (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + + +```text +cleos system rex lendrex accountname1 "1 REX" +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-mvfromsavings.md b/docs/02_cleos/03_command-reference/system/system-rex-mvfromsavings.md new file mode 100755 index 00000000000..b872b97661b --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-mvfromsavings.md @@ -0,0 +1,27 @@ +--- +title: "system rex mvfromsavings" +excerpt: "Move REX tokens out of savings bucket" +--- +## Positionals +- `owner` _TEXT_ - Account which owns the REX fund (required) +- `rex` _TEXT_ - Amount of REX to be moved out of savings bucket (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + + +```text +cleos system rex mvfromsavings accountname1 "1 REX" +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-mvtosavings.md b/docs/02_cleos/03_command-reference/system/system-rex-mvtosavings.md new file mode 100755 index 00000000000..91682612b1c --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-mvtosavings.md @@ -0,0 +1,28 @@ +--- +title: "system rex mvtosavings" +excerpt: "Move REX tokens to savings bucket" +--- +## Positionals +- `owner` _TEXT_ - Account which owns the REX fund (required) +- `rex` _TEXT_ - Amount of REX to be moved to savings bucket (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + + +## Examples + + +```text +cleos system rex mvtosavings accountname1 "1 REX" +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-rentcpu.md b/docs/02_cleos/03_command-reference/system/system-rex-rentcpu.md new file mode 100755 index 00000000000..f9bd424042d --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-rentcpu.md @@ -0,0 +1,24 @@ +## Positionals +- `from` _TEXT_ - Account buying REX tokens (required) +- `receiver` _TEXT_ - Account to whom rented CPU bandwidth is staked (required) +- `loan_payment` _TEXT_ - Loan fee to be paid, used to calculate amount of rented bandwidth (required) +- `loan_fund` _TEXT_ - Loan fund to be used in automatic renewal, can be 0 tokens (required) +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + + +```text +cleos system rex rentcpu accountname1 accountname2 "1 EOS" 0 +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-rentnet.md b/docs/02_cleos/03_command-reference/system/system-rex-rentnet.md new file mode 100755 index 00000000000..d51b4f1c6ad --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-rentnet.md @@ -0,0 +1,24 @@ +## Positionals +- `from` _TEXT_ - Account buying REX tokens (required) +- `receiver` _TEXT_ - Account to whom rented NET bandwidth is staked (required) +- `loan_payment` _TEXT_ - Loan fee to be paid, used to calculate amount of rented bandwidth (required) +- `loan_fund` _TEXT_ - Loan fund to be used in automatic renewal, can be 0 tokens (required) +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + + +```text +cleos system rex rentnet accountname1 accountname2 "1 EOS" 0 +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-rexexec.md b/docs/02_cleos/03_command-reference/system/system-rex-rexexec.md new file mode 100755 index 00000000000..006a66ca714 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-rexexec.md @@ -0,0 +1,23 @@ +## Positionals +- `user` _TEXT_ - User executing the action (required) +- `max` _TEXT_ - Maximum number of CPU loans, Network loans, and sell orders to be processed (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + + +```text +cleos system rex rexexec accountname1 10 +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-sellrex.md b/docs/02_cleos/03_command-reference/system/system-rex-sellrex.md new file mode 100755 index 00000000000..9863b43ae87 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-sellrex.md @@ -0,0 +1,20 @@ +## Positionals +- `from` _TEXT_ - Account selling REX tokens (required) +- `rex` _TEXT_ - Amount of REX tokens to be sold (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +## Examples + + +```text +cleos system rex sellrex accountname1 "1 REX" +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-unstaketorex.md b/docs/02_cleos/03_command-reference/system/system-rex-unstaketorex.md new file mode 100755 index 00000000000..f5ea340c0ae --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-unstaketorex.md @@ -0,0 +1,24 @@ +## Positionals +- `from` _TEXT_ - Account buying REX tokens (required) +- `receiver` _TEXT_ - Account that tokens have been staked to (required) +- `from_cpu` _TEXT_ - Amount to be unstaked from CPU resources and used in REX purchase (required) +- `from_net` _TEXT_ - Amount to be unstaked from Net resources and used in REX purchase (required) +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + + +```text +cleos system rex unstaketorex accountname1 accountname2 "1 EOS" 0 +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-updaterex.md b/docs/02_cleos/03_command-reference/system/system-rex-updaterex.md new file mode 100755 index 00000000000..58c8f63c766 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-updaterex.md @@ -0,0 +1,22 @@ +## Positionals +- `owner` _TEXT_ - Account which owns the REX fund (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + + +```text +cleos system rex updaterex accountname1 +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex-withdraw.md b/docs/02_cleos/03_command-reference/system/system-rex-withdraw.md new file mode 100755 index 00000000000..a6ba219437a --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex-withdraw.md @@ -0,0 +1,23 @@ +## Positionals +- `owner` _TEXT_ - Account which owns the REX fund (required) +- `amount` _TEXT_ - Amount to be withdrawn from REX fund (required) + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - Force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ - Set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - Set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - Set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples + + +```text +cleos system rex withdraw accountname1 "1 SYS" +``` diff --git a/docs/02_cleos/03_command-reference/system/system-rex.md b/docs/02_cleos/03_command-reference/system/system-rex.md new file mode 100755 index 00000000000..8d872d1727b --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-rex.md @@ -0,0 +1,25 @@ +## Description + +Actions related to REX (the resource exchange). 
+ +## Subcommands + +- [deposit](system-rex-deposit) - Deposit into owner's REX fund by transferring from owner's liquid token balance +- [withdraw](system-rex-withdraw) - Withdraw from owner's REX fund by transferring to owner's liquid token balance +- [buyrex](system-rex-buyrex) - Buy REX using tokens in owner's REX fund +- [lendrex](system-rex-lendrex) - Deposit tokens to REX fund and use the tokens to buy REX +- [unstaketorex](system-rex-unstaketorex) - Buy REX using staked tokens +- [sellrex](system-rex-sellrex) - Sell REX tokens +- [cancelrexorder](system-rex-cancelrexorder) - Cancel queued REX sell order if one exists +- [mvtosavings](system-rex-mvtosavings) - Move REX tokens to savings bucket +- [mvfromsavings](system-rex-mvfromsavings) - Move REX tokens out of savings bucket +- [rentcpu](system-rex-rentcpu) - Rent CPU bandwidth for 30 days +- [rentnet](system-rex-rentnet) - Rent Network bandwidth for 30 days +- [fundcpuloan](system-rex-fundcpuloan) - Deposit into a CPU loan fund +- [fundnetloan](system-rex-fundnetloan) - Deposit into a Network loan fund +- [defundcpuloan](system-rex-defundcpuloan) - Withdraw from a CPU loan fund +- [defundnetloan](system-rex-defundnetloan) - Withdraw from a Network loan fund +- [consolidate](system-rex-consolidate) - Consolidate REX maturity buckets into one that matures in 4 days +- [updaterex](system-rex-updaterex) - Update REX owner vote stake and vote weight +- [rexexec](system-rex-rexexec) - Perform REX maintenance by processing expired loans and unfilled sell orders +- [closerex](system-rex-closerex) - Delete unused REX-related user table entries diff --git a/docs/02_cleos/03_command-reference/system/system-sellram.md b/docs/02_cleos/03_command-reference/system/system-sellram.md new file mode 100755 index 00000000000..0230f70002a --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-sellram.md @@ -0,0 +1,17 @@ +## Positional Arguments +- `account` _TEXT_ - The account to receive EOS for sold RAM +- 
`bytes` _UINT_ - Number of RAM bytes to sell +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-undelegatebw.md b/docs/02_cleos/03_command-reference/system/system-undelegatebw.md new file mode 100755 index 00000000000..d1342c5f3de --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-undelegatebw.md @@ -0,0 +1,20 @@ +## Positional Arguments +- `from` _TEXT_ - The account undelegating bandwidth +- `receiver` _TEXT_ - The account to undelegate bandwidth from +- `unstake_net_quantity` _TEXT_ - The amount of EOS to undelegate for network bandwidth +- `unstake_cpu_quantity` _TEXT_ - The amount of EOS to undelegate for CPU bandwidth + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction 
expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-unregprod.md b/docs/02_cleos/03_command-reference/system/system-unregprod.md new file mode 100755 index 00000000000..2eae07b63dd --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-unregprod.md @@ -0,0 +1,16 @@ +## Positional Arguments +`account` _TEXT_ - The account to unregister as a producer +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-unregproxy.md b/docs/02_cleos/03_command-reference/system/system-unregproxy.md new file mode 100755 index 00000000000..9e61609a946 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-unregproxy.md @@ -0,0 +1,16 @@ +## Positional Arguments +- `proxy` _TEXT_ - The proxy account to unregister +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-voteproducer-approve.md b/docs/02_cleos/03_command-reference/system/system-voteproducer-approve.md new file mode 100755 index 00000000000..1cdaebeda2d --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-voteproducer-approve.md @@ -0,0 +1,17 @@ +## Positional Arguments + - `voter` _TEXT_ - The voting account + - `producer` _TEXT_ - The account to vote for +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-voteproducer-prods.md b/docs/02_cleos/03_command-reference/system/system-voteproducer-prods.md new file mode 100755 index 00000000000..73efca2d6f0 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-voteproducer-prods.md @@ -0,0 +1,17 @@ +## Positional Arguments +- `voter` _TEXT_ - The voting account +- `producers` _TEXT ..._ - The account(s) to vote for. All options from this position and following will be treated as the producer list. +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-voteproducer-proxy.md b/docs/02_cleos/03_command-reference/system/system-voteproducer-proxy.md new file mode 100755 index 00000000000..ecf1fda7920 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-voteproducer-proxy.md @@ -0,0 +1,18 @@ +## Positional Arguments +- `voter` _TEXT_ - The voting account +- `proxy` _TEXT_ - The proxy account + +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-voteproducer-unapprove.md b/docs/02_cleos/03_command-reference/system/system-voteproducer-unapprove.md new file mode 100755 index 00000000000..f43dcabd7de --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-voteproducer-unapprove.md @@ -0,0 +1,17 @@ +## Positional Arguments +- `voter` _TEXT_ - The voting account +- `producer` _TEXT_ - The account to remove from voted producers +## Options +- `-h,--help` Print this help message and exit +- `-x,--expiration` _TEXT_ - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. 
this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` Specify if unlocked wallet keys should be used to sign transaction +- `-d,--dont-broadcast` - Don't broadcast transaction to the network (just print to stdout) +- `-r,--ref-block` _TEXT_ set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `-p,--permission` _TEXT_ - An account and permission level to authorize, as in 'account@permission' (defaults to 'account@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ set the delay_sec seconds, defaults to 0s +- `-j,--json` print result as json + +## Examples diff --git a/docs/02_cleos/03_command-reference/system/system-voteproducer.md b/docs/02_cleos/03_command-reference/system/system-voteproducer.md new file mode 100755 index 00000000000..4a4af137b23 --- /dev/null +++ b/docs/02_cleos/03_command-reference/system/system-voteproducer.md @@ -0,0 +1,5 @@ +## Subcommands +- [voteproducer proxy](system-voteproducer-proxy) - Vote your stake through a proxy +- [voteproducer prods](system-voteproducer-prods) - Vote for one or more producers +- [voteproducer approve](system-voteproducer-approve) - Add one producer to list of voted producers +- [voteproducer unapprove](system-voteproducer-unapprove) - Remove one producer from list of voted producers \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/transfer.md b/docs/02_cleos/03_command-reference/transfer.md new file mode 100755 index 00000000000..d0386d07dac --- /dev/null +++ b/docs/02_cleos/03_command-reference/transfer.md @@ -0,0 +1,81 @@ +## Description +Transfer tokens from account to account 
+ +## Positional Parameters +- `sender` _TEXT_ - The account sending EOS +- `recipient` _TEXT_ - The account receiving EOS +- `amount` _UINT_ - The amount of EOS to send +- `memo` _TEXT_ - The memo for the transfer + +## Options +- `-c,--contract` _TEXT_ - The contract which controls the token +- `--pay-ram-to-open` - Pay ram to open recipient's token balance row +- `-x,--expiration` - set the time in seconds before a transaction expires, defaults to 30s +- `-f,--force-unique` - force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times +- `-s,--skip-sign` - Specify if unlocked wallet keys should be used to sign transaction +- `-j,--json` - print result as json +- `--json-file` _TEXT_ - save result in json format into a file +- `-d,--dont-broadcast` - don't broadcast transaction to the network (just print to stdout) +- `--return-packed` - used in conjunction with --dont-broadcast to get the packed transaction +- `-r,--ref-block` _TEXT_ - set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) +- `--use-old-rpc` - use old RPC push_transaction, rather than new RPC send_transaction +- `-p,--permission` _TEXT_ ... 
- An account and permission level to authorize, as in 'account@permission' (defaults to 'sender@active') +- `--max-cpu-usage-ms` _UINT_ - set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) +- `--max-net-usage` _UINT_ - set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) +- `--delay-sec` _UINT_ - set the delay_sec seconds, defaults to 0s + +## Example +Transfer 1000 SYS from **inita** to **tester** + +```shell +$ ./cleos transfer inita tester 1000 +``` +The response will look something like this + +```shell +{ + "transaction_id": "eb4b94b72718a369af09eb2e7885b3f494dd1d8a20278a6634611d5edd76b703", + "processed": { + "refBlockNum": 2206, + "refBlockPrefix": 221394282, + "expiration": "2017-09-05T08:03:58", + "scope": [ + "inita", + "tester" + ], + "signatures": [ + "1f22e64240e1e479eee6ccbbd79a29f1a6eb6020384b4cca1a958e7c708d3e562009ae6e60afac96f9a3b89d729a50cd5a7b5a7a647540ba1678831bf970e83312" + ], + "messages": [{ + "code": "eos", + "type": "transfer", + "authorization": [{ + "account": "inita", + "permission": "active" + } + ], + "data": { + "from": "inita", + "to": "tester", + "amount": 1000, + "memo": "" + }, + "hex_data": "000000008040934b00000000c84267a1e80300000000000000" + } + ], + "output": [{ + "notify": [{ + "name": "tester", + "output": { ... } + },{ + "name": "inita", + "output": { ... 
} + } + ], + "sync_transactions": [], + "async_transactions": [] + } + ] + } +} +``` diff --git a/docs/02_cleos/03_command-reference/version/client.md b/docs/02_cleos/03_command-reference/version/client.md new file mode 100755 index 00000000000..7350fd9a96a --- /dev/null +++ b/docs/02_cleos/03_command-reference/version/client.md @@ -0,0 +1,11 @@ +## Description + +Retrieve version information of the client + +## Positionals +none +## Usage + +```shell +$ ./cleos version client +``` diff --git a/docs/02_cleos/03_command-reference/version/index.md b/docs/02_cleos/03_command-reference/version/index.md new file mode 100755 index 00000000000..40a54e2dbff --- /dev/null +++ b/docs/02_cleos/03_command-reference/version/index.md @@ -0,0 +1,23 @@ +## Description + +Retrieve version information + +## Command + +```shell +$ ./cleos version +``` + +## Subcommands +[client](client) - Retrieve version information of the client + +```shell +$ ./cleos version client +``` + +## Output + + +```shell +Build version: 7f854a61 +``` diff --git a/docs/02_cleos/03_command-reference/wallet/create.md b/docs/02_cleos/03_command-reference/wallet/create.md new file mode 100755 index 00000000000..b4594459019 --- /dev/null +++ b/docs/02_cleos/03_command-reference/wallet/create.md @@ -0,0 +1,10 @@ +## Description + +Creates a wallet with the specified name. If no name is given, the wallet will be created with the name 'default' + +## Positionals +None +## Options +- `-n, --name` _TEXT_ - The name of the new wallet +- `-f, --file` _TEXT_ - Name of file to write wallet password output to. 
(Must be set, unless "--to-console" is passed) +- `--to-console` - Print password to console \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/wallet/create_key.md b/docs/02_cleos/03_command-reference/wallet/create_key.md new file mode 100755 index 00000000000..2e7ae0d70b7 --- /dev/null +++ b/docs/02_cleos/03_command-reference/wallet/create_key.md @@ -0,0 +1,23 @@ +## Description + +Creates a key pair within the wallet so that you don't need to manually import it like you would with `cleos create key`. By default, this will create a key with the type \"favored\" by the wallet, which is a K1 key. But this command also lets you create a key in R1 format. + +## Positionals + +`key_type` _TEXT_ - "K1" or "R1" Key type to create + +## Options + +-n,--name TEXT=default The name of the wallet to create key into + +## Usage + +```shell +$ cleos wallet create_key K1 +``` + +## Outputs + +```shell +Created new private key with a public key of: "EOS67xHKzQArkWZN6rKLCq7NLvaj8kurwxzRxoTVz6cgDJkiWdGug" +``` diff --git a/docs/02_cleos/03_command-reference/wallet/import.md b/docs/02_cleos/03_command-reference/wallet/import.md new file mode 100755 index 00000000000..38563a1ed1a --- /dev/null +++ b/docs/02_cleos/03_command-reference/wallet/import.md @@ -0,0 +1,9 @@ +## Description +Imports private key into wallet + +## Positionals +None + +## Options +- `-n, --name` _TEXT_ - The name of the wallet to import key into. +- `--private-key` _TEXT_ - Private key in WIF format to import. \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/wallet/index.md b/docs/02_cleos/03_command-reference/wallet/index.md new file mode 100755 index 00000000000..f50f5fadf6e --- /dev/null +++ b/docs/02_cleos/03_command-reference/wallet/index.md @@ -0,0 +1,15 @@ +## Description +Subcommand used to interact with the local wallet.
+ +## Subcommands + +- [create](create.md) Create a new wallet locally +- [create_key](create_key.md) Creates a key pair within the wallet +- [open](open.md) Open an existing wallet +- [lock](lock.md) Lock wallet +- [lock_all](lock_all.md) Lock all unlocked wallets +- [unlock](unlock.md) Unlock wallet +- [import](import.md) Import private key into wallet +- [list](list.md) List opened wallets, * = unlocked +- [keys](keys.md) List of public keys from all unlocked wallets. +- [private_keys](private_keys.md) List of public and private key pairs from all unlocked wallets. \ No newline at end of file diff --git a/docs/02_cleos/03_command-reference/wallet/keys.md b/docs/02_cleos/03_command-reference/wallet/keys.md new file mode 100755 index 00000000000..02e3dc2d0d4 --- /dev/null +++ b/docs/02_cleos/03_command-reference/wallet/keys.md @@ -0,0 +1,25 @@ +## Description + +List of public keys from all unlocked wallets. These are the keys that could be used to sign transactions. + +## Positionals +None. +## Options +None.
+## Usage + + +```shell +$ ./cleos wallet keys +``` + +## Outputs + + +```shell +[[ + "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV", + "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" + ] +] +``` diff --git a/docs/02_cleos/03_command-reference/wallet/list.md b/docs/02_cleos/03_command-reference/wallet/list.md new file mode 100755 index 00000000000..2a48734a521 --- /dev/null +++ b/docs/02_cleos/03_command-reference/wallet/list.md @@ -0,0 +1,33 @@ +## Description + +Lists opened wallets, * = unlocked + +## Positionals +None + +## Options +None + +## Usage + + +```shell +$ ./cleos wallet list +``` + +## Outputs + + +```shell +Wallets: +[ + "default *", + "second-wallet *" +] + +or when there are no wallets +Wallets: +[ +] + +``` diff --git a/docs/02_cleos/03_command-reference/wallet/lock.md b/docs/02_cleos/03_command-reference/wallet/lock.md new file mode 100755 index 00000000000..7c749abbbfe --- /dev/null +++ b/docs/02_cleos/03_command-reference/wallet/lock.md @@ -0,0 +1,23 @@ +## Description + +Locks a wallet + +## Positionals +None + +## Options +- `-n, --name` _TEXT_ - The name of the wallet to lock +## Usage + + +```shell +$ cleos wallet lock +or +$ ./cleos wallet lock -n second-wallet +``` + +## Outputs + +```shell +Locked: 'default' +``` diff --git a/docs/02_cleos/03_command-reference/wallet/lock_all.md b/docs/02_cleos/03_command-reference/wallet/lock_all.md new file mode 100755 index 00000000000..633cb9bdb4d --- /dev/null +++ b/docs/02_cleos/03_command-reference/wallet/lock_all.md @@ -0,0 +1,21 @@ +## Description +Locks all unlocked wallets + + +## Positionals +None +## Options +None +## Usage + + +```shell +$ ./cleos wallet lock_all +``` + +## Outputs + + +```shell +Locked All Wallets +``` diff --git a/docs/02_cleos/03_command-reference/wallet/open.md b/docs/02_cleos/03_command-reference/wallet/open.md new file mode 100755 index 00000000000..574d992ed24 --- /dev/null +++ b/docs/02_cleos/03_command-reference/wallet/open.md @@ -0,0 +1,23 @@ +## 
Description + +Opens an existing wallet + +## Positionals +None +## Options +- `-n, --name` _TEXT_ - The name of the wallet to open. +## Usage + + +```shell +$ cleos wallet open +or +$ cleos wallet open -n second-wallet +``` + +## Outputs + + +```shell +Opened: default +``` diff --git a/docs/02_cleos/03_command-reference/wallet/private_keys.md b/docs/02_cleos/03_command-reference/wallet/private_keys.md new file mode 100755 index 00000000000..74426291db3 --- /dev/null +++ b/docs/02_cleos/03_command-reference/wallet/private_keys.md @@ -0,0 +1,25 @@ +## Description + +It is possible to query for the public and private key pairs of an individual wallet. The wallet must already be unlocked and you must give the password again + +## Positionals +None + +## Options +`-n,--name` _TEXT_ - The name of the wallet to list keys from, otherwise - default + +`--password` _TEXT_ - The password returned by wallet create + +## Usage + + +```javascript +cleos wallet private_keys +``` + +## Outputs + + +```text + +``` diff --git a/docs/02_cleos/03_command-reference/wallet/unlock.md b/docs/02_cleos/03_command-reference/wallet/unlock.md new file mode 100755 index 00000000000..20ad682e84b --- /dev/null +++ b/docs/02_cleos/03_command-reference/wallet/unlock.md @@ -0,0 +1,21 @@ +## Description +Unlocks a wallet + +## Positionals +None +## Options +- `-n, --name` _TEXT_ - The name of the wallet to unlock. +- `--password` _TEXT_ - The password returned by wallet create. +## Usage +To unlock a wallet, specify the password provided when it was created. 
+ +```shell +$ ./cleos wallet unlock -n second-wallet --password PW5Ji6JUrLjhKAVn68nmacLxwhvtqUAV18J7iycZppsPKeoGGgBEw +``` + +## Outputs + + +```shell +Unlocked: 'second-wallet' +``` diff --git a/docs/02_cleos/04_troubleshooting.md b/docs/02_cleos/04_troubleshooting.md new file mode 100644 index 00000000000..b41e734cf59 --- /dev/null +++ b/docs/02_cleos/04_troubleshooting.md @@ -0,0 +1,25 @@ +--- +content_title: Cleos Troubleshooting +--- + +## Cannot connect to RPC endpoint + +Check if your local nodeos is running by executing the following command: + +```shell + +curl http://localhost:8888/v1/chain/get_info + +``` + +If you are trying to connect to a remote nodeos API endpoint, try to visit the API endpoint with the following suffix: + +```shell +http://API_ENDPOINT:PORT/v1/chain/get_info +``` + +Replace API_ENDPOINT and PORT with your remote nodeos API endpoint details + +## "Missing Authorizations" + +That means you are not using the required authorizations. Most likely you are not using the correct EOSIO account or permission level to sign the transaction. diff --git a/docs/02_cleos/05_FAQ.md b/docs/02_cleos/05_FAQ.md new file mode 100644 index 00000000000..1813e8dd656 --- /dev/null +++ b/docs/02_cleos/05_FAQ.md @@ -0,0 +1,11 @@ +--- +content_title: Cleos FAQ +--- + +## Domain Socket (IPC) vs. HTTPS (RPC) + +There are two options to connect `cleos` to `keosd`. You can either use domain sockets or HTTPS. Using a domain socket offers many benefits. It reduces the chance of leaking access of `keosd` to the LAN, WAN or Internet. Also, unlike the HTTPS protocol where many attack vectors exist such as CORS, a domain socket can only be used for the intended use case: Inter-Process Communication + +## What does "transaction executed locally, but may not be confirmed by the network yet" mean? + +It means the transaction has been successfully accepted and executed by the instance of nodeos that cleos submitted it directly to.
That instance of nodeos should relay the transaction to additional instances via the peer-to-peer protocol but, there is no guarantee that these additional instances accepted or executed the transaction. There is also no guarantee, at this point, that the transaction has been accepted and executed by a valid block producer and subsequently included in a valid block in the blockchain. If you require stronger confirmation of a transaction's inclusion in the immutable blockchain, you must take extra steps to monitor for the transaction's presence in an irreversible block diff --git a/docs/02_cleos/index.md b/docs/02_cleos/index.md new file mode 100644 index 00000000000..4328cbeca94 --- /dev/null +++ b/docs/02_cleos/index.md @@ -0,0 +1,122 @@ +--- +content_title: Cleos +--- + +## Introduction + +`cleos` is a command line tool that interfaces with the REST API exposed by `nodeos`. Developers can also use `cleos` to deploy and test EOSIO smart contracts. + +## Installation + +`cleos` is distributed as part of the [EOSIO software suite](https://github.com/EOSIO/eos/blob/master/README.md). To install `cleos` just visit the [EOSIO Software Installation](../00_install/index.md) section. + +## Using Cleos + +To use `cleos`, you need the end point (IP address and port number) of a running `nodeos` instance. Also, the `nodeos` instance must be configured to load the `eosio::chain_api_plugin` when launched. This allows `nodeos` to respond to the RPC requests coming from `cleos`. 
+ +### Cleos Commands + +For a list of all `cleos` commands, run: + +```sh +$ cleos --help +``` + +```console +Command Line Interface to EOSIO Client +Usage: cleos [OPTIONS] SUBCOMMAND + +Options: + -h,--help Print this help message and exit + -u,--url TEXT=http://127.0.0.1:8888/ + the http/https URL where nodeos is running + --wallet-url TEXT=unix:///Users/username/eosio-wallet/keosd.sock + the http/https URL where keosd is running + -r,--header pass specific HTTP header; repeat this option to pass multiple headers + -n,--no-verify don't verify peer certificate when using HTTPS + --no-auto-keosd don't automatically launch a keosd if one is not currently running + -v,--verbose output verbose errors and action console output + --print-request print HTTP request to STDERR + --print-response print HTTP response to STDERR + +Subcommands: + version Retrieve version information + create Create various items, on and off the blockchain + convert Pack and unpack transactions + get Retrieve various items and information from the blockchain + set Set or update blockchain state + transfer Transfer tokens from account to account + net Interact with local p2p network connections + wallet Interact with local wallet + sign Sign a transaction + push Push arbitrary transactions to the blockchain + multisig Multisig contract commands + wrap Wrap contract commands + system Send eosio.system contract action to the blockchain. +``` + +### Cleos Subcommands + +To get help with any particular subcommand, run `cleos SUBCOMMAND --help`. For instance: + +```sh +$ cleos create --help +``` + +```console +Create various items, on and off the blockchain +Usage: cleos create SUBCOMMAND + +Subcommands: + key Create a new keypair and print the public and private keys + account Create a new account on the blockchain + (assumes system contract does not restrict RAM usage) +``` + +`cleos` can also provide usage help for subcommands within subcommands. 
For instance: + +```sh +$ cleos create account --help +``` + +```console +Create a new account on the blockchain (assumes system contract does not restrict RAM usage) +Usage: cleos create account [OPTIONS] creator name OwnerKey [ActiveKey] + +Positionals: + creator TEXT The name of the account creating the new account (required) + name TEXT The name of the new account (required) + OwnerKey TEXT The owner public key or permission level for the new account (required) + ActiveKey TEXT The active public key or permission level for the new account + +Options: + -h,--help Print this help message and exit + -x,--expiration set the time in seconds before a transaction expires, defaults to 30s + -f,--force-unique force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times + -s,--skip-sign Specify if unlocked wallet keys should be used to sign transaction + -j,--json print result as json + --json-file TEXT save result in json format into a file + -d,--dont-broadcast don't broadcast transaction to the network (just print to stdout) + --return-packed used in conjunction with --dont-broadcast to get the packed transaction + -r,--ref-block TEXT set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake) + --use-old-rpc use old RPC push_transaction, rather than new RPC send_transaction + -p,--permission TEXT ... 
An account and permission level to authorize, as in 'account@permission' (defaults to 'creator@active') + --max-cpu-usage-ms UINT set an upper limit on the milliseconds of cpu usage budget, for the execution of the transaction (defaults to 0 which means no limit) + --max-net-usage UINT set an upper limit on the net usage budget, in bytes, for the transaction (defaults to 0 which means no limit) + --delay-sec UINT set the delay_sec seconds, defaults to 0s +``` + +## Cleos Example + +The following `cleos` command creates a local wallet named `mywallet` and displays the password to the screen: + +```sh +$ cleos wallet create -n mywallet --to-console +``` + +```console +Creating wallet: mywallet +Save password to use in the future to unlock this wallet. +Without password imported keys will not be retrievable. +"PW5JbF34UdA193Eps1bjrWVJRaNMt1VKddLn4Dx6SPVTfMDRnMBWN" +``` diff --git a/docs/03_keosd/10_usage.md b/docs/03_keosd/10_usage.md new file mode 100644 index 00000000000..9e1846b266f --- /dev/null +++ b/docs/03_keosd/10_usage.md @@ -0,0 +1,116 @@ +--- +content_title: Keosd Usage +--- + +[[info | Recommended Usage]] +| For most users, the easiest way to use `keosd` is to have `cleos` launch it automatically. Wallet files will be created in the default directory (`~/eosio-wallet`). + +## Launching keosd manually + +`keosd` can be launched manually from the terminal by running: + +```sh +$ keosd +``` + +By default, `keosd` creates the folder `~/eosio-wallet` and populates it with a basic `config.ini` file. The location of the config file can be specified on the command line using the `--config-dir` argument. The configuration file contains the HTTP server endpoint for incoming HTTP connections and other parameters for cross-origin resource sharing. + +[[info | Wallet Location]] +| The location of the wallet data folder can be specified on the command line with the `--data-dir` option. 
+ +## Auto-locking + +By default, `keosd` is set to lock your wallet after 15 minutes of inactivity. This is configurable in the `config.ini` by setting the timeout seconds in `unlock-timeout`. Setting it to 0 will cause `keosd` to always lock your wallet. + +## Stopping keosd + +The most effective way to stop `keosd` is to find the keosd process and send a SIGTERM signal to it. + +## Other options + +For a list of all commands known to `keosd`, simply run it with no arguments: + +```sh +$ keosd --help +``` + +```console +Application Options: + +Config Options for eosio::http_plugin: + --unix-socket-path arg (=keosd.sock) The filename (relative to data-dir) to + create a unix socket for HTTP RPC; set + blank to disable. + --http-server-address arg The local IP and port to listen for + incoming http connections; leave blank + to disable. + --https-server-address arg The local IP and port to listen for + incoming https connections; leave blank + to disable. + --https-certificate-chain-file arg Filename with the certificate chain to + present on https connections. PEM + format. Required for https. + --https-private-key-file arg Filename with https private key in PEM + format. Required for https + --https-ecdh-curve arg (=secp384r1) Configure https ECDH curve to use: + secp384r1 or prime256v1 + --access-control-allow-origin arg Specify the Access-Control-Allow-Origin + to be returned on each request. + --access-control-allow-headers arg Specify the Access-Control-Allow-Header + s to be returned on each request. + --access-control-max-age arg Specify the Access-Control-Max-Age to + be returned on each request. + --access-control-allow-credentials Specify if Access-Control-Allow-Credent + ials: true should be returned on each + request. + --max-body-size arg (=1048576) The maximum body size in bytes allowed + for incoming RPC requests + --http-max-bytes-in-flight-mb arg (=500) + Maximum size in megabytes http_plugin + should use for processing http + requests. 
503 error response when + exceeded. + --verbose-http-errors Append the error log to HTTP responses + --http-validate-host arg (=1) If set to false, then any incoming + "Host" header is considered valid + --http-alias arg Additionaly acceptable values for the + "Host" header of incoming HTTP + requests, can be specified multiple + times. Includes http/s_server_address + by default. + --http-threads arg (=2) Number of worker threads in http thread + pool + +Config Options for eosio::wallet_plugin: + --wallet-dir arg (=".") The path of the wallet files (absolute + path or relative to application data + dir) + --unlock-timeout arg (=900) Timeout for unlocked wallet in seconds + (default 900 (15 minutes)). Wallets + will automatically lock after specified + number of seconds of inactivity. + Activity is defined as any wallet + command e.g. list-wallets. + --yubihsm-url URL Override default URL of + http://localhost:12345 for connecting + to yubihsm-connector + --yubihsm-authkey key_num Enables YubiHSM support using given + Authkey + +Application Config Options: + --plugin arg Plugin(s) to enable, may be specified + multiple times + +Application Command Line Options: + -h [ --help ] Print this help message and exit. + -v [ --version ] Print version information. 
+ --print-default-config Print default configuration template + -d [ --data-dir ] arg Directory containing program runtime + data + --config-dir arg Directory containing configuration + files such as config.ini + -c [ --config ] arg (=config.ini) Configuration file name relative to + config-dir + -l [ --logconf ] arg (=logging.json) Logging configuration file name/path + for library users +``` diff --git a/docs/03_keosd/15_plugins/http_plugin.md b/docs/03_keosd/15_plugins/http_plugin.md new file mode 100644 index 00000000000..fe2f3cc9d18 --- /dev/null +++ b/docs/03_keosd/15_plugins/http_plugin.md @@ -0,0 +1,3 @@ +--- +link: /01_nodeos/03_plugins/http_plugin/index.md +--- diff --git a/docs/03_keosd/15_plugins/index.md b/docs/03_keosd/15_plugins/index.md new file mode 100644 index 00000000000..77c787a117a --- /dev/null +++ b/docs/03_keosd/15_plugins/index.md @@ -0,0 +1,14 @@ +--- +content_title: Keosd Plugins +--- + +## Overview + +Plugins extend the core functionality implemented in `keosd`. For information on specific plugins, just select from the list below: + +* [`http_plugin`](../../01_nodeos/03_plugins/http_plugin/index.md) +* [`wallet_api_plugin`](wallet_api_plugin/index.md) +* [`wallet_plugin`](wallet_plugin/index.md) + +[[info | Keosd is modular]] +| Plugins add incremental functionality to `keosd`. Unlike runtime plugins, `keosd` plugins are built at compile-time. diff --git a/docs/03_keosd/15_plugins/wallet_api_plugin/index.md b/docs/03_keosd/15_plugins/wallet_api_plugin/index.md new file mode 100644 index 00000000000..8a796498a6b --- /dev/null +++ b/docs/03_keosd/15_plugins/wallet_api_plugin/index.md @@ -0,0 +1,41 @@ +# wallet_api_plugin + +## Description + +The `wallet_api_plugin` exposes functionality from the [`wallet_plugin`](../wallet_plugin/index.md) to the RPC API interface managed by the [`http_plugin`](../http_plugin/index.md). + +[[caution | Caution]] +| This plugin exposes wallets. 
Therefore, running this plugin on a publicly accessible node is not recommended. As of 1.2.0, `nodeos` will no longer allow the `wallet_api_plugin`. + +## Usage + +```sh +# config.ini +plugin = eosio::wallet_api_plugin + +# command-line +$ nodeos ... --plugin eosio::wallet_api_plugin +``` + +## Options + +None + +## Dependencies + +* [`wallet_plugin`](../wallet_plugin/index.md) +* [`http_plugin`](../http_plugin/index.md) + +### Load Dependency Examples + +```sh +# config.ini +plugin = eosio::wallet_plugin +[options] +plugin = eosio::http_plugin +[options] + +# command-line +$ nodeos ... --plugin eosio::wallet_plugin [options] \ + --plugin eosio::http_plugin [options] +``` diff --git a/docs/03_keosd/15_plugins/wallet_plugin/index.md b/docs/03_keosd/15_plugins/wallet_plugin/index.md new file mode 100644 index 00000000000..34c62e304ab --- /dev/null +++ b/docs/03_keosd/15_plugins/wallet_plugin/index.md @@ -0,0 +1,41 @@ +# wallet_plugin + +## Description + +The `wallet_plugin` adds access to wallet functionality from a node. + +[[caution | Caution]] +| This plugin is not designed to be loaded as a plugin on a publicly accessible node without further security measures. This is particularly true when loading the `wallet_api_plugin`, which should not be loaded on a publicly accessible node under any circumstances. + +## Usage + +```sh +# config.ini +plugin = eosio::wallet_plugin + +# command-line +$ nodeos ... --plugin eosio::wallet_plugin +``` + +## Options + +None + +## Dependencies + +* [`wallet_plugin`](../wallet_plugin/index.md) +* [`http_plugin`](../http_plugin/index.md) + +### Load Dependency Examples + +```sh +# config.ini +plugin = eosio::wallet_plugin +[options] +plugin = eosio::http_plugin +[options] + +# command-line +$ nodeos ... 
--plugin eosio::wallet_plugin [options] \ + --plugin eosio::http_plugin [options] +``` diff --git a/docs/03_keosd/20_security.md b/docs/03_keosd/20_security.md new file mode 100644 index 00000000000..04219f8ff34 --- /dev/null +++ b/docs/03_keosd/20_security.md @@ -0,0 +1,17 @@ +--- +content_title: Keosd Security +--- + +### How does `keosd` Locking/Unlocking work and what are the security implications? + +From a user's perspective, when a wallet is created, it remains in an `unlocked` state. Depending on the way `keosd` is launched, it may remain `unlocked` until the process is restarted. When a wallet is locked (either by timeout or process restart) the password is required to unlock it. + +However, it must be emphasized that `keosd` has no authentication/authorization mechanism besides locking/unlocking the wallet for storing/retrieving private keys and signing digital messages. + +### How is the `keosd` service accessed and what are the security implications? + +When a domain socket is used to access `keosd`, any UNIX user/group that has access rights to write to the socket file on the filesystem can submit transactions and receive signed transactions from `keosd` using any key in any unlocked wallet. + +In the case of a TCP socket bound to localhost, any local process (regardless of owner or permission) can do the same things mentioned above. That includes a snippet of JavaScript code in a web page running in a local browser (though some browsers may have some security mitigations for this). + +In the case of a TCP socket bound to a LAN/WAN address, any remote actor that can send packets to a machine running `keosd` may do the same. That presents a huge security risk, even if the communication is encrypted or secured via HTTPS.
diff --git a/docs/03_keosd/30_how-to-guides/how-to-attach-a-yubihsm-hard-wallet.md b/docs/03_keosd/30_how-to-guides/how-to-attach-a-yubihsm-hard-wallet.md new file mode 100644 index 00000000000..0bbf3e1024a --- /dev/null +++ b/docs/03_keosd/30_how-to-guides/how-to-attach-a-yubihsm-hard-wallet.md @@ -0,0 +1,68 @@ +## Goal + +Attach a YubiHSM as a hard wallet + +## Before you begin + +* Install the currently supported version of `keosd` + +* Install YubiHSM2 Software Toolkit (YubiHSM2 SDK) + +* Create an AuthKey with at least the following Capabilities: + + * sign-ecdsa + * generate-asymmetric-key + * export-wrapped + +* **Delete the default AuthKey** + +[[warning | Security]] +| It is extremely important to create a new AuthKey and remove the default AuthKey before proceeding to the following steps. + +## Steps + +### Configure `keosd` + + There are two options to connect `keosd` to YubiHSM: + + #### Using a YubiHSM connector + + By default, `keosd` will connect to the YubiHSM connector on the default host and port.
If a non-default URL is used, set the `--yubihsm-url` option or `yubihsm-url` in `config.ini` with the correct connector URL + + #### Directly connect via USB + + `keosd` also can directly connect to YubiHSM via USB protocol + + If this option is used, set `keosd` startup option as the below: + + ```shell + --yubihsm-url=ysb:// + ``` + +### Start `keosd` with AuthKey: + + ```shell + --yubihsm-authkey Your_AuthKey_Object_Number + ``` + + If a YubiHSM connector is used, check the YubiHSM connector is up and running by visiting YubiHSM URL: + http://YubiHSM_HOST:YubiHSM_PORT/connector/status (Default HOST and Port: http://127.0.0.1:12345) + + You should see something like below: + + ```json + status=OK + serial=* + version=2.0.0 + pid=666 + address=localhost + port=12345 + ``` + +### Unlock YubiHSM wallet with the password of AuthKey using the following option: + + ```bash + cleos wallet unlock -n YubiHSM --password YOUR_AUTHKEY_PASSWORD + ``` + +After unlocking the wallet, you can use `cleos wallet` commands as usual. Beware that, as part of the security mechanism, some wallet subcommands, such as retrieve private keys, or remove a key, are not supported when a YubiHSM is used \ No newline at end of file diff --git a/docs/03_keosd/30_how-to-guides/index.md b/docs/03_keosd/30_how-to-guides/index.md new file mode 100644 index 00000000000..ec0bd09ccf8 --- /dev/null +++ b/docs/03_keosd/30_how-to-guides/index.md @@ -0,0 +1,5 @@ +--- +content_title: Keosd How-to Guides +--- + +* [How to attach a YubiHSM hard wallet](how-to-attach-a-yubihsm-hard-wallet.md) diff --git a/docs/03_keosd/35_wallet-specification.md b/docs/03_keosd/35_wallet-specification.md new file mode 100644 index 00000000000..f91867e624d --- /dev/null +++ b/docs/03_keosd/35_wallet-specification.md @@ -0,0 +1,144 @@ +--- +content_title: EOS Wallet Specification +--- + +## EOS Wallet Import Format (WIF) + +Wallet Import Format is an encoding for a private ECDSA key.
EOS uses the same version, checksum, and encoding scheme as the Bitcoin WIF addresses and should be compatible with existing libraries [1]. + +This is an example of a WIF Private Key: + +``` +5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAbuatmU +``` + +This encoding is good for: + +* Copy and Pasting private keys (ensures the entire key is copied) +* Including keys in text or user editable file formats +* Shortening the key-length + +This encoding is not good for: + +* Writing keys down by hand (even a single upper / lowercase mistake can cause a major problem) +* Binary or computer storage where code handles the key and data is already checked + +Considerations: + +* If a key could be written down or re-keyed, the BIP39 Mnemonic Code standard is a better option to use. +* It is a good idea to always label a WIF key using the word "Private" or "Private Key". + +## Private key to WIF + +1. A fake private key of all zeros is used. This is 32 bytes long (shown here as hex). + +``` +0000000000000000000000000000000000000000000000000000000000000000 +``` + +2. Add a 0x80 byte in front. This byte represents the Bitcoin mainnet. EOS uses the same version byte. When encoded the version byte helps to identify this as a private key. Unlike Bitcoin, EOS always uses compressed public keys (derived from a private key) and therefore does not suffix the private key with a 0x01 byte. + +``` +800000000000000000000000000000000000000000000000000000000000000000 +``` + +3. Perform a binary SHA-256 hash on the versioned key. + +``` +ce145d282834c009c24410812a60588c1085b63d65a7effc2e0a5e3a2e21b236 +``` + +4. Perform a binary SHA-256 hash on result of SHA-256 hash. + +``` +0565fba7ebf8143516e0222d7950c28589a34c3ee144c3876ceb01bfb0e9bb70 +``` + +5. Take the first 4 bytes of the second SHA-256 hash, this is the checksum. + +``` +0565fba7 +``` + +6. Add the 4 checksum bytes to the versioned key from step 2. 
+ +``` +8000000000000000000000000000000000000000000000000000000000000000000565fba7 +``` + +7. [Base58](http://npmjs.com/package/bs58) encode the binary data from step 6. + +``` +5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAbuatmU +``` + +--- + +## WIF to private key (checksum checking) + +1. Start with the Wallet Import Private Key. + +``` +5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAbuatmU +``` + +2. [Base58](http://npmjs.com/package/bs58) decode the WIF string (shown as HEX here). + +``` +8000000000000000000000000000000000000000000000000000000000000000000565fba7 +``` + +3. Slice the decoded WIF into the versioned key and the checksum (last 4 bytes). + +``` +800000000000000000000000000000000000000000000000000000000000000000 +0565fba7 +``` + +4. Perform a binary SHA-256 hash on the versioned key. + +``` +ce145d282834c009c24410812a60588c1085b63d65a7effc2e0a5e3a2e21b236 +``` + +5. Perform a binary SHA-256 hash on result of SHA-256 hash. + +``` +0565fba7ebf8143516e0222d7950c28589a34c3ee144c3876ceb01bfb0e9bb70 +``` + +6. Take the first 4 bytes of the second SHA-256 hash, this is the checksum. + +``` +0565fba7 +``` + +7. Make sure the checksum in steps 3 and 6 match. + +8. Slice the versioned private key from step 3 into the version and private key. + +``` +80 +0000000000000000000000000000000000000000000000000000000000000000 +``` + +9. If the version is 0x80 then there is no error. + +--- + +## [Base58check](https://www.npmjs.com/package/base58check) + +Base58Check is a JavaScript implementation of this algorithm and may be used to encode and decode EOS WIF private keys. 
+ +```sh +base58check = require('base58check') +wif = base58check.encode(privateKey = '00'.repeat(32), version = '80', encoding = 'hex') +assert.equal('5HpHagT65TZzG1PH3CSu63k8DbpvD8s5ip4nEB3kEsreAbuatmU', wif) +let {prefix, data} = base58check.decode(wif) +assert.equal(prefix.toString('hex'), '80') +assert.equal(data.toString('hex'), '00'.repeat(32)) +``` + +--- + +[1] Bitcoin WIF format - https://en.bitcoin.it/wiki/Wallet_import_format diff --git a/docs/03_keosd/40_troubleshooting.md b/docs/03_keosd/40_troubleshooting.md new file mode 100644 index 00000000000..8aa9c6d9280 --- /dev/null +++ b/docs/03_keosd/40_troubleshooting.md @@ -0,0 +1,13 @@ +--- +content_title: Keosd Troubleshooting +--- + +## How to solve the error "Failed to lock access to wallet directory; is another keosd running"? + +Since `cleos` may auto-launch an instance of `keosd`, it is possible to end up with multiple instances of `keosd` running. That can cause unexpected behavior or the error message above. + +To fix this issue, you can terminate all running `keosd` instances and restart `keosd`. The following command will find and terminate all instances of `keosd` running on the system: + +```sh +$ pkill keosd +``` diff --git a/docs/03_keosd/50_FAQ.md b/docs/03_keosd/50_FAQ.md new file mode 100644 index 00000000000..f58052c0437 --- /dev/null +++ b/docs/03_keosd/50_FAQ.md @@ -0,0 +1,11 @@ +--- +content_title: Keosd FAQ +--- + +### How does `keosd` store key pairs + +`keosd` encrypts key pairs under-the-hood before storing them on a wallet file. Depending on the wallet implementation, say Secure Enclave or YubiHSM, a specific cryptographic algorithm will be used. When the standard file system of a UNIX-based OS is used, `keosd` encrypts key pairs using 256-bit AES in CBC mode. + +### How to enable the `keosd` Secure Enclave + +To enable the secure enclave feature of `keosd`, you need to sign a `keosd` binary with a certificate provided with your Apple Developer Account. 
Be aware that there might be some constraints imposed by App Store when signing from a console application. Therefore, the signed binaries might need to be resigned every 7 days. diff --git a/docs/03_keosd/index.md b/docs/03_keosd/index.md new file mode 100644 index 00000000000..43f2301dbb5 --- /dev/null +++ b/docs/03_keosd/index.md @@ -0,0 +1,18 @@ +--- +content_title: Keosd +--- + +## Introduction + +`keosd` is a key manager service daemon for storing private keys and signing digital messages. It provides a secure key storage medium for keys to be encrypted at rest in the associated wallet file. `keosd` also defines a secure enclave for signing transactions created by `cleos` or a third-party library. + +## Installation + +`keosd` is distributed as part of the [EOSIO software suite](https://github.com/EOSIO/eos/blob/master/README.md). To install `keosd` just visit the [EOSIO Software Installation](../00_install/index.md) section. + +## Operation + +When a wallet is unlocked with the corresponding password, `cleos` can request `keosd` to sign a transaction with the appropriate private keys. Also, `keosd` provides support for hardware-based wallets such as Secure Enclave and YubiHSM. + +[[info | Audience]] +| `keosd` is intended to be used by EOSIO developers only. diff --git a/docs/eosio_components.png b/docs/eosio_components.png new file mode 100644 index 00000000000..59660cb7758 Binary files /dev/null and b/docs/eosio_components.png differ diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000000..6589f21f43d --- /dev/null +++ b/docs/index.md @@ -0,0 +1,16 @@ +--- +content_title: EOSIO Overview +--- + +EOSIO is the next-generation blockchain platform for creating and deploying smart contracts and distributed applications. EOSIO comes with a number of programs. 
The primary ones included in EOSIO are the following: + +* [Nodeos](01_nodeos/index.md) (node + eos = nodeos) - core service daemon that runs a node for block production, API endpoints, or local development. +* [Cleos](02_cleos/index.md) (cli + eos = cleos) - command line interface to interact with the blockchain (via `nodeos`) and manage wallets (via `keosd`). +* [Keosd](03_keosd/index.md) (key + eos = keosd) - component that manages EOSIO keys in wallets and provides a secure enclave for digital signing. + +The basic relationship between these components is illustrated in the diagram below. + +![EOSIO components](eosio_components.png) + +[[info | What's Next?]] +| [Install the EOSIO Software](00_install/index.md) before exploring the sections above. diff --git a/eosio-wasm-spec-tests b/eosio-wasm-spec-tests new file mode 160000 index 00000000000..25c46086d69 --- /dev/null +++ b/eosio-wasm-spec-tests @@ -0,0 +1 @@ +Subproject commit 25c46086d694f5973ae1123e6a8aaa6e7232a2d4 diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 54bb2f80e09..160c7cec10a 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -6,6 +6,7 @@ add_subdirectory( wasm-jit ) add_subdirectory( appbase ) add_subdirectory( chain ) add_subdirectory( testing ) +add_subdirectory( version ) #turn tools&tests off; not needed for library build set(BUILD_TESTS OFF CACHE BOOL "Build GTest-based tests") @@ -14,6 +15,16 @@ set(RUN_RE2C OFF CACHE BOOL "Run re2c") set(WITH_EXCEPTIONS ON CACHE BOOL "Build with exceptions enabled" FORCE) add_subdirectory( wabt ) +set(USE_EXISTING_SOFTFLOAT ON CACHE BOOL "use pre-exisiting softfloat lib") +set(ENABLE_TOOLS OFF CACHE BOOL "Build tools") +set(ENABLE_TESTS OFF CACHE BOOL "Build tests") +set(ENABLE_ADDRESS_SANITIZER OFF CACHE BOOL "Use address sanitizer") +set(ENABLE_UNDEFINED_BEHAVIOR_SANITIZER OFF CACHE BOOL "Use UB sanitizer") +set(ENABLE_PROFILE OFF CACHE BOOL "Enable for profile builds") +if(eos-vm IN_LIST EOSIO_WASM_RUNTIMES 
OR eos-vm-jit IN_LIST EOSIO_WASM_RUNTIMES) +add_subdirectory( eos-vm ) +endif() + set(ENABLE_STATIC ON) set(CMAKE_MACOSX_RPATH OFF) set(BUILD_ONLY_LIB ON CACHE BOOL "Library only build") diff --git a/libraries/appbase b/libraries/appbase index d864831e7b1..72e93b39672 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit d864831e7b12639e9f63fe721b9d4ae225e03fb0 +Subproject commit 72e93b396726916a596482897ab13f99a8197379 diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 254d462c5ed..a2bc309a8fb 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -5,11 +5,55 @@ file(GLOB HEADERS "include/eosio/chain/*.hpp" "include/eosio/chain/webassembly/*.hpp" "${CMAKE_CURRENT_BINARY_DIR}/include/eosio/chain/core_symbol.hpp" ) +if(APPLE AND UNIX) + set(PLATFORM_TIMER_IMPL platform_timer_macos.cpp) +else() + try_run(POSIX_TIMER_TEST_RUN_RESULT POSIX_TIMER_TEST_COMPILE_RESULT ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/platform_timer_posix_test.c) + if(POSIX_TIMER_TEST_RUN_RESULT EQUAL 0) + set(PLATFORM_TIMER_IMPL platform_timer_posix.cpp) + else() + set(PLATFORM_TIMER_IMPL platform_timer_asio_fallback.cpp) + endif() +endif() + +if("eos-vm-oc" IN_LIST EOSIO_WASM_RUNTIMES) + set(CHAIN_EOSVMOC_SOURCES webassembly/eos-vm-oc/code_cache.cpp + webassembly/eos-vm-oc/executor.cpp + webassembly/eos-vm-oc/memory.cpp + webassembly/eos-vm-oc/intrinsic.cpp + webassembly/eos-vm-oc/LLVMJIT.cpp + webassembly/eos-vm-oc/LLVMEmitIR.cpp + webassembly/eos-vm-oc/compile_monitor.cpp + webassembly/eos-vm-oc/compile_trampoline.cpp + webassembly/eos-vm-oc/ipc_helpers.cpp + webassembly/eos-vm-oc/gs_seg_helpers.c + webassembly/eos-vm-oc.cpp) + + if(LLVM_VERSION VERSION_LESS 7.1 AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + enable_language(ASM-LLVMWAR) + list(APPEND CHAIN_EOSVMOC_SOURCES webassembly/eos-vm-oc/llvmWARshim.llvmwar) + else() + list(APPEND CHAIN_EOSVMOC_SOURCES 
webassembly/eos-vm-oc/llvmWARshim.cpp) + endif() + + llvm_map_components_to_libnames(LLVM_LIBS support core passes mcjit native orcjit) + include_directories(${LLVM_INCLUDE_DIRS}) + add_definitions(${LLVM_DEFINITIONS}) + + option(EOSVMOC_ENABLE_DEVELOPER_OPTIONS "enable developer options for EOS VM OC" OFF) +endif() + +if("eos-vm" IN_LIST EOSIO_WASM_RUNTIMES OR "eos-vm-jit" IN_LIST EOSIO_WASM_RUNTIMES) + set(CHAIN_EOSVM_SOURCES "webassembly/eos-vm.cpp") + set(CHAIN_EOSVM_LIBRARIES eos-vm) +endif() + ## SORT .cpp by most likely to change / break compile add_library( eosio_chain merkle.cpp name.cpp transaction.cpp + block.cpp block_header.cpp block_header_state.cpp block_state.cpp @@ -21,6 +65,7 @@ add_library( eosio_chain transaction_context.cpp eosio_contract.cpp eosio_contract_abi.cpp + eosio_contract_abi_bin.cpp chain_config.cpp chain_id_type.cpp genesis_state.cpp @@ -37,8 +82,9 @@ add_library( eosio_chain asset.cpp snapshot.cpp - webassembly/wavm.cpp webassembly/wabt.cpp + ${CHAIN_EOSVMOC_SOURCES} + ${CHAIN_EOSVM_SOURCES} # get_config.cpp # @@ -49,22 +95,41 @@ add_library( eosio_chain protocol_state_object.cpp protocol_feature_activation.cpp protocol_feature_manager.cpp + producer_schedule.cpp genesis_intrinsics.cpp whitelisted_intrinsics.cpp thread_utils.cpp + platform_timer_accuracy.cpp + ${PLATFORM_TIMER_IMPL} ${HEADERS} ) target_link_libraries( eosio_chain fc chainbase Logging IR WAST WASM Runtime - softfloat builtins wabt + softfloat builtins wabt ${CHAIN_EOSVM_LIBRARIES} ${LLVM_LIBS} ) target_include_directories( eosio_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../wasm-jit/Include" + "${CMAKE_CURRENT_SOURCE_DIR}/libraries/eos-vm/include" "${CMAKE_SOURCE_DIR}/libraries/wabt" "${CMAKE_BINARY_DIR}/libraries/wabt" ) +if("eos-vm-oc" IN_LIST EOSIO_WASM_RUNTIMES) + target_link_libraries(eosio_chain "-Wl,-wrap=main") +endif() + +foreach(RUNTIME ${EOSIO_WASM_RUNTIMES}) + string(TOUPPER 
"${RUNTIME}" RUNTIMEUC) + string(REPLACE "-" "_" RUNTIMEUC ${RUNTIMEUC}) + target_compile_definitions(eosio_chain PUBLIC "EOSIO_${RUNTIMEUC}_RUNTIME_ENABLED") +endforeach() + +if(EOSVMOC_ENABLE_DEVELOPER_OPTIONS) + message(WARNING "EOS VM OC Developer Options are enabled; these are NOT supported") + target_compile_definitions(eosio_chain PUBLIC EOSIO_EOS_VM_OC_DEVELOPER) +endif() + install( TARGETS eosio_chain RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR} @@ -72,7 +137,7 @@ install( TARGETS eosio_chain ) install( DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/eosio/chain/ DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}/eosio/chain - FILES_MATCHING PATTERN "*.hpp" PATTERN "*.h" PATTERN "webassembly" EXCLUDE + FILES_MATCHING PATTERN "*.hpp" PATTERN "*.h" EXCLUDE ) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/include/eosio/chain/core_symbol.hpp DESTINATION ${CMAKE_INSTALL_FULL_INCLUDEDIR}/eosio/chain) #if(MSVC) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 91950de1f12..d5e100428c0 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include @@ -13,6 +9,7 @@ using namespace boost; + namespace eosio { namespace chain { const size_t abi_serializer::max_recursion_depth; @@ -78,7 +75,7 @@ namespace eosio { namespace chain { // TODO: Add proper support for floating point types. For now this is good enough. 
built_in_types.emplace("float32", pack_unpack()); built_in_types.emplace("float64", pack_unpack()); - built_in_types.emplace("float128", pack_unpack()); + built_in_types.emplace("float128", pack_unpack()); built_in_types.emplace("time_point", pack_unpack()); built_in_types.emplace("time_point_sec", pack_unpack()); @@ -118,7 +115,6 @@ namespace eosio { namespace chain { structs[st.name] = st; for( const auto& td : abi.types ) { - EOS_ASSERT(_is_type(td.type, ctx), invalid_type_inside_abi, "invalid type ${type}", ("type",td.type)); EOS_ASSERT(!_is_type(td.new_type_name, ctx), duplicate_abi_type_def_exception, "type already exists", ("new_type_name",impl::limit_size(td.new_type_name))); typedefs[td.new_type_name] = td.type; @@ -422,6 +418,12 @@ namespace eosio { namespace chain { _variant_to_binary(fundamental_type(rtype), var, ds, ctx); ++i; } + } else if( is_optional(rtype) ) { + char flag = !var.is_null(); + fc::raw::pack(ds, flag); + if( flag ) { + _variant_to_binary(fundamental_type(rtype), var, ds, ctx); + } } else if( (v_itr = variants.find(rtype)) != variants.end() ) { ctx.hint_variant_type_if_in_array( v_itr ); auto& v = v_itr->second; diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index dc7687cf05c..02c964b0dee 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -355,10 +355,14 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a if( control.is_builtin_activated( builtin_protocol_feature_t::no_duplicate_deferred_id ) ) { auto exts = trx.validate_and_extract_extensions(); if( exts.size() > 0 ) { - EOS_ASSERT( exts.size() == 1, invalid_transaction_extension, - "only one extension is currently supported for deferred transactions" + auto itr = exts.lower_bound( deferred_transaction_generation_context::extension_id() ); + + EOS_ASSERT( exts.size() == 1 && itr != exts.end(), invalid_transaction_extension, + "only the deferred_transaction_generation_context 
extension is currently supported for deferred transactions" ); - const auto& context = exts.front().get(); + + const auto& context = itr->second.get(); + EOS_ASSERT( context.sender == receiver, ill_formed_deferred_transaction_generation_context, "deferred transaction generaction context contains mismatching sender", ("expected", receiver)("actual", context.sender) @@ -372,8 +376,8 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a ("expected", trx_context.id)("actual", context.sender_trx_id) ); } else { - FC_ASSERT( trx.transaction_extensions.size() == 0, "invariant failure" ); - trx.transaction_extensions.emplace_back( + emplace_extension( + trx.transaction_extensions, deferred_transaction_generation_context::extension_id(), fc::raw::pack( deferred_transaction_generation_context( trx_context.id, sender_id, receiver ) ) ); @@ -648,11 +652,11 @@ int apply_context::get_context_free_data( uint32_t index, char* buffer, size_t b return copy_size; } -int apply_context::db_store_i64( uint64_t scope, uint64_t table, const account_name& payer, uint64_t id, const char* buffer, size_t buffer_size ) { +int apply_context::db_store_i64( name scope, name table, const account_name& payer, uint64_t id, const char* buffer, size_t buffer_size ) { return db_store_i64( receiver, scope, table, payer, id, buffer, buffer_size); } -int apply_context::db_store_i64( uint64_t code, uint64_t scope, uint64_t table, const account_name& payer, uint64_t id, const char* buffer, size_t buffer_size ) { +int apply_context::db_store_i64( name code, name scope, name table, const account_name& payer, uint64_t id, const char* buffer, size_t buffer_size ) { // require_write_lock( scope ); const auto& tab = find_or_create_table( code, scope, table, payer ); auto tableid = tab.id; @@ -788,7 +792,7 @@ int apply_context::db_previous_i64( int iterator, uint64_t& primary ) { return keyval_cache.add(*itr); } -int apply_context::db_find_i64( uint64_t code, uint64_t scope, uint64_t 
table, uint64_t id ) { +int apply_context::db_find_i64( name code, name scope, name table, uint64_t id ) { //require_read_lock( code, scope ); // redundant? const auto* tab = find_table( code, scope, table ); @@ -802,7 +806,7 @@ int apply_context::db_find_i64( uint64_t code, uint64_t scope, uint64_t table, u return keyval_cache.add( *obj ); } -int apply_context::db_lowerbound_i64( uint64_t code, uint64_t scope, uint64_t table, uint64_t id ) { +int apply_context::db_lowerbound_i64( name code, name scope, name table, uint64_t id ) { //require_read_lock( code, scope ); // redundant? const auto* tab = find_table( code, scope, table ); @@ -818,7 +822,7 @@ int apply_context::db_lowerbound_i64( uint64_t code, uint64_t scope, uint64_t ta return keyval_cache.add( *itr ); } -int apply_context::db_upperbound_i64( uint64_t code, uint64_t scope, uint64_t table, uint64_t id ) { +int apply_context::db_upperbound_i64( name code, name scope, name table, uint64_t id ) { //require_read_lock( code, scope ); // redundant? const auto* tab = find_table( code, scope, table ); @@ -834,7 +838,7 @@ int apply_context::db_upperbound_i64( uint64_t code, uint64_t scope, uint64_t ta return keyval_cache.add( *itr ); } -int apply_context::db_end_i64( uint64_t code, uint64_t scope, uint64_t table ) { +int apply_context::db_end_i64( name code, name scope, name table ) { //require_read_lock( code, scope ); // redundant? 
const auto* tab = find_table( code, scope, table ); @@ -880,7 +884,7 @@ action_name apply_context::get_sender() const { const action_trace& creator_trace = trx_context.get_action_trace( trace.creator_action_ordinal ); return creator_trace.receiver; } - return 0; + return action_name(); } } } /// eosio::chain diff --git a/libraries/chain/asset.cpp b/libraries/chain/asset.cpp index 580156f6d21..7176f9dcc2e 100644 --- a/libraries/chain/asset.cpp +++ b/libraries/chain/asset.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index 83988c16657..9e76ff63bf3 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #include #include #include @@ -14,6 +9,7 @@ #include #include #include +#include namespace eosio { namespace chain { @@ -140,6 +136,10 @@ namespace eosio { namespace chain { time_point initial_creation_time ) { + for(const key_weight& k: auth.keys) + EOS_ASSERT(k.key.which() < _db.get().num_supported_key_types, unactivated_key_type, + "Unactivated key type used when creating permission"); + auto creation_time = initial_creation_time; if( creation_time == time_point() ) { creation_time = _control.pending_block_time(); @@ -167,6 +167,10 @@ namespace eosio { namespace chain { time_point initial_creation_time ) { + for(const key_weight& k: auth.keys) + EOS_ASSERT(k.key.which() < _db.get().num_supported_key_types, unactivated_key_type, + "Unactivated key type used when creating permission"); + auto creation_time = initial_creation_time; if( creation_time == time_point() ) { creation_time = _control.pending_block_time(); @@ -188,6 +192,10 @@ namespace eosio { namespace chain { } void authorization_manager::modify_permission( const permission_object& permission, const authority& auth ) { + for(const 
key_weight& k: auth.keys) + EOS_ASSERT(k.key.which() < _db.get().num_supported_key_types, unactivated_key_type, + "Unactivated key type used when modifying permission"); + _db.modify( permission, [&](permission_object& po) { po.auth = auth; po.last_updated = _control.pending_block_time(); @@ -238,7 +246,7 @@ namespace eosio { namespace chain { auto link = _db.find(key); // If no specific link found, check for a contract-wide default if (link == nullptr) { - boost::get<2>(key) = ""; + boost::get<2>(key) = {}; link = _db.find(key); } diff --git a/libraries/chain/block.cpp b/libraries/chain/block.cpp new file mode 100644 index 00000000000..91c93f6c359 --- /dev/null +++ b/libraries/chain/block.cpp @@ -0,0 +1,64 @@ +#include + +namespace eosio { namespace chain { + void additional_block_signatures_extension::reflector_init() { + static_assert( fc::raw::has_feature_reflector_init_on_unpacked_reflected_types, + "additional_block_signatures_extension expects FC to support reflector_init" ); + + EOS_ASSERT( signatures.size() > 0, ill_formed_additional_block_signatures_extension, + "Additional block signatures extension must contain at least one signature", + ); + + set unique_sigs; + + for( const auto& s : signatures ) { + auto res = unique_sigs.insert( s ); + EOS_ASSERT( res.second, ill_formed_additional_block_signatures_extension, + "Signature ${s} was repeated in the additional block signatures extension", + ("s", s) + ); + } + } + + flat_multimap signed_block::validate_and_extract_extensions()const { + using decompose_t = block_extension_types::decompose_t; + + flat_multimap results; + + uint16_t id_type_lower_bound = 0; + + for( size_t i = 0; i < block_extensions.size(); ++i ) { + const auto& e = block_extensions[i]; + auto id = e.first; + + EOS_ASSERT( id >= id_type_lower_bound, invalid_block_extension, + "Block extensions are not in the correct order (ascending id types required)" + ); + + auto iter = results.emplace(std::piecewise_construct, + 
std::forward_as_tuple(id), + std::forward_as_tuple() + ); + + auto match = decompose_t::extract( id, e.second, iter->second ); + EOS_ASSERT( match, invalid_block_extension, + "Block extension with id type ${id} is not supported", + ("id", id) + ); + + if( match->enforce_unique ) { + EOS_ASSERT( i == 0 || id > id_type_lower_bound, invalid_block_header_extension, + "Block extension with id type ${id} is not allowed to repeat", + ("id", id) + ); + } + + + id_type_lower_bound = id; + } + + return results; + + } + +} } /// namespace eosio::chain \ No newline at end of file diff --git a/libraries/chain/block_header.cpp b/libraries/chain/block_header.cpp index 692089dc9e6..0efc7728a1f 100644 --- a/libraries/chain/block_header.cpp +++ b/libraries/chain/block_header.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include @@ -28,14 +24,10 @@ namespace eosio { namespace chain { return result; } - vector block_header::validate_and_extract_header_extensions()const { - using block_header_extensions_t = block_header_extension_types::block_header_extensions_t; + flat_multimap block_header::validate_and_extract_header_extensions()const { using decompose_t = block_header_extension_types::decompose_t; - static_assert( std::is_same::value, - "block_header_extensions is not setup as expected" ); - - vector results; + flat_multimap results; uint16_t id_type_lower_bound = 0; @@ -47,9 +39,12 @@ namespace eosio { namespace chain { "Block header extensions are not in the correct order (ascending id types required)" ); - results.emplace_back(); + auto iter = results.emplace(std::piecewise_construct, + std::forward_as_tuple(id), + std::forward_as_tuple() + ); - auto match = decompose_t::extract( id, e.second, results.back() ); + auto match = decompose_t::extract( id, e.second, iter->second ); EOS_ASSERT( match, invalid_block_header_extension, "Block header extension with id type ${id} is not supported", ("id", id) @@ -62,6 +57,7 @@ namespace 
eosio { namespace chain { ); } + id_type_lower_bound = id; } diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp index 59a1eaa3394..aea87103283 100644 --- a/libraries/chain/block_header_state.cpp +++ b/libraries/chain/block_header_state.cpp @@ -4,12 +4,18 @@ namespace eosio { namespace chain { - - bool block_header_state::is_active_producer( account_name n )const { - return producer_to_last_produced.find(n) != producer_to_last_produced.end(); + namespace detail { + bool is_builtin_activated( const protocol_feature_activation_set_ptr& pfa, + const protocol_feature_set& pfs, + builtin_protocol_feature_t feature_codename ) + { + auto digest = pfs.get_builtin_digest(feature_codename); + const auto& protocol_features = pfa->protocol_features; + return digest && protocol_features.find(*digest) != protocol_features.end(); + } } - producer_key block_header_state::get_scheduled_producer( block_timestamp_type t )const { + producer_authority block_header_state::get_scheduled_producer( block_timestamp_type t )const { auto index = t.slot % (active_schedule.producers.size() * config::producer_repetitions); index /= config::producer_repetitions; return active_schedule.producers[index]; @@ -40,13 +46,13 @@ namespace eosio { namespace chain { (when = header.timestamp).slot++; } - auto prokey = get_scheduled_producer(when); + auto proauth = get_scheduled_producer(when); - auto itr = producer_to_last_produced.find( prokey.producer_name ); + auto itr = producer_to_last_produced.find( proauth.producer_name ); if( itr != producer_to_last_produced.end() ) { EOS_ASSERT( itr->second < (block_num+1) - num_prev_blocks_to_confirm, producer_double_confirm, "producer ${prod} double-confirming known range", - ("prod", prokey.producer_name)("num", block_num+1) + ("prod", proauth.producer_name)("num", block_num+1) ("confirmed", num_prev_blocks_to_confirm)("last_produced", itr->second) ); } @@ -57,8 +63,8 @@ namespace eosio { namespace chain { 
result.active_schedule_version = active_schedule.version; result.prev_activated_protocol_features = activated_protocol_features; - result.block_signing_key = prokey.block_signing_key; - result.producer = prokey.producer_name; + result.valid_block_signing_authority = proauth.authority; + result.producer = proauth.producer_name; result.blockroot_merkle = blockroot_merkle; result.blockroot_merkle.append( id ); @@ -108,7 +114,7 @@ namespace eosio { namespace chain { } result.dpos_proposed_irreversible_blocknum = new_dpos_proposed_irreversible_blocknum; - result.dpos_irreversible_blocknum = calc_dpos_last_irreversible( prokey.producer_name ); + result.dpos_irreversible_blocknum = calc_dpos_last_irreversible( proauth.producer_name ); result.prev_pending_schedule = pending_schedule; @@ -120,7 +126,7 @@ namespace eosio { namespace chain { flat_map new_producer_to_last_produced; for( const auto& pro : result.active_schedule.producers ) { - if( pro.producer_name == prokey.producer_name ) { + if( pro.producer_name == proauth.producer_name ) { new_producer_to_last_produced[pro.producer_name] = result.block_num; } else { auto existing = producer_to_last_produced.find( pro.producer_name ); @@ -131,14 +137,14 @@ namespace eosio { namespace chain { } } } - new_producer_to_last_produced[prokey.producer_name] = result.block_num; + new_producer_to_last_produced[proauth.producer_name] = result.block_num; result.producer_to_last_produced = std::move( new_producer_to_last_produced ); flat_map new_producer_to_last_implied_irb; for( const auto& pro : result.active_schedule.producers ) { - if( pro.producer_name == prokey.producer_name ) { + if( pro.producer_name == proauth.producer_name ) { new_producer_to_last_implied_irb[pro.producer_name] = dpos_proposed_irreversible_blocknum; } else { auto existing = producer_to_last_implied_irb.find( pro.producer_name ); @@ -156,9 +162,9 @@ namespace eosio { namespace chain { } else { result.active_schedule = active_schedule; 
result.producer_to_last_produced = producer_to_last_produced; - result.producer_to_last_produced[prokey.producer_name] = result.block_num; + result.producer_to_last_produced[proauth.producer_name] = result.block_num; result.producer_to_last_implied_irb = producer_to_last_implied_irb; - result.producer_to_last_implied_irb[prokey.producer_name] = dpos_proposed_irreversible_blocknum; + result.producer_to_last_implied_irb[proauth.producer_name] = dpos_proposed_irreversible_blocknum; } return result; @@ -167,8 +173,9 @@ namespace eosio { namespace chain { signed_block_header pending_block_header_state::make_block_header( const checksum256_type& transaction_mroot, const checksum256_type& action_mroot, - optional&& new_producers, - vector&& new_protocol_feature_activations + const optional& new_producers, + vector&& new_protocol_feature_activations, + const protocol_feature_set& pfs )const { signed_block_header h; @@ -180,23 +187,46 @@ namespace eosio { namespace chain { h.transaction_mroot = transaction_mroot; h.action_mroot = action_mroot; h.schedule_version = active_schedule_version; - h.new_producers = std::move(new_producers); if( new_protocol_feature_activations.size() > 0 ) { - h.header_extensions.emplace_back( - protocol_feature_activation::extension_id(), - fc::raw::pack( protocol_feature_activation{ std::move(new_protocol_feature_activations) } ) + emplace_extension( + h.header_extensions, + protocol_feature_activation::extension_id(), + fc::raw::pack( protocol_feature_activation{ std::move(new_protocol_feature_activations) } ) ); } + if (new_producers) { + if ( detail::is_builtin_activated(prev_activated_protocol_features, pfs, builtin_protocol_feature_t::wtmsig_block_signatures) ) { + // add the header extension to update the block schedule + emplace_extension( + h.header_extensions, + producer_schedule_change_extension::extension_id(), + fc::raw::pack( producer_schedule_change_extension( *new_producers ) ) + ); + } else { + legacy::producer_schedule_type 
downgraded_producers; + downgraded_producers.version = new_producers->version; + for (const auto &p : new_producers->producers) { + p.authority.visit([&downgraded_producers, &p](const auto& auth){ + EOS_ASSERT(auth.keys.size() == 1 && auth.keys.front().weight == auth.threshold, producer_schedule_exception, "multisig block signing present before enabled!"); + downgraded_producers.producers.emplace_back(legacy::producer_key{p.producer_name, auth.keys.front().key}); + }); + } + h.new_producers = std::move(downgraded_producers); + } + } + return h; } block_header_state pending_block_header_state::_finish_next( const signed_block_header& h, + const protocol_feature_set& pfs, const std::function&, const vector& )>& validator + )&& { EOS_ASSERT( h.timestamp == timestamp, block_validate_exception, "timestamp mismatch" ); @@ -205,19 +235,48 @@ namespace eosio { namespace chain { EOS_ASSERT( h.producer == producer, wrong_producer, "wrong producer specified" ); EOS_ASSERT( h.schedule_version == active_schedule_version, producer_schedule_exception, "schedule_version in signed block is corrupted" ); + auto exts = h.validate_and_extract_header_extensions(); + + std::optional maybe_new_producer_schedule; + std::optional maybe_new_producer_schedule_hash; + bool wtmsig_enabled = false; + + if (h.new_producers || exts.count(producer_schedule_change_extension::extension_id()) > 0 ) { + wtmsig_enabled = detail::is_builtin_activated(prev_activated_protocol_features, pfs, builtin_protocol_feature_t::wtmsig_block_signatures); + } + if( h.new_producers ) { + EOS_ASSERT(!wtmsig_enabled, producer_schedule_exception, "Block header contains legacy producer schedule outdated by activation of WTMsig Block Signatures" ); + EOS_ASSERT( !was_pending_promoted, producer_schedule_exception, "cannot set pending producer schedule in the same block in which pending was promoted to active" ); - EOS_ASSERT( h.new_producers->version == active_schedule.version + 1, producer_schedule_exception, "wrong 
producer schedule version specified" ); - EOS_ASSERT( prev_pending_schedule.schedule.producers.size() == 0, producer_schedule_exception, + + const auto& new_producers = *h.new_producers; + EOS_ASSERT( new_producers.version == active_schedule.version + 1, producer_schedule_exception, "wrong producer schedule version specified" ); + EOS_ASSERT( prev_pending_schedule.schedule.producers.empty(), producer_schedule_exception, "cannot set new pending producers until last pending is confirmed" ); + + maybe_new_producer_schedule_hash.emplace(digest_type::hash(new_producers)); + maybe_new_producer_schedule.emplace(new_producers); } - protocol_feature_activation_set_ptr new_activated_protocol_features; + if ( exts.count(producer_schedule_change_extension::extension_id()) > 0 ) { + EOS_ASSERT(wtmsig_enabled, producer_schedule_exception, "Block header producer_schedule_change_extension before activation of WTMsig Block Signatures" ); + EOS_ASSERT( !was_pending_promoted, producer_schedule_exception, "cannot set pending producer schedule in the same block in which pending was promoted to active" ); - auto exts = h.validate_and_extract_header_extensions(); - { - if( exts.size() > 0 ) { - const auto& new_protocol_features = exts.front().get().protocol_features; + const auto& new_producer_schedule = exts.lower_bound(producer_schedule_change_extension::extension_id())->second.get(); + + EOS_ASSERT( new_producer_schedule.version == active_schedule.version + 1, producer_schedule_exception, "wrong producer schedule version specified" ); + EOS_ASSERT( prev_pending_schedule.schedule.producers.empty(), producer_schedule_exception, + "cannot set new pending producers until last pending is confirmed" ); + + maybe_new_producer_schedule_hash.emplace(digest_type::hash(new_producer_schedule)); + maybe_new_producer_schedule.emplace(new_producer_schedule); + } + + protocol_feature_activation_set_ptr new_activated_protocol_features; + { // handle protocol_feature_activation + if( 
exts.count(protocol_feature_activation::extension_id()) > 0 ) { + const auto& new_protocol_features = exts.lower_bound(protocol_feature_activation::extension_id())->second.get().protocol_features; validator( timestamp, prev_activated_protocol_features->protocol_features, new_protocol_features ); new_activated_protocol_features = std::make_shared( @@ -238,9 +297,9 @@ namespace eosio { namespace chain { result.header_exts = std::move(exts); - if( h.new_producers ) { - result.pending_schedule.schedule = *h.new_producers; - result.pending_schedule.schedule_hash = digest_type::hash( *h.new_producers ); + if( maybe_new_producer_schedule ) { + result.pending_schedule.schedule = std::move(*maybe_new_producer_schedule); + result.pending_schedule.schedule_hash = std::move(*maybe_new_producer_schedule_hash); result.pending_schedule.schedule_lib_num = block_number; } else { if( was_pending_promoted ) { @@ -259,17 +318,29 @@ namespace eosio { namespace chain { block_header_state pending_block_header_state::finish_next( const signed_block_header& h, + vector&& additional_signatures, + const protocol_feature_set& pfs, const std::function&, const vector& )>& validator, bool skip_validate_signee )&& { - auto result = std::move(*this)._finish_next( h, validator ); + if( !additional_signatures.empty() ) { + bool wtmsig_enabled = detail::is_builtin_activated(prev_activated_protocol_features, pfs, builtin_protocol_feature_t::wtmsig_block_signatures); + + EOS_ASSERT(wtmsig_enabled, producer_schedule_exception, "Block contains multiple signatures before WTMsig block signatures are enabled" ); + } + + auto result = std::move(*this)._finish_next( h, pfs, validator ); + + if( !additional_signatures.empty() ) { + result.additional_signatures = std::move(additional_signatures); + } // ASSUMPTION FROM controller_impl::apply_block = all untrusted blocks will have their signatures pre-validated here if( !skip_validate_signee ) { - result.verify_signee( result.signee() ); + result.verify_signee( 
); } return result; @@ -277,15 +348,24 @@ namespace eosio { namespace chain { block_header_state pending_block_header_state::finish_next( signed_block_header& h, + const protocol_feature_set& pfs, const std::function&, const vector& )>& validator, - const std::function& signer + const signer_callback_type& signer )&& { - auto result = std::move(*this)._finish_next( h, validator ); + auto pfa = prev_activated_protocol_features; + + auto result = std::move(*this)._finish_next( h, pfs, validator ); result.sign( signer ); h.producer_signature = result.header.producer_signature; + + if( !result.additional_signatures.empty() ) { + bool wtmsig_enabled = detail::is_builtin_activated(pfa, pfs, builtin_protocol_feature_t::wtmsig_block_signatures); + EOS_ASSERT(wtmsig_enabled, producer_schedule_exception, "Block was signed with multiple signatures before WTMsig block signatures are enabled" ); + } + return result; } @@ -299,12 +379,14 @@ namespace eosio { namespace chain { */ block_header_state block_header_state::next( const signed_block_header& h, + vector&& _additional_signatures, + const protocol_feature_set& pfs, const std::function&, const vector& )>& validator, bool skip_validate_signee )const { - return next( h.timestamp, h.confirmed ).finish_next( h, validator, skip_validate_signee ); + return next( h.timestamp, h.confirmed ).finish_next( h, std::move(_additional_signatures), pfs, validator, skip_validate_signee ); } digest_type block_header_state::sig_digest()const { @@ -312,21 +394,41 @@ namespace eosio { namespace chain { return digest_type::hash( std::make_pair(header_bmroot, pending_schedule.schedule_hash) ); } - void block_header_state::sign( const std::function& signer ) { + void block_header_state::sign( const signer_callback_type& signer ) { auto d = sig_digest(); - header.producer_signature = signer( d ); - EOS_ASSERT( block_signing_key == fc::crypto::public_key( header.producer_signature, d ), - wrong_signing_key, "block is signed with unexpected key" ); - 
} + auto sigs = signer( d ); - public_key_type block_header_state::signee()const { - return fc::crypto::public_key( header.producer_signature, sig_digest(), true ); + EOS_ASSERT(!sigs.empty(), no_block_signatures, "Signer returned no signatures"); + header.producer_signature = sigs.back(); + sigs.pop_back(); + + additional_signatures = std::move(sigs); + + verify_signee(); } - void block_header_state::verify_signee( const public_key_type& signee )const { - EOS_ASSERT( block_signing_key == signee, wrong_signing_key, - "block not signed by expected key", - ("block_signing_key", block_signing_key)( "signee", signee ) ); + void block_header_state::verify_signee( )const { + std::set keys; + auto digest = sig_digest(); + keys.emplace(fc::crypto::public_key( header.producer_signature, digest, true )); + + for (const auto& s: additional_signatures) { + auto res = keys.emplace(s, digest, true); + EOS_ASSERT(res.second, wrong_signing_key, "block signed by same key twice", ("key", *res.first)); + } + + bool is_satisfied = false; + size_t relevant_sig_count = 0; + + std::tie(is_satisfied, relevant_sig_count) = producer_authority::keys_satisfy_and_relevant(keys, valid_block_signing_authority); + + EOS_ASSERT(relevant_sig_count == keys.size(), wrong_signing_key, + "block signed by unexpected key", + ("signing_keys", keys)("authority", valid_block_signing_authority)); + + EOS_ASSERT(is_satisfied, wrong_signing_key, + "block signatures do not satisfy the block signing authority", + ("signing_keys", keys)("authority", valid_block_signing_authority)); } /** @@ -335,10 +437,30 @@ namespace eosio { namespace chain { const vector& block_header_state::get_new_protocol_feature_activations()const { static const vector no_activations{}; - if( header_exts.size() == 0 || !header_exts.front().contains() ) + if( header_exts.count(protocol_feature_activation::extension_id()) == 0 ) return no_activations; - return header_exts.front().get().protocol_features; + return 
header_exts.lower_bound(protocol_feature_activation::extension_id())->second.get().protocol_features; + } + + block_header_state::block_header_state( legacy::snapshot_block_header_state_v2&& snapshot ) + { + block_num = snapshot.block_num; + dpos_proposed_irreversible_blocknum = snapshot.dpos_proposed_irreversible_blocknum; + dpos_irreversible_blocknum = snapshot.dpos_irreversible_blocknum; + active_schedule = producer_authority_schedule( snapshot.active_schedule ); + blockroot_merkle = std::move(snapshot.blockroot_merkle); + producer_to_last_produced = std::move(snapshot.producer_to_last_produced); + producer_to_last_implied_irb = std::move(snapshot.producer_to_last_implied_irb); + valid_block_signing_authority = block_signing_authority_v0{ 1, {{std::move(snapshot.block_signing_key), 1}} }; + confirm_count = std::move(snapshot.confirm_count); + id = std::move(snapshot.id); + header = std::move(snapshot.header); + pending_schedule.schedule_lib_num = snapshot.pending_schedule.schedule_lib_num; + pending_schedule.schedule_hash = std::move(snapshot.pending_schedule.schedule_hash); + pending_schedule.schedule = producer_authority_schedule( snapshot.pending_schedule.schedule ); + activated_protocol_features = std::move(snapshot.activated_protocol_features); } + } } /// namespace eosio::chain diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index fef0dd3f637..822de1ea8b8 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -1,15 +1,24 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include +#include +#include #include + #define LOG_READ (std::ios::in | std::ios::binary) #define LOG_WRITE (std::ios::out | std::ios::binary | std::ios::app) #define LOG_RW ( std::ios::in | std::ios::out | std::ios::binary ) +#define LOG_WRITE_C "ab+" +#define LOG_RW_C "rb+" + +#ifndef _WIN32 +#define FC_FOPEN(p, m) fopen(p, m) +#else +#define FC_CAT(s1, s2) s1 ## s2 +#define FC_PREL(s) FC_CAT(L, s) 
+#define FC_FOPEN(p, m) _wfopen(p, FC_PREL(m)) +#endif namespace eosio { namespace chain { @@ -20,18 +29,20 @@ namespace eosio { namespace chain { * Version 1: complete block log from genesis * Version 2: adds optional partial block log, cannot be used for replay without snapshot * this is in the form of an first_block_num that is written immediately after the version + * Version 3: improvement on version 2 to not require the genesis state be provided when not starting + * from block 1 */ - const uint32_t block_log::max_supported_version = 2; + const uint32_t block_log::max_supported_version = 3; namespace detail { + using unique_file = std::unique_ptr; + class block_log_impl { public: signed_block_ptr head; block_id_type head_id; - std::fstream block_stream; - std::fstream index_stream; - fc::path block_file; - fc::path index_file; + fc::cfile block_file; + fc::cfile index_file; bool open_files = false; bool genesis_written_to_block_log = false; uint32_t version = 0; @@ -45,35 +56,133 @@ namespace eosio { namespace chain { void reopen(); void close() { - if( block_stream.is_open() ) - block_stream.close(); - if( index_stream.is_open() ) - index_stream.close(); + if( block_file.is_open() ) + block_file.close(); + if( index_file.is_open() ) + index_file.close(); open_files = false; } + + template + void reset( const T& t, const signed_block_ptr& genesis_block, uint32_t first_block_num ); + + void write( const genesis_state& gs ); + + void write( const chain_id_type& chain_id ); + + void flush(); + + uint64_t append(const signed_block_ptr& b); + + template + static fc::optional extract_chain_context( const fc::path& data_dir, Lambda&& lambda ); }; - void block_log_impl::reopen() { + void detail::block_log_impl::reopen() { close(); // open to create files if they don't exist //ilog("Opening block log at ${path}", ("path", my->block_file.generic_string())); - block_stream.open(block_file.generic_string().c_str(), LOG_WRITE); - 
index_stream.open(index_file.generic_string().c_str(), LOG_WRITE); + block_file.open( LOG_WRITE_C ); + index_file.open( LOG_WRITE_C ); close(); - block_stream.open(block_file.generic_string().c_str(), LOG_RW); - index_stream.open(index_file.generic_string().c_str(), LOG_RW); + block_file.open( LOG_RW_C ); + index_file.open( LOG_RW_C ); open_files = true; } + + class reverse_iterator { + public: + reverse_iterator(); + // open a block log file and return the total number of blocks in it + uint32_t open(const fc::path& block_file_name); + uint64_t previous(); + uint32_t version() const { return _version; } + uint32_t first_block_num() const { return _first_block_num; } + constexpr static uint32_t _buf_len = 1U << 24; + private: + void update_buffer(); + + unique_file _file; + uint32_t _version = 0; + uint32_t _first_block_num = 0; + uint32_t _last_block_num = 0; + uint32_t _blocks_found = 0; + uint32_t _blocks_expected = 0; + uint64_t _current_position_in_file = 0; + uint64_t _eof_position_in_file = 0; + uint64_t _end_of_buffer_position = _unset_position; + uint64_t _start_of_buffer_position = 0; + std::unique_ptr _buffer_ptr; + std::string _block_file_name; + constexpr static int64_t _unset_position = -1; + constexpr static uint64_t _position_size = sizeof(_current_position_in_file); + }; + + constexpr uint64_t buffer_location_to_file_location(uint32_t buffer_location) { return buffer_location << 3; } + constexpr uint32_t file_location_to_buffer_location(uint32_t file_location) { return file_location >> 3; } + + class index_writer { + public: + index_writer(const fc::path& block_index_name, uint32_t blocks_expected); + void write(uint64_t pos); + void complete(); + void update_buffer_position(); + constexpr static uint64_t _buffer_bytes = 1U << 22; + private: + void prepare_buffer(); + bool shift_buffer(); + + unique_file _file; + const std::string _block_index_name; + const uint32_t _blocks_expected; + uint32_t _block_written; + std::unique_ptr _buffer_ptr; + 
int64_t _current_position = 0; + int64_t _start_of_buffer_position = 0; + int64_t _end_of_buffer_position = 0; + constexpr static uint64_t _max_buffer_length = file_location_to_buffer_location(_buffer_bytes); + }; + + /* + * @brief datastream adapter that adapts FILE* for use with fc unpack + * + * This class supports unpack functionality but not pack. + */ + class fileptr_datastream { + public: + explicit fileptr_datastream( FILE* file, const std::string& filename ) : _file(file), _filename(filename) {} + + void skip( size_t s ) { + auto status = fseek(_file, s, SEEK_CUR); + EOS_ASSERT( status == 0, block_log_exception, + "Could not seek past ${bytes} bytes in Block log file at '${blocks_log}'. Returned status: ${status}", + ("bytes", s)("blocks_log", _filename)("status", status) ); + } + + bool read( char* d, size_t s ) { + size_t result = fread( d, 1, s, _file ); + EOS_ASSERT( result == s, block_log_exception, + "only able to read ${act} bytes of the expected ${exp} bytes in file: ${file}", + ("act",result)("exp",s)("file", _filename) ); + return true; + } + + bool get( unsigned char& c ) { return get( *(char*)&c ); } + + bool get( char& c ) { return read(&c, 1); } + + private: + FILE* const _file; + const std::string _filename; + }; } block_log::block_log(const fc::path& data_dir) :my(new detail::block_log_impl()) { - my->block_stream.exceptions(std::fstream::failbit | std::fstream::badbit); - my->index_stream.exceptions(std::fstream::failbit | std::fstream::badbit); open(data_dir); } @@ -95,8 +204,8 @@ namespace eosio { namespace chain { if (!fc::is_directory(data_dir)) fc::create_directories(data_dir); - my->block_file = data_dir / "blocks.log"; - my->index_file = data_dir / "blocks.index"; + my->block_file.set_file_path( data_dir / "blocks.log" ); + my->index_file.set_file_path( data_dir / "blocks.index" ); my->reopen(); @@ -118,24 +227,24 @@ namespace eosio { namespace chain { * - If the index file head is not in the log file, delete the index and replay. 
* - If the index file head is in the log, but not up to date, replay from index head. */ - auto log_size = fc::file_size(my->block_file); - auto index_size = fc::file_size(my->index_file); + auto log_size = fc::file_size( my->block_file.get_file_path() ); + auto index_size = fc::file_size( my->index_file.get_file_path() ); if (log_size) { ilog("Log is nonempty"); - my->block_stream.seekg( 0 ); + my->block_file.seek( 0 ); my->version = 0; - my->block_stream.read( (char*)&my->version, sizeof(my->version) ); + my->block_file.read( (char*)&my->version, sizeof(my->version) ); EOS_ASSERT( my->version > 0, block_log_exception, "Block log was not setup properly" ); - EOS_ASSERT( my->version >= min_supported_version && my->version <= max_supported_version, block_log_unsupported_version, - "Unsupported version of block log. Block log version is ${version} while code supports version(s) [${min},${max}]", - ("version", my->version)("min", block_log::min_supported_version)("max", block_log::max_supported_version) ); + EOS_ASSERT( is_supported_version(my->version), block_log_unsupported_version, + "Unsupported version of block log. Block log version is ${version} while code supports version(s) [${min},${max}]", + ("version", my->version)("min", block_log::min_supported_version)("max", block_log::max_supported_version) ); my->genesis_written_to_block_log = true; // Assume it was constructed properly. 
if (my->version > 1){ my->first_block_num = 0; - my->block_stream.read( (char*)&my->first_block_num, sizeof(my->first_block_num) ); + my->block_file.read( (char*)&my->first_block_num, sizeof(my->first_block_num) ); EOS_ASSERT(my->first_block_num > 0, block_log_exception, "Block log is malformed, first recorded block number is 0 but must be greater than or equal to 1"); } else { my->first_block_num = 1; @@ -151,15 +260,15 @@ namespace eosio { namespace chain { if (index_size) { ilog("Index is nonempty"); uint64_t block_pos; - my->block_stream.seekg(-sizeof(uint64_t), std::ios::end); - my->block_stream.read((char*)&block_pos, sizeof(block_pos)); + my->block_file.seek_end(-sizeof(uint64_t)); + my->block_file.read((char*)&block_pos, sizeof(block_pos)); uint64_t index_pos; - my->index_stream.seekg(-sizeof(uint64_t), std::ios::end); - my->index_stream.read((char*)&index_pos, sizeof(index_pos)); + my->index_file.seek_end(-sizeof(uint64_t)); + my->index_file.read((char*)&index_pos, sizeof(index_pos)); if (block_pos < index_pos) { - ilog("block_pos < index_pos, close and reopen index_stream"); + ilog("block_pos < index_pos, close and reopen index_file"); construct_index(); } else if (block_pos > index_pos) { ilog("Index is incomplete"); @@ -172,31 +281,35 @@ namespace eosio { namespace chain { } else if (index_size) { ilog("Index is nonempty, remove and recreate it"); my->close(); - fc::remove_all(my->index_file); + fc::remove_all( my->index_file.get_file_path() ); my->reopen(); } } uint64_t block_log::append(const signed_block_ptr& b) { + return my->append(b); + } + + uint64_t detail::block_log_impl::append(const signed_block_ptr& b) { try { - EOS_ASSERT( my->genesis_written_to_block_log, block_log_append_fail, "Cannot append to block log until the genesis is first written" ); + EOS_ASSERT( genesis_written_to_block_log, block_log_append_fail, "Cannot append to block log until the genesis is first written" ); - my->check_open_files(); + check_open_files(); - 
my->block_stream.seekp(0, std::ios::end); - my->index_stream.seekp(0, std::ios::end); - uint64_t pos = my->block_stream.tellp(); - EOS_ASSERT(my->index_stream.tellp() == sizeof(uint64_t) * (b->block_num() - my->first_block_num), + block_file.seek_end(0); + index_file.seek_end(0); + uint64_t pos = block_file.tellp(); + EOS_ASSERT(index_file.tellp() == sizeof(uint64_t) * (b->block_num() - first_block_num), block_log_append_fail, "Append to index file occuring at wrong position.", - ("position", (uint64_t) my->index_stream.tellp()) - ("expected", (b->block_num() - my->first_block_num) * sizeof(uint64_t))); + ("position", (uint64_t) index_file.tellp()) + ("expected", (b->block_num() - first_block_num) * sizeof(uint64_t))); auto data = fc::raw::pack(*b); - my->block_stream.write(data.data(), data.size()); - my->block_stream.write((char*)&pos, sizeof(pos)); - my->index_stream.write((char*)&pos, sizeof(pos)); - my->head = b; - my->head_id = b->id(); + block_file.write(data.data(), data.size()); + block_file.write((char*)&pos, sizeof(pos)); + index_file.write((char*)&pos, sizeof(pos)); + head = b; + head_id = b->id(); flush(); @@ -206,65 +319,99 @@ namespace eosio { namespace chain { } void block_log::flush() { - my->block_stream.flush(); - my->index_stream.flush(); + my->flush(); } - void block_log::reset( const genesis_state& gs, const signed_block_ptr& first_block, uint32_t first_block_num ) { - my->close(); + void detail::block_log_impl::flush() { + block_file.flush(); + index_file.flush(); + } - fc::remove_all(my->block_file); - fc::remove_all(my->index_file); + template + void detail::block_log_impl::reset( const T& t, const signed_block_ptr& first_block, uint32_t first_bnum ) { + close(); - my->reopen(); + fc::remove_all( block_file.get_file_path() ); + fc::remove_all( index_file.get_file_path() ); - auto data = fc::raw::pack(gs); - my->version = 0; // version of 0 is invalid; it indicates that the genesis was not properly written to the block log - 
my->first_block_num = first_block_num; - my->block_stream.seekp(0, std::ios::end); - my->block_stream.write((char*)&my->version, sizeof(my->version)); - my->block_stream.write((char*)&my->first_block_num, sizeof(my->first_block_num)); - my->block_stream.write(data.data(), data.size()); - my->genesis_written_to_block_log = true; + reopen(); + + version = 0; // version of 0 is invalid; it indicates that subsequent data was not properly written to the block log + first_block_num = first_bnum; + + block_file.seek_end(0); + block_file.write((char*)&version, sizeof(version)); + block_file.write((char*)&first_block_num, sizeof(first_block_num)); + + write(t); + genesis_written_to_block_log = true; // append a totem to indicate the division between blocks and header - auto totem = npos; - my->block_stream.write((char*)&totem, sizeof(totem)); + auto totem = block_log::npos; + block_file.write((char*)&totem, sizeof(totem)); if (first_block) { append(first_block); } else { - my->head.reset(); - my->head_id = {}; + head.reset(); + head_id = {}; } - auto pos = my->block_stream.tellp(); + auto pos = block_file.tellp(); static_assert( block_log::max_supported_version > 0, "a version number of zero is not supported" ); - my->version = block_log::max_supported_version; - my->block_stream.seekp( 0 ); - my->block_stream.write( (char*)&my->version, sizeof(my->version) ); - my->block_stream.seekp( pos ); + + // going back to write correct version to indicate that all block log header data writes completed successfully + version = block_log::max_supported_version; + block_file.seek( 0 ); + block_file.write( (char*)&version, sizeof(version) ); + block_file.seek( pos ); flush(); } - std::pair block_log::read_block(uint64_t pos)const { + void block_log::reset( const genesis_state& gs, const signed_block_ptr& first_block ) { + my->reset(gs, first_block, 1); + } + + void block_log::reset( const chain_id_type& chain_id, uint32_t first_block_num ) { + EOS_ASSERT( first_block_num > 1, 
block_log_exception, + "Block log version ${ver} needs to be created with a genesis state if starting from block number 1.", ("ver", block_log::max_supported_version) ); + my->reset(chain_id, signed_block_ptr(), first_block_num); + } + + void detail::block_log_impl::write( const genesis_state& gs ) { + auto data = fc::raw::pack(gs); + block_file.write(data.data(), data.size()); + } + + void detail::block_log_impl::write( const chain_id_type& chain_id ) { + block_file << chain_id; + } + + signed_block_ptr block_log::read_block(uint64_t pos)const { my->check_open_files(); - my->block_stream.seekg(pos); - std::pair result; - result.first = std::make_shared(); - fc::raw::unpack(my->block_stream, *result.first); - result.second = uint64_t(my->block_stream.tellg()) + 8; + my->block_file.seek(pos); + signed_block_ptr result = std::make_shared(); + auto ds = my->block_file.create_datastream(); + fc::raw::unpack(ds, *result); return result; } + void block_log::read_block_header(block_header& bh, uint64_t pos)const { + my->check_open_files(); + + my->block_file.seek(pos); + auto ds = my->block_file.create_datastream(); + fc::raw::unpack(ds, bh); + } + signed_block_ptr block_log::read_block_by_num(uint32_t block_num)const { try { signed_block_ptr b; uint64_t pos = get_block_pos(block_num); if (pos != npos) { - b = read_block(pos).first; + b = read_block(pos); EOS_ASSERT(b->block_num() == block_num, reversible_blocks_exception, "Wrong block was read from block log.", ("returned", b->block_num())("expected", block_num)); } @@ -272,13 +419,27 @@ namespace eosio { namespace chain { } FC_LOG_AND_RETHROW() } 
FC_LOG_AND_RETHROW() + } + uint64_t block_log::get_block_pos(uint32_t block_num) const { my->check_open_files(); if (!(my->head && block_num <= block_header::num_from_id(my->head_id) && block_num >= my->first_block_num)) return npos; - my->index_stream.seekg(sizeof(uint64_t) * (block_num - my->first_block_num)); + my->index_file.seek(sizeof(uint64_t) * (block_num - my->first_block_num)); uint64_t pos; - my->index_stream.read((char*)&pos, sizeof(pos)); + my->index_file.read((char*)&pos, sizeof(pos)); return pos; } @@ -288,14 +449,14 @@ namespace eosio { namespace chain { uint64_t pos; // Check that the file is not empty - my->block_stream.seekg(0, std::ios::end); - if (my->block_stream.tellg() <= sizeof(pos)) + my->block_file.seek_end(0); + if (my->block_file.tellp() <= sizeof(pos)) return {}; - my->block_stream.seekg(-sizeof(pos), std::ios::end); - my->block_stream.read((char*)&pos, sizeof(pos)); + my->block_file.seek_end(-sizeof(pos)); + my->block_file.read((char*)&pos, sizeof(pos)); if (pos != npos) { - return read_block(pos).first; + return read_block(pos); } else { return {}; } @@ -305,6 +466,10 @@ namespace eosio { namespace chain { return my->head; } + const block_id_type& block_log::head_id()const { + return my->head_id; + } + uint32_t block_log::first_block_num() const { return my->first_block_num; } @@ -313,48 +478,42 @@ namespace eosio { namespace chain { ilog("Reconstructing Block Log Index..."); my->close(); - fc::remove_all(my->index_file); + fc::remove_all( my->index_file.get_file_path() ); my->reopen(); - uint64_t end_pos; - my->block_stream.seekg(-sizeof( uint64_t), std::ios::end); - my->block_stream.read((char*)&end_pos, sizeof(end_pos)); + my->close(); - if( end_pos == npos ) { - ilog( "Block log contains no blocks. No need to construct index." 
); - return; - } + block_log::construct_index(my->block_file.get_file_path(), my->index_file.get_file_path()); + + my->reopen(); + } // construct_index - signed_block tmp; + void block_log::construct_index(const fc::path& block_file_name, const fc::path& index_file_name) { + detail::reverse_iterator block_log_iter; - uint64_t pos = 0; - if (my->version == 1) { - pos = 4; // Skip version which should have already been checked. - } else { - pos = 8; // Skip version and first block offset which should have already been checked - } - my->block_stream.seekg(pos); + ilog("Will read existing blocks.log file ${file}", ("file", block_file_name.generic_string())); + ilog("Will write new blocks.index file ${file}", ("file", index_file_name.generic_string())); - genesis_state gs; - fc::raw::unpack(my->block_stream, gs); + const uint32_t num_blocks = block_log_iter.open(block_file_name); - // skip the totem - if (my->version > 1) { - uint64_t totem; - my->block_stream.read((char*) &totem, sizeof(totem)); + ilog("block log version= ${version}", ("version", block_log_iter.version())); + + if (num_blocks == 0) { + return; } - my->index_stream.seekp(0, std::ios::end); - while( pos < end_pos ) { - fc::raw::unpack(my->block_stream, tmp); - my->block_stream.read((char*)&pos, sizeof(pos)); - if(tmp.block_num() % 1000 == 0) - ilog( "Block log index reconstructed for block ${n}", ("n", tmp.block_num())); - my->index_stream.write((char*)&pos, sizeof(pos)); + ilog("first block= ${first} last block= ${last}", + ("first", block_log_iter.first_block_num())("last", (block_log_iter.first_block_num() + num_blocks))); + + detail::index_writer index(index_file_name, num_blocks); + uint64_t position; + while ((position = block_log_iter.previous()) != npos) { + index.write(position); } - } // construct_index + index.complete(); + } fc::path block_log::repair_log( const fc::path& data_dir, uint32_t truncate_at_block ) { ilog("Recovering Block Log..."); @@ -397,7 +556,7 @@ namespace eosio { namespace 
chain { uint32_t version = 0; old_block_stream.read( (char*)&version, sizeof(version) ); EOS_ASSERT( version > 0, block_log_exception, "Block log was not setup properly" ); - EOS_ASSERT( version >= min_supported_version && version <= max_supported_version, block_log_unsupported_version, + EOS_ASSERT( is_supported_version(version), block_log_unsupported_version, "Unsupported version of block log. Block log version is ${version} while code supports version(s) [${min},${max}]", ("version", version)("min", block_log::min_supported_version)("max", block_log::max_supported_version) ); @@ -406,14 +565,37 @@ namespace eosio { namespace chain { uint32_t first_block_num = 1; if (version != 1) { old_block_stream.read ( (char*)&first_block_num, sizeof(first_block_num) ); + + // this assert is only here since repair_log is only used for --hard-replay-blockchain, which removes any + // existing state, if another API needs to use it, this can be removed and the check for the first block's + // previous block id will need to accommodate this. + EOS_ASSERT( first_block_num == 1, block_log_exception, + "Block log ${file} must contain a genesis state and start at block number 1. 
This block log " + "starts at block number ${first_block_num}.", + ("file", (backup_dir / "blocks.log").generic_string())("first_block_num", first_block_num)); + new_block_stream.write( (char*)&first_block_num, sizeof(first_block_num) ); } - genesis_state gs; - fc::raw::unpack(old_block_stream, gs); + if (contains_genesis_state(version, first_block_num)) { + genesis_state gs; + fc::raw::unpack(old_block_stream, gs); + + auto data = fc::raw::pack( gs ); + new_block_stream.write( data.data(), data.size() ); + } + else if (contains_chain_id(version, first_block_num)) { + chain_id_type chain_id; + old_block_stream >> chain_id; - auto data = fc::raw::pack( gs ); - new_block_stream.write( data.data(), data.size() ); + new_block_stream << chain_id; + } + else { + EOS_THROW( block_log_exception, + "Block log ${file} is not supported. version: ${ver} and first_block_num: ${fbn} does not contain " + "a genesis_state nor a chain_id.", + ("file", (backup_dir / "blocks.log").generic_string())("ver", version)("fbn", first_block_num)); + } if (version != 1) { auto expected_totem = npos; @@ -519,28 +701,487 @@ namespace eosio { namespace chain { return backup_dir; } - genesis_state block_log::extract_genesis_state( const fc::path& data_dir ) { + template + fc::optional detail::block_log_impl::extract_chain_context( const fc::path& data_dir, Lambda&& lambda ) { EOS_ASSERT( fc::is_directory(data_dir) && fc::is_regular_file(data_dir / "blocks.log"), block_log_not_found, - "Block log not found in '${blocks_dir}'", ("blocks_dir", data_dir) ); + "Block log not found in '${blocks_dir}'", ("blocks_dir", data_dir) ); std::fstream block_stream; block_stream.open( (data_dir / "blocks.log").generic_string().c_str(), LOG_READ ); uint32_t version = 0; block_stream.read( (char*)&version, sizeof(version) ); - EOS_ASSERT( version > 0, block_log_exception, "Block log was not setup properly." 
); - EOS_ASSERT( version >= min_supported_version && version <= max_supported_version, block_log_unsupported_version, - "Unsupported version of block log. Block log version is ${version} while code supports version(s) [${min},${max}]", - ("version", version)("min", block_log::min_supported_version)("max", block_log::max_supported_version) ); + EOS_ASSERT( version >= block_log::min_supported_version && version <= block_log::max_supported_version, block_log_unsupported_version, + "Unsupported version of block log. Block log version is ${version} while code supports version(s) [${min},${max}]", + ("version", version)("min", block_log::min_supported_version)("max", block_log::max_supported_version) ); uint32_t first_block_num = 1; if (version != 1) { block_stream.read ( (char*)&first_block_num, sizeof(first_block_num) ); } - genesis_state gs; - fc::raw::unpack(block_stream, gs); - return gs; + return lambda(block_stream, version, first_block_num); + } + + fc::optional block_log::extract_genesis_state( const fc::path& data_dir ) { + return detail::block_log_impl::extract_chain_context(data_dir, [](std::fstream& block_stream, uint32_t version, uint32_t first_block_num ) -> fc::optional { + if (contains_genesis_state(version, first_block_num)) { + genesis_state gs; + fc::raw::unpack(block_stream, gs); + return gs; + } + + // current versions only have a genesis state if they start with block number 1 + return fc::optional(); + }); + } + + chain_id_type block_log::extract_chain_id( const fc::path& data_dir ) { + return *(detail::block_log_impl::extract_chain_context(data_dir, [](std::fstream& block_stream, uint32_t version, uint32_t first_block_num ) -> fc::optional { + // supported versions either contain a genesis state, or else the chain id only + if (contains_genesis_state(version, first_block_num)) { + genesis_state gs; + fc::raw::unpack(block_stream, gs); + return gs.compute_chain_id(); + } + EOS_ASSERT( contains_chain_id(version, first_block_num), 
block_log_exception, + "Block log error! version: ${version} with first_block_num: ${num} does not contain a " + "chain id or genesis state, so the chain id cannot be determined.", + ("version", version)("num", first_block_num) ); + chain_id_type chain_id; + fc::raw::unpack(block_stream, chain_id); + return chain_id; + })); + } + + detail::reverse_iterator::reverse_iterator() + : _file(nullptr, &fclose) + , _buffer_ptr(std::make_unique(_buf_len)) { + } + + uint32_t detail::reverse_iterator::open(const fc::path& block_file_name) { + _block_file_name = block_file_name.generic_string(); + _file.reset( FC_FOPEN(_block_file_name.c_str(), "r")); + EOS_ASSERT( _file, block_log_exception, "Could not open Block log file at '${blocks_log}'", ("blocks_log", _block_file_name) ); + _end_of_buffer_position = _unset_position; + + //read block log to see if version 1 or 2 and get first blocknum (implicit 1 if version 1) + _version = 0; + auto size = fread((char*)&_version, sizeof(_version), 1, _file.get()); + EOS_ASSERT( size == 1, block_log_exception, "Block log file at '${blocks_log}' could not be read.", ("file", _block_file_name) ); + EOS_ASSERT( block_log::is_supported_version(_version), block_log_unsupported_version, + "block log version ${v} is not supported", ("v", _version)); + if (_version == 1) { + _first_block_num = 1; + } + else { + size = fread((char*)&_first_block_num, sizeof(_first_block_num), 1, _file.get()); + EOS_ASSERT( size == 1, block_log_exception, "Block log file at '${blocks_log}' not formatted consistently with version ${v}.", ("file", _block_file_name)("v", _version) ); + } + + auto status = fseek(_file.get(), 0, SEEK_END); + EOS_ASSERT( status == 0, block_log_exception, "Could not open Block log file at '${blocks_log}'. 
Returned status: ${status}", ("blocks_log", _block_file_name)("status", status) ); + + _eof_position_in_file = ftell(_file.get()); + EOS_ASSERT( _eof_position_in_file > 0, block_log_exception, "Block log file at '${blocks_log}' could not be read.", ("blocks_log", _block_file_name) ); + _current_position_in_file = _eof_position_in_file - _position_size; + + update_buffer(); + + _blocks_found = 0; + char* buf = _buffer_ptr.get(); + const uint32_t index_of_pos = _current_position_in_file - _start_of_buffer_position; + const uint64_t block_pos = *reinterpret_cast(buf + index_of_pos); + + if (block_pos == block_log::npos) { + return 0; + } + + uint32_t bnum = 0; + if (block_pos >= _start_of_buffer_position) { + const uint32_t index_of_block = block_pos - _start_of_buffer_position; + bnum = *reinterpret_cast(buf + index_of_block + trim_data::blknum_offset); //block number of previous block (is big endian) + } + else { + const auto blknum_offset_pos = block_pos + trim_data::blknum_offset; + auto status = fseek(_file.get(), blknum_offset_pos, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "Could not seek in '${blocks_log}' to position: ${pos}. 
Returned status: ${status}", ("blocks_log", _block_file_name)("pos", blknum_offset_pos)("status", status) ); + auto size = fread((void*)&bnum, sizeof(bnum), 1, _file.get()); + EOS_ASSERT( size == 1, block_log_exception, "Could not read in '${blocks_log}' at position: ${pos}", ("blocks_log", _block_file_name)("pos", blknum_offset_pos) ); + } + _last_block_num = fc::endian_reverse_u32(bnum) + 1; //convert from big endian to little endian and add 1 + _blocks_expected = _last_block_num - _first_block_num + 1; + return _blocks_expected; + } + + uint64_t detail::reverse_iterator::previous() { + EOS_ASSERT( _current_position_in_file != block_log::npos, + block_log_exception, + "Block log file at '${blocks_log}' first block already returned by former call to previous(), it is no longer valid to call this function.", ("blocks_log", _block_file_name) ); + + if (_version == 1 && _blocks_found == _blocks_expected) { + _current_position_in_file = block_log::npos; + return _current_position_in_file; + } + + if (_start_of_buffer_position > _current_position_in_file) { + update_buffer(); + } + + char* buf = _buffer_ptr.get(); + auto offset = _current_position_in_file - _start_of_buffer_position; + uint64_t block_location_in_file = *reinterpret_cast(buf + offset); + + ++_blocks_found; + if (block_location_in_file == block_log::npos) { + _current_position_in_file = block_location_in_file; + EOS_ASSERT( _blocks_found != _blocks_expected, + block_log_exception, + "Block log file at '${blocks_log}' formatting indicated last block: ${last_block_num}, first block: ${first_block_num}, but found ${num} blocks", + ("blocks_log", _block_file_name)("last_block_num", _last_block_num)("first_block_num", _first_block_num)("num", _blocks_found) ); + } + else { + const uint64_t previous_position_in_file = _current_position_in_file; + _current_position_in_file = block_location_in_file - _position_size; + EOS_ASSERT( _current_position_in_file < previous_position_in_file, + block_log_exception, + 
"Block log file at '${blocks_log}' formatting is incorrect, indicates position later location in file: ${pos}, which was retrieved at: ${orig_pos}.", + ("blocks_log", _block_file_name)("pos", _current_position_in_file)("orig_pos", previous_position_in_file) ); + } + + return block_location_in_file; + } + + void detail::reverse_iterator::update_buffer() { + EOS_ASSERT( _current_position_in_file != block_log::npos, block_log_exception, "Block log file not setup properly" ); + + // since we need to read in a new section, just need to ensure the next position is at the very end of the buffer + _end_of_buffer_position = _current_position_in_file + _position_size; + if (_end_of_buffer_position < _buf_len) { + _start_of_buffer_position = 0; + } + else { + _start_of_buffer_position = _end_of_buffer_position - _buf_len; + } + + auto status = fseek(_file.get(), _start_of_buffer_position, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "Could not seek in '${blocks_log}' to position: ${pos}. 
Returned status: ${status}", ("blocks_log", _block_file_name)("pos", _start_of_buffer_position)("status", status) ); + char* buf = _buffer_ptr.get(); + auto size = fread((void*)buf, (_end_of_buffer_position - _start_of_buffer_position), 1, _file.get());//read tail of blocks.log file into buf + EOS_ASSERT( size == 1, block_log_exception, "blocks.log read fails" ); + } + + detail::index_writer::index_writer(const fc::path& block_index_name, uint32_t blocks_expected) + : _file(nullptr, &fclose) + , _block_index_name(block_index_name.generic_string()) + , _blocks_expected(blocks_expected) + , _block_written(blocks_expected) + , _buffer_ptr(std::make_unique(_max_buffer_length)) { + } + + void detail::index_writer::write(uint64_t pos) { + prepare_buffer(); + uint64_t* buffer = _buffer_ptr.get(); + buffer[_current_position - _start_of_buffer_position] = pos; + --_current_position; + if ((_block_written & 0xfffff) == 0) { //periodically print a progress indicator + ilog("block: ${block_written} position in file: ${pos}", ("block_written", _block_written)("pos",pos)); + } + --_block_written; + } + + void detail::index_writer::prepare_buffer() { + if (_file == nullptr) { + _file.reset(FC_FOPEN(_block_index_name.c_str(), "w")); + EOS_ASSERT( _file, block_log_exception, "Could not open Block index file at '${blocks_index}'", ("blocks_index", _block_index_name) ); + // allocate 8 bytes for each block position to store + const auto full_file_size = buffer_location_to_file_location(_blocks_expected); + auto status = fseek(_file.get(), full_file_size, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "Could not allocate in '${blocks_index}' storage for all the blocks, size: ${size}. 
Returned status: ${status}", ("blocks_index", _block_index_name)("size", full_file_size)("status", status) ); + const auto block_end = file_location_to_buffer_location(full_file_size); + _current_position = block_end - 1; + update_buffer_position(); + } + + shift_buffer(); + } + + bool detail::index_writer::shift_buffer() { + if (_current_position >= _start_of_buffer_position) { + return false; + } + + const auto file_location_start = buffer_location_to_file_location(_start_of_buffer_position); + + auto status = fseek(_file.get(), file_location_start, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "Could not navigate in '${blocks_index}' file_location_start: ${loc}, _start_of_buffer_position: ${_start_of_buffer_position}. Returned status: ${status}", ("blocks_index", _block_index_name)("loc", file_location_start)("_start_of_buffer_position",_start_of_buffer_position)("status", status) ); + + const auto buffer_size = _end_of_buffer_position - _start_of_buffer_position; + const auto file_size = buffer_location_to_file_location(buffer_size); + uint64_t* buf = _buffer_ptr.get(); + auto size = fwrite((void*)buf, file_size, 1, _file.get()); + EOS_ASSERT( size == 1, block_log_exception, "Writing Block Index file '${file}' failed at location: ${loc}", ("file", _block_index_name)("loc", file_location_start) ); + update_buffer_position(); + return true; + } + + void detail::index_writer::complete() { + const bool shifted = shift_buffer(); + EOS_ASSERT(shifted, block_log_exception, "Failed to write buffer to '${blocks_index}'", ("blocks_index", _block_index_name) ); + EOS_ASSERT(_current_position == -1, + block_log_exception, + "Should have written buffer, starting at the 0 index block position, to '${blocks_index}' but instead writing ${pos} position", + ("blocks_index", _block_index_name)("pos", _current_position) ); + } + + void detail::index_writer::update_buffer_position() { + _end_of_buffer_position = _current_position + 1; + if (_end_of_buffer_position < 
_max_buffer_length) { + _start_of_buffer_position = 0; + } + else { + _start_of_buffer_position = _end_of_buffer_position - _max_buffer_length; + } + } + + bool block_log::contains_genesis_state(uint32_t version, uint32_t first_block_num) { + return version <= 2 || first_block_num == 1; + } + + bool block_log::contains_chain_id(uint32_t version, uint32_t first_block_num) { + return version >= 3 && first_block_num > 1; + } + + bool block_log::is_supported_version(uint32_t version) { + return std::clamp(version, min_supported_version, max_supported_version) == version; + } + + bool block_log::trim_blocklog_front(const fc::path& block_dir, const fc::path& temp_dir, uint32_t truncate_at_block) { + using namespace std; + EOS_ASSERT( block_dir != temp_dir, block_log_exception, "block_dir and temp_dir need to be different directories" ); + ilog("In directory ${dir} will trim all blocks before block ${n} from blocks.log and blocks.index.", + ("dir", block_dir.generic_string())("n", truncate_at_block)); + trim_data original_block_log(block_dir); + if (truncate_at_block <= original_block_log.first_block) { + ilog("There are no blocks before block ${n} so do nothing.", ("n", truncate_at_block)); + return false; + } + if (truncate_at_block > original_block_log.last_block) { + ilog("All blocks are before block ${n} so do nothing (trim front would delete entire blocks.log).", ("n", truncate_at_block)); + return false; + } + + // ****** create the new block log file and write out the header for the file + fc::create_directories(temp_dir); + fc::path new_block_filename = temp_dir / "blocks.log"; + if (fc::remove(new_block_filename)) { + ilog("Removing old blocks.out file"); + } + fc::cfile new_block_file; + new_block_file.set_file_path(new_block_filename); + // need to open as append since the file doesn't already exist, then reopen without append to allow writing the + // file in any order + new_block_file.open( LOG_WRITE_C ); + new_block_file.close(); + new_block_file.open( 
LOG_RW_C ); + + static_assert( block_log::max_supported_version == 3, + "Code was written to support version 3 format, need to update this code for latest format." ); + uint32_t version = block_log::max_supported_version; + new_block_file.seek(0); + new_block_file.write((char*)&version, sizeof(version)); + new_block_file.write((char*)&truncate_at_block, sizeof(truncate_at_block)); + + new_block_file << original_block_log.chain_id; + + // append a totem to indicate the division between blocks and header + auto totem = block_log::npos; + new_block_file.write((char*)&totem, sizeof(totem)); + + const auto new_block_file_first_block_pos = new_block_file.tellp(); + // ****** end of new block log header + + // copy over remainder of block log to new block log + auto buffer = make_unique(detail::reverse_iterator::_buf_len); + char* buf = buffer.get(); + + // offset bytes to shift from old blocklog position to new blocklog position + const uint64_t original_file_block_pos = original_block_log.block_pos(truncate_at_block); + const uint64_t pos_delta = original_file_block_pos - new_block_file_first_block_pos; + auto status = fseek(original_block_log.blk_in, 0, SEEK_END); + EOS_ASSERT( status == 0, block_log_exception, "blocks.log seek failed" ); + + // all blocks to copy to the new blocklog + const uint64_t to_write = ftell(original_block_log.blk_in) - original_file_block_pos; + const auto pos_size = sizeof(uint64_t); + + // start with the last block's position stored at the end of the block + uint64_t original_pos = ftell(original_block_log.blk_in) - pos_size; + + const auto num_blocks = original_block_log.last_block - truncate_at_block + 1; + + fc::path new_index_filename = temp_dir / "blocks.index"; + detail::index_writer index(new_index_filename, num_blocks); + + uint64_t read_size = 0; + for(uint64_t to_write_remaining = to_write; to_write_remaining > 0; to_write_remaining -= read_size) { + read_size = to_write_remaining; + if (read_size > 
detail::reverse_iterator::_buf_len) { + read_size = detail::reverse_iterator::_buf_len; + } + + // read in the previous contiguous memory into the read buffer + const auto start_of_blk_buffer_pos = original_file_block_pos + to_write_remaining - read_size; + status = fseek(original_block_log.blk_in, start_of_blk_buffer_pos, SEEK_SET); + const auto num_read = fread(buf, read_size, 1, original_block_log.blk_in); + EOS_ASSERT( num_read == 1, block_log_exception, "blocks.log read failed" ); + + // walk this memory section to adjust block position to match the adjusted location + // of the block start and store in the new index file + while(original_pos >= start_of_blk_buffer_pos) { + const auto buffer_index = original_pos - start_of_blk_buffer_pos; + uint64_t& pos_content = *(uint64_t*)(buf + buffer_index); + const auto start_of_this_block = pos_content; + pos_content = start_of_this_block - pos_delta; + index.write(pos_content); + original_pos = start_of_this_block - pos_size; + } + new_block_file.seek(new_block_file_first_block_pos + to_write_remaining - read_size); + new_block_file.write(buf, read_size); + } + index.complete(); + fclose(original_block_log.blk_in); + original_block_log.blk_in = nullptr; + new_block_file.flush(); + new_block_file.close(); + + fc::path old_log = temp_dir / "old.log"; + rename(original_block_log.block_file_name, old_log); + rename(new_block_filename, original_block_log.block_file_name); + fc::path old_ind = temp_dir / "old.index"; + rename(original_block_log.index_file_name, old_ind); + rename(new_index_filename, original_block_log.index_file_name); + + return true; + } + + trim_data::trim_data(fc::path block_dir) { + + // code should follow logic in block_log::repair_log + + using namespace std; + block_file_name = block_dir / "blocks.log"; + index_file_name = block_dir / "blocks.index"; + blk_in = FC_FOPEN(block_file_name.generic_string().c_str(), "rb"); + EOS_ASSERT( blk_in != nullptr, block_log_not_found, "cannot read file ${file}", 
("file",block_file_name.string()) ); + ind_in = FC_FOPEN(index_file_name.generic_string().c_str(), "rb"); + EOS_ASSERT( ind_in != nullptr, block_log_not_found, "cannot read file ${file}", ("file",index_file_name.string()) ); + auto size = fread((void*)&version,sizeof(version), 1, blk_in); + EOS_ASSERT( size == 1, block_log_unsupported_version, "invalid format for file ${file}", ("file",block_file_name.string())); + ilog("block log version= ${version}",("version",version)); + EOS_ASSERT( block_log::is_supported_version(version), block_log_unsupported_version, "block log version ${v} is not supported", ("v",version)); + + detail::fileptr_datastream ds(blk_in, block_file_name.string()); + if (version == 1) { + first_block = 1; + genesis_state gs; + fc::raw::unpack(ds, gs); + chain_id = gs.compute_chain_id(); + } + else { + size = fread((void *) &first_block, sizeof(first_block), 1, blk_in); + EOS_ASSERT(size == 1, block_log_exception, "invalid format for file ${file}", + ("file", block_file_name.string())); + if (block_log::contains_genesis_state(version, first_block)) { + genesis_state gs; + fc::raw::unpack(ds, gs); + chain_id = gs.compute_chain_id(); + } + else if (block_log::contains_chain_id(version, first_block)) { + ds >> chain_id; + } + else { + EOS_THROW( block_log_exception, + "Block log ${file} is not supported. 
version: ${ver} and first_block: ${first_block} does not contain " + "a genesis_state nor a chain_id.", + ("file", block_file_name.string())("ver", version)("first_block", first_block)); + } + + const auto expected_totem = block_log::npos; + std::decay_t actual_totem; + size = fread ( (char*)&actual_totem, sizeof(actual_totem), 1, blk_in); + + EOS_ASSERT(size == 1, block_log_exception, + "Expected to read ${size} bytes, but did not read any bytes", ("size", sizeof(actual_totem))); + EOS_ASSERT(actual_totem == expected_totem, block_log_exception, + "Expected separator between block log header and blocks was not found( expected: ${e}, actual: ${a} )", + ("e", fc::to_hex((char*)&expected_totem, sizeof(expected_totem) ))("a", fc::to_hex((char*)&actual_totem, sizeof(actual_totem) ))); + } + + const uint64_t start_of_blocks = ftell(blk_in); + + const auto status = fseek(ind_in, 0, SEEK_END); //get length of blocks.index (gives number of blocks) + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} end", ("file", index_file_name.string()) ); + const uint64_t file_end = ftell(ind_in); //get length of blocks.index (gives number of blocks) + last_block = first_block + file_end/sizeof(uint64_t) - 1; + + first_block_pos = block_pos(first_block); + EOS_ASSERT(start_of_blocks == first_block_pos, block_log_exception, + "Block log ${file} was determined to have its first block at ${determined}, but the block index " + "indicates the first block is at ${index}", + ("file", block_file_name.string())("determined", start_of_blocks)("index",first_block_pos)); + ilog("first block= ${first}",("first",first_block)); + ilog("last block= ${last}",("last",last_block)); + } + + trim_data::~trim_data() { + if (blk_in != nullptr) + fclose(blk_in); + if (ind_in != nullptr) + fclose(ind_in); + } + + uint64_t trim_data::block_index(uint32_t n) const { + using namespace std; + EOS_ASSERT( first_block <= n, block_log_exception, + "cannot seek in ${file} to block number ${b}, block 
number ${first} is the first block", + ("file", index_file_name.string())("b",n)("first",first_block) ); + EOS_ASSERT( n <= last_block, block_log_exception, + "cannot seek in ${file} to block number ${b}, block number ${last} is the last block", + ("file", index_file_name.string())("b",n)("last",last_block) ); + return sizeof(uint64_t) * (n - first_block); + } + + uint64_t trim_data::block_pos(uint32_t n) { + using namespace std; + // can indicate the location of the block after the last block + if (n == last_block + 1) { + return ftell(blk_in); + } + const uint64_t index_pos = block_index(n); + auto status = fseek(ind_in, index_pos, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file for block ${b}", ("file", index_file_name.string())("pos", index_pos)("b",n) ); + const uint64_t pos = ftell(ind_in); + EOS_ASSERT( pos == index_pos, block_log_exception, "cannot seek to ${file} entry for block ${b}", ("file", index_file_name.string())("b",n) ); + uint64_t block_n_pos; + auto size = fread((void*)&block_n_pos, sizeof(block_n_pos), 1, ind_in); //filepos of block n + EOS_ASSERT( size == 1, block_log_exception, "cannot read ${file} entry for block ${b}", ("file", index_file_name.string())("b",n) ); + + //read blocks.log and verify block number n is found at the determined file position + const auto calc_blknum_pos = block_n_pos + blknum_offset; + status = fseek(blk_in, calc_blknum_pos, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", block_file_name.string())("pos", calc_blknum_pos) ); + const uint64_t block_offset_pos = ftell(blk_in); + EOS_ASSERT( block_offset_pos == calc_blknum_pos, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", block_file_name.string())("pos", calc_blknum_pos) ); + uint32_t prior_blknum; + size = fread((void*)&prior_blknum, sizeof(prior_blknum), 1, blk_in); //read bigendian block 
number of prior block + EOS_ASSERT( size == 1, block_log_exception, "cannot read prior block"); + const uint32_t bnum = fc::endian_reverse_u32(prior_blknum) + 1; //convert to little endian, add 1 since prior block + EOS_ASSERT( bnum == n, block_log_exception, + "At position ${pos} in ${file} expected to find ${exp_bnum} but found ${act_bnum}", + ("pos",block_offset_pos)("file", block_file_name.string())("exp_bnum",n)("act_bnum",bnum) ); + + return block_n_pos; } -} } /// eosio::chain + } } /// eosio::chain diff --git a/libraries/chain/block_state.cpp b/libraries/chain/block_state.cpp index 3a246038149..8a616ff305b 100644 --- a/libraries/chain/block_state.cpp +++ b/libraries/chain/block_state.cpp @@ -3,42 +3,102 @@ namespace eosio { namespace chain { + namespace { + constexpr auto additional_sigs_eid = additional_block_signatures_extension::extension_id(); + + /** + * Given a complete signed block, extract the validated additional signatures if present; + * + * @param b complete signed block + * @param pfs protocol feature set for digest access + * @param pfa activated protocol feature set to determine if extensions are allowed + * @return the list of additional signatures + * @throws if additional signatures are present before being supported by protocol feature activations + */ + vector extract_additional_signatures( const signed_block_ptr& b, + const protocol_feature_set& pfs, + const protocol_feature_activation_set_ptr& pfa ) + { + auto exts = b->validate_and_extract_extensions(); + + if ( exts.count(additional_sigs_eid) > 0 ) { + auto& additional_sigs = exts.lower_bound(additional_sigs_eid)->second.get(); + + return std::move(additional_sigs.signatures); + } + + return {}; + } + + /** + * Given a pending block header state, wrap the promotion to a block header state such that additional signatures + * can be allowed based on activations *prior* to the promoted block and properly injected into the signed block + * that is previously constructed and mutated by 
the promotion + * + * This cleans up lifetime issues involved with accessing activated protocol features and moving from the + * pending block header state + * + * @param cur the pending block header state to promote + * @param b the signed block that will receive signatures during this process + * @param pfs protocol feature set for digest access + * @param extras all the remaining parameters that pass through + * @return the block header state + * @throws if the block was signed with multiple signatures before the extension is allowed + */ + + template + block_header_state inject_additional_signatures( pending_block_header_state&& cur, + signed_block& b, + const protocol_feature_set& pfs, + Extras&& ... extras ) + { + auto pfa = cur.prev_activated_protocol_features; + block_header_state result = std::move(cur).finish_next(b, pfs, std::forward(extras)...); + + if (!result.additional_signatures.empty()) { + bool wtmsig_enabled = detail::is_builtin_activated(pfa, pfs, builtin_protocol_feature_t::wtmsig_block_signatures); + + EOS_ASSERT(wtmsig_enabled, block_validate_exception, + "Block has multiple signatures before activation of WTMsig Block Signatures"); + + // as an optimization we don't copy this out into the legitimate extension structure as it serializes + // the same way as the vector of signatures + static_assert(fc::reflector::total_member_count == 1); + static_assert(std::is_same_v>); + + emplace_extension(b.block_extensions, additional_sigs_eid, fc::raw::pack( result.additional_signatures )); + } + + return result; + } + + } + block_state::block_state( const block_header_state& prev, signed_block_ptr b, + const protocol_feature_set& pfs, const std::function&, const vector& )>& validator, bool skip_validate_signee ) - :block_header_state( prev.next( *b, validator, skip_validate_signee ) ) + :block_header_state( prev.next( *b, extract_additional_signatures(b, pfs, prev.activated_protocol_features), pfs, validator, skip_validate_signee ) ) ,block( 
std::move(b) ) {} block_state::block_state( pending_block_header_state&& cur, signed_block_ptr&& b, vector&& trx_metas, + const protocol_feature_set& pfs, const std::function&, const vector& )>& validator, - const std::function& signer + const signer_callback_type& signer ) - :block_header_state( std::move(cur).finish_next( *b, validator, signer ) ) + :block_header_state( inject_additional_signatures( std::move(cur), *b, pfs, validator, signer ) ) ,block( std::move(b) ) - ,trxs( std::move(trx_metas) ) - {} - - - block_state::block_state( pending_block_header_state&& cur, - const signed_block_ptr& b, - vector&& trx_metas, - const std::function&, - const vector& )>& validator, - bool skip_validate_signee - ) - :block_header_state( std::move(cur).finish_next( *b, validator, skip_validate_signee ) ) - ,block( b ) - ,trxs( std::move(trx_metas) ) + ,_pub_keys_recovered( true ) // called by produce_block so signature recovery of trxs must have been done + ,_cached_trxs( std::move(trx_metas) ) {} } } /// eosio::chain diff --git a/libraries/chain/chain_config.cpp b/libraries/chain/chain_config.cpp index 35c77f8325d..5aa72c2b9b1 100644 --- a/libraries/chain/chain_config.cpp +++ b/libraries/chain/chain_config.cpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #include #include diff --git a/libraries/chain/chain_id_type.cpp b/libraries/chain/chain_id_type.cpp index 634d4623c98..bd0b4226b84 100644 --- a/libraries/chain/chain_id_type.cpp +++ b/libraries/chain/chain_id_type.cpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #include #include diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 5471739f69f..480ea01cbd1 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -17,12 +17,14 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include @@ -30,6 +32,8 @@ #include #include +#include + namespace eosio { 
namespace chain { using resource_limits::resource_limits_manager; @@ -45,7 +49,8 @@ using controller_index_set = index_set< transaction_multi_index, generated_transaction_multi_index, table_id_multi_index, - code_index + code_index, + database_header_multi_index >; using contract_database_index_set = index_set< @@ -111,13 +116,13 @@ struct building_block { ,_new_protocol_feature_activations( new_protocol_feature_activations ) {} - pending_block_header_state _pending_block_header_state; - optional _new_pending_producer_schedule; - vector _new_protocol_feature_activations; - size_t _num_new_protocol_features_that_have_activated = 0; - vector _pending_trx_metas; - vector _pending_trx_receipts; - vector _actions; + pending_block_header_state _pending_block_header_state; + optional _new_pending_producer_schedule; + vector _new_protocol_feature_activations; + size_t _num_new_protocol_features_that_have_activated = 0; + vector _pending_trx_metas; + vector _pending_trx_receipts; + vector _actions; }; struct assembled_block { @@ -125,6 +130,9 @@ struct assembled_block { pending_block_header_state _pending_block_header_state; vector _trx_metas; signed_block_ptr _unsigned_block; + + // if the _unsigned_block pre-dates block-signing authorities this may be present. 
+ optional _new_producer_authority_cache; }; struct completed_block { @@ -165,14 +173,14 @@ struct pending_state { return _block_stage.get()._block_state->block->transactions; } - const vector& get_trx_metas()const { + vector extract_trx_metas() { if( _block_stage.contains() ) - return _block_stage.get()._pending_trx_metas; + return std::move( _block_stage.get()._pending_trx_metas ); if( _block_stage.contains() ) - return _block_stage.get()._trx_metas; + return std::move( _block_stage.get()._trx_metas ); - return _block_stage.get()._block_state->trxs; + return _block_stage.get()._block_state->extract_trxs_metas(); } bool is_protocol_feature_activated( const digest_type& feature_digest )const { @@ -207,6 +215,14 @@ struct pending_state { }; struct controller_impl { + + // LLVM sets the new handler, we need to reset this to throw a bad_alloc exception so we can possibly exit cleanly + // and not just abort. + struct reset_new_handler { + reset_new_handler() { std::set_new_handler([](){ throw std::bad_alloc(); }); } + }; + + reset_new_handler rnh; // placed here to allow for this to be set before constructing the other fields controller& self; chainbase::database db; chainbase::database reversible_blocks; ///< a special database to persist blocks that have successfully been applied but are still reversible @@ -219,7 +235,7 @@ struct controller_impl { authorization_manager authorization; protocol_feature_manager protocol_features; controller::config conf; - chain_id_type chain_id; + const chain_id_type chain_id; // read by thread_pool threads, value will not be changed optional replay_head_time; db_read_mode read_mode = db_read_mode::SPECULATIVE; bool in_trx_requiring_checks = false; ///< if true, checks that are normally skipped on replay (e.g. 
auth checks) cannot be skipped @@ -227,18 +243,15 @@ struct controller_impl { bool trusted_producer_light_validation = false; uint32_t snapshot_head_block = 0; named_thread_pool thread_pool; + platform_timer timer; +#if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) + vm::wasm_allocator wasm_alloc; +#endif typedef pair handler_key; map< account_name, map > apply_handlers; unordered_map< builtin_protocol_feature_t, std::function, enum_hash > protocol_feature_activation_handlers; - /** - * Transactions that were undone by pop_block or abort_block, transactions - * are removed from this list if they are re-applied in other blocks. Producers - * can query this list when scheduling new transactions into blocks. - */ - unapplied_transactions_type unapplied_transactions; - void pop_block() { auto prev = fork_db.get_block( head->header.previous ); @@ -254,11 +267,10 @@ struct controller_impl { if ( read_mode == db_read_mode::SPECULATIVE ) { EOS_ASSERT( head->block, block_validate_exception, "attempting to pop a block that was sparsely loaded from a snapshot"); - for( const auto& t : head->trxs ) - unapplied_transactions[t->signed_id] = t; } head = prev; + db.undo(); protocol_features.popped_blocks_to( prev->block_num ); @@ -283,8 +295,9 @@ struct controller_impl { apply_handlers[receiver][make_pair(contract,action)] = v; } - controller_impl( const controller::config& cfg, controller& s, protocol_feature_set&& pfs ) - :self(s), + controller_impl( const controller::config& cfg, controller& s, protocol_feature_set&& pfs, const chain_id_type& chain_id ) + :rnh(), + self(s), db( cfg.state_dir, cfg.read_only ? 
database::read_only : database::read_write, cfg.state_size, false, cfg.db_map_mode, cfg.db_hugepage_paths ), @@ -293,16 +306,15 @@ struct controller_impl { cfg.reversible_cache_size, false, cfg.db_map_mode, cfg.db_hugepage_paths ), blog( cfg.blocks_dir ), fork_db( cfg.state_dir ), - wasmif( cfg.wasm_runtime, db ), + wasmif( cfg.wasm_runtime, cfg.eosvmoc_tierup, db, cfg.state_dir, cfg.eosvmoc_config ), resource_limits( db ), authorization( s, db ), protocol_features( std::move(pfs) ), conf( cfg ), - chain_id( cfg.genesis.compute_chain_id() ), + chain_id( chain_id ), read_mode( cfg.read_mode ), thread_pool( "chain", cfg.thread_pool_size ) { - fork_db.open( [this]( block_timestamp_type timestamp, const flat_set& cur_features, const vector& new_features ) @@ -312,6 +324,8 @@ struct controller_impl { set_activation_handler(); set_activation_handler(); set_activation_handler(); + set_activation_handler(); + set_activation_handler(); self.irreversible_block.connect([this](const block_state_ptr& bsp) { wasmif.current_lib(bsp->block_num); @@ -319,7 +333,8 @@ struct controller_impl { #define SET_APP_HANDLER( receiver, contract, action) \ - set_apply_handler( #receiver, #contract, #action, &BOOST_PP_CAT(apply_, BOOST_PP_CAT(contract, BOOST_PP_CAT(_,action) ) ) ) + set_apply_handler( account_name(#receiver), account_name(#contract), action_name(#action), \ + &BOOST_PP_CAT(apply_, BOOST_PP_CAT(contract, BOOST_PP_CAT(_,action) ) ) ) SET_APP_HANDLER( eosio, eosio, newaccount ); SET_APP_HANDLER( eosio, eosio, setcode ); @@ -378,7 +393,7 @@ struct controller_impl { auto root_id = fork_db.root()->id; if( log_head ) { - EOS_ASSERT( root_id == log_head->id(), fork_database_exception, "fork database root does not match block log head" ); + EOS_ASSERT( root_id == blog.head_id(), fork_database_exception, "fork database root does not match block log head" ); } else { EOS_ASSERT( fork_db.root()->block_num == lib_num, fork_database_exception, "empty block log expects the first appended 
block to build off a block that is not the fork database root" ); @@ -395,7 +410,7 @@ struct controller_impl { for( auto bitr = branch.rbegin(); bitr != branch.rend(); ++bitr ) { if( read_mode == db_read_mode::IRREVERSIBLE ) { - apply_block( *bitr, controller::block_status::complete ); + apply_block( *bitr, controller::block_status::complete, trx_meta_cache_lookup{} ); head = (*bitr); fork_db.mark_valid( head ); } @@ -430,16 +445,18 @@ struct controller_impl { /** * Sets fork database head to the genesis state. */ - void initialize_blockchain_state() { + void initialize_blockchain_state(const genesis_state& genesis) { wlog( "Initializing new blockchain with genesis state" ); - producer_schedule_type initial_schedule{ 0, {{config::system_account_name, conf.genesis.initial_key}} }; + producer_authority_schedule initial_schedule = { 0, { producer_authority{config::system_account_name, block_signing_authority_v0{ 1, {{genesis.initial_key, 1}} } } } }; + legacy::producer_schedule_type initial_legacy_schedule{ 0, {{config::system_account_name, genesis.initial_key}} }; block_header_state genheader; genheader.active_schedule = initial_schedule; genheader.pending_schedule.schedule = initial_schedule; - genheader.pending_schedule.schedule_hash = fc::sha256::hash(initial_schedule); - genheader.header.timestamp = conf.genesis.initial_timestamp; - genheader.header.action_mroot = conf.genesis.compute_chain_id(); + // NOTE: if wtmsig block signatures are enabled at genesis time this should be the hash of a producer authority schedule + genheader.pending_schedule.schedule_hash = fc::sha256::hash(initial_legacy_schedule); + genheader.header.timestamp = genesis.initial_timestamp; + genheader.header.action_mroot = genesis.compute_chain_id(); genheader.id = genheader.header.id(); genheader.block_num = genheader.header.block_num(); @@ -448,7 +465,7 @@ struct controller_impl { head->activated_protocol_features = std::make_shared(); head->block = std::make_shared(genheader.header); 
db.set_revision( head->block_num ); - initialize_database(); + initialize_database(genesis); } void replay(std::function shutdown) { @@ -518,76 +535,134 @@ struct controller_impl { } } - void init(std::function shutdown, const snapshot_reader_ptr& snapshot) { - // Setup state if necessary (or in the default case stay with already loaded state): - uint32_t lib_num = 1u; - if( snapshot ) { + void startup(std::function shutdown, const snapshot_reader_ptr& snapshot) { + EOS_ASSERT( snapshot, snapshot_exception, "No snapshot reader provided" ); + ilog( "Starting initialization from snapshot, this may take a significant amount of time" ); + try { snapshot->validate(); if( blog.head() ) { - lib_num = blog.head()->block_num(); - read_from_snapshot( snapshot, blog.first_block_num(), lib_num ); + read_from_snapshot( snapshot, blog.first_block_num(), blog.head()->block_num() ); } else { read_from_snapshot( snapshot, 0, std::numeric_limits::max() ); - lib_num = head->block_num; - blog.reset( conf.genesis, signed_block_ptr(), lib_num + 1 ); + const uint32_t lib_num = head->block_num; + EOS_ASSERT( lib_num > 0, snapshot_exception, + "Snapshot indicates controller head at block number 0, but that is not allowed. " + "Snapshot is invalid." ); + blog.reset( chain_id, lib_num + 1 ); + } + const auto hash = calculate_integrity_hash(); + ilog( "database initialized with hash: ${hash}", ("hash", hash) ); + + init(shutdown); + } catch (boost::interprocess::bad_alloc& e) { + elog( "db storage not configured to have enough storage for the provided snapshot, please increase and retry snapshot" ); + throw e; + } + + ilog( "Finished initialization from snapshot" ); + } + + void startup(std::function shutdown, const genesis_state& genesis) { + EOS_ASSERT( db.revision() < 1, database_exception, "This version of controller::startup only works with a fresh state database." 
); + const auto& genesis_chain_id = genesis.compute_chain_id(); + EOS_ASSERT( genesis_chain_id == chain_id, chain_id_type_exception, + "genesis state provided to startup corresponds to a chain ID (${genesis_chain_id}) that does not match the chain ID that controller was constructed with (${controller_chain_id})", + ("genesis_chain_id", genesis_chain_id)("controller_chain_id", chain_id) + ); + + if( fork_db.head() ) { + if( read_mode == db_read_mode::IRREVERSIBLE && fork_db.head()->id != fork_db.root()->id ) { + fork_db.rollback_head_to_root(); } + wlog( "No existing chain state. Initializing fresh blockchain state." ); } else { - if( db.revision() < 1 || !fork_db.head() ) { - if( fork_db.head() ) { - if( read_mode == db_read_mode::IRREVERSIBLE && fork_db.head()->id != fork_db.root()->id ) { - fork_db.rollback_head_to_root(); - } - wlog( "No existing chain state. Initializing fresh blockchain state." ); - } else { - EOS_ASSERT( db.revision() < 1, database_exception, - "No existing fork database despite existing chain state. Replay required." ); - wlog( "No existing chain state or fork database. Initializing fresh blockchain state and resetting fork database."); - } - initialize_blockchain_state(); // sets head to genesis state + wlog( "No existing chain state or fork database. 
Initializing fresh blockchain state and resetting fork database."); + } + initialize_blockchain_state(genesis); // sets head to genesis state - if( !fork_db.head() ) { - fork_db.reset( *head ); - } + if( !fork_db.head() ) { + fork_db.reset( *head ); + } - if( blog.head() ) { - EOS_ASSERT( blog.first_block_num() == 1, block_log_exception, - "block log does not start with genesis block" - ); - lib_num = blog.head()->block_num(); - } else { - blog.reset( conf.genesis, head->block ); - } - } else { - lib_num = fork_db.root()->block_num; - auto first_block_num = blog.first_block_num(); - if( blog.head() ) { - EOS_ASSERT( first_block_num <= lib_num && lib_num <= blog.head()->block_num(), - block_log_exception, - "block log does not contain last irreversible block", - ("block_log_first_num", first_block_num) - ("block_log_last_num", blog.head()->block_num()) - ("fork_db_lib", lib_num) - ); - lib_num = blog.head()->block_num(); - } else { - lib_num = fork_db.root()->block_num; - if( first_block_num != (lib_num + 1) ) { - blog.reset( conf.genesis, signed_block_ptr(), lib_num + 1 ); - } - } + if( blog.head() ) { + EOS_ASSERT( blog.first_block_num() == 1, block_log_exception, + "block log does not start with genesis block" + ); + } else { + blog.reset( genesis, head->block ); + } + init(shutdown); + } - if( read_mode == db_read_mode::IRREVERSIBLE && fork_db.head()->id != fork_db.root()->id ) { - fork_db.rollback_head_to_root(); - } - head = fork_db.head(); + void startup(std::function shutdown) { + EOS_ASSERT( db.revision() >= 1, database_exception, "This version of controller::startup does not work with a fresh state database." ); + EOS_ASSERT( fork_db.head(), fork_database_exception, "No existing fork database despite existing chain state. Replay required." 
); + + uint32_t lib_num = fork_db.root()->block_num; + auto first_block_num = blog.first_block_num(); + if( blog.head() ) { + EOS_ASSERT( first_block_num <= lib_num && lib_num <= blog.head()->block_num(), + block_log_exception, + "block log (ranging from ${block_log_first_num} to ${block_log_last_num}) does not contain the last irreversible block (${fork_db_lib})", + ("block_log_first_num", first_block_num) + ("block_log_last_num", blog.head()->block_num()) + ("fork_db_lib", lib_num) + ); + lib_num = blog.head()->block_num(); + } else { + if( first_block_num != (lib_num + 1) ) { + blog.reset( chain_id, lib_num + 1 ); } } + + if( read_mode == db_read_mode::IRREVERSIBLE && fork_db.head()->id != fork_db.root()->id ) { + fork_db.rollback_head_to_root(); + } + head = fork_db.head(); + + init(shutdown); + } + + + static auto validate_db_version( const chainbase::database& db ) { + // check database version + const auto& header_idx = db.get_index().indices().get(); + + EOS_ASSERT(header_idx.begin() != header_idx.end(), bad_database_version_exception, + "state database version pre-dates versioning, please restore from a compatible snapshot or replay!"); + + auto header_itr = header_idx.begin(); + header_itr->validate(); + + return header_itr; + } + + void init(std::function shutdown) { + uint32_t lib_num = (blog.head() ? 
blog.head()->block_num() : fork_db.root()->block_num); + + auto header_itr = validate_db_version( db ); + + { + const auto& state_chain_id = db.get().chain_id; + EOS_ASSERT( state_chain_id == chain_id, chain_id_type_exception, + "chain ID in state (${state_chain_id}) does not match the chain ID that controller was constructed with (${controller_chain_id})", + ("state_chain_id", state_chain_id)("controller_chain_id", chain_id) + ); + } + + // upgrade to the latest compatible version + if (header_itr->version != database_header_object::current_version) { + db.modify(*header_itr, [](auto& header) { + header.version = database_header_object::current_version; + }); + } + // At this point head != nullptr && fork_db.head() != nullptr && fork_db.root() != nullptr. // Furthermore, fork_db.root()->block_num <= lib_num. // Also, even though blog.head() may still be nullptr, blog.first_block_num() is guaranteed to be lib_num + 1. EOS_ASSERT( db.revision() >= head->block_num, fork_database_exception, - "fork database head is inconsistent with state", + "fork database head (${head}) is inconsistent with state (${db})", ("db",db.revision())("head",head->block_num) ); if( db.revision() > head->block_num ) { @@ -618,7 +693,7 @@ struct controller_impl { reversible_blocks.remove( *itr ); EOS_ASSERT( itr == rbi.end() || itr->blocknum == lib_num + 1, reversible_blocks_exception, - "gap exists between last irreversible block and first reversible block", + "gap exists between last irreversible block (${lib}) and first reversible block (${first_reversible_block_num})", ("lib", lib_num)("first_reversible_block_num", itr->blocknum) ); @@ -662,8 +737,6 @@ struct controller_impl { // else no checks needed since fork_db will be completely reset on replay anyway } - bool report_integrity_hash = !!snapshot || (lib_num > head->block_num); - if( last_block_num > head->block_num ) { replay( shutdown ); // replay any irreversible and reversible blocks ahead of current head } @@ -681,14 +754,9 @@ 
struct controller_impl { pending_head = fork_db.pending_head() ) { wlog( "applying branch from fork database ending with block: ${id}", ("id", pending_head->id) ); - maybe_switch_forks( pending_head, controller::block_status::complete ); + maybe_switch_forks( pending_head, controller::block_status::complete, forked_branch_callback{}, trx_meta_cache_lookup{} ); } } - - if( report_integrity_hash ) { - const auto hash = calculate_integrity_hash(); - ilog( "database initialized with hash: ${hash}", ("hash", hash) ); - } } ~controller_impl() { @@ -776,10 +844,6 @@ struct controller_impl { section.add_row(chain_snapshot_header(), db); }); - snapshot->write_section([this]( auto §ion ){ - section.add_row(conf.genesis, db); - }); - snapshot->write_section([this]( auto §ion ){ section.template add_row(*fork_db.head(), db); }); @@ -792,6 +856,11 @@ struct controller_impl { return; } + // skip the database_header as it is only relevant to in-memory database + if (std::is_same::value) { + return; + } + snapshot->write_section([this]( auto& section ){ decltype(utils)::walk(db, [this, §ion]( const auto &row ) { section.add_row(row, db); @@ -805,17 +874,41 @@ struct controller_impl { resource_limits.add_to_snapshot(snapshot); } + static fc::optional extract_legacy_genesis_state( snapshot_reader& snapshot, uint32_t version ) { + fc::optional genesis; + using v2 = legacy::snapshot_global_property_object_v2; + + if (std::clamp(version, v2::minimum_version, v2::maximum_version) == version ) { + genesis.emplace(); + snapshot.read_section([&genesis=*genesis]( auto §ion ){ + section.read_row(genesis); + }); + } + return genesis; + } + void read_from_snapshot( const snapshot_reader_ptr& snapshot, uint32_t blog_start, uint32_t blog_end ) { - snapshot->read_section([this]( auto §ion ){ - chain_snapshot_header header; + chain_snapshot_header header; + snapshot->read_section([this, &header]( auto §ion ){ section.read_row(header, db); header.validate(); }); - - snapshot->read_section([this, 
blog_start, blog_end]( auto §ion ){ + { /// load and upgrade the block header state block_header_state head_header_state; - section.read_row(head_header_state, db); + using v2 = legacy::snapshot_block_header_state_v2; + + if (std::clamp(header.version, v2::minimum_version, v2::maximum_version) == header.version ) { + snapshot->read_section([this, &head_header_state]( auto §ion ) { + legacy::snapshot_block_header_state_v2 legacy_header_state; + section.read_row(legacy_header_state, db); + head_header_state = block_header_state(std::move(legacy_header_state)); + }); + } else { + snapshot->read_section([this,&head_header_state]( auto §ion ){ + section.read_row(head_header_state, db); + }); + } snapshot_head_block = head_header_state.block_num; EOS_ASSERT( blog_start <= (snapshot_head_block + 1) && snapshot_head_block <= blog_end, @@ -829,9 +922,10 @@ struct controller_impl { fork_db.reset( head_header_state ); head = fork_db.head(); snapshot_head_block = head->block_num; - }); - controller_index_set::walk_indices([this, &snapshot]( auto utils ){ + } + + controller_index_set::walk_indices([this, &snapshot, &header]( auto utils ){ using value_t = typename decltype(utils)::index_t::value_type; // skip the table_id_object as its inlined with contract tables section @@ -839,6 +933,32 @@ struct controller_impl { return; } + // skip the database_header as it is only relevant to in-memory database + if (std::is_same::value) { + return; + } + + // special case for in-place upgrade of global_property_object + if (std::is_same::value) { + using v2 = legacy::snapshot_global_property_object_v2; + + if (std::clamp(header.version, v2::minimum_version, v2::maximum_version) == header.version ) { + fc::optional genesis = extract_legacy_genesis_state(*snapshot, header.version); + EOS_ASSERT( genesis, snapshot_exception, + "Snapshot indicates chain_snapshot_header version 2, but does not contain a genesis_state. 
" + "It must be corrupted."); + snapshot->read_section([&db=this->db,gs_chain_id=genesis->compute_chain_id()]( auto §ion ) { + v2 legacy_global_properties; + section.read_row(legacy_global_properties, db); + + db.create([&legacy_global_properties,&gs_chain_id](auto& gpo ){ + gpo.initalize_from(legacy_global_properties, gs_chain_id); + }); + }); + return; // early out to avoid default processing + } + } + snapshot->read_section([this]( auto& section ) { bool more = !section.empty(); while(more) { @@ -855,6 +975,15 @@ struct controller_impl { resource_limits.read_from_snapshot(snapshot); db.set_revision( head->block_num ); + db.create([](const auto& header){ + // nothing to do + }); + + const auto& gpo = db.get(); + EOS_ASSERT( gpo.chain_id == chain_id, chain_id_type_exception, + "chain ID in snapshot (${snapshot_chain_id}) does not match the chain ID that controller was constructed with (${controller_chain_id})", + ("snapshot_chain_id", gpo.chain_id)("controller_chain_id", chain_id) + ); } sha256 calculate_integrity_hash() const { @@ -866,13 +995,16 @@ struct controller_impl { return enc.result(); } - void create_native_account( account_name name, const authority& owner, const authority& active, bool is_privileged = false ) { + void create_native_account( const fc::time_point& initial_timestamp, account_name name, const authority& owner, const authority& active, bool is_privileged = false ) { db.create([&](auto& a) { a.name = name; - a.creation_date = conf.genesis.initial_timestamp; + a.creation_date = initial_timestamp; if( name == config::system_account_name ) { - a.set_abi(eosio_contract_abi(abi_def())); + // The initial eosio ABI value affects consensus; see https://github.com/EOSIO/eos/issues/7794 + // TODO: This doesn't charge RAM; a fix requires a consensus upgrade. 
+ a.abi.resize(sizeof(eosio_abi_bin)); + memcpy(a.abi.data(), eosio_abi_bin, sizeof(eosio_abi_bin)); } }); db.create([&](auto & a) { @@ -881,9 +1013,9 @@ struct controller_impl { }); const auto& owner_permission = authorization.create_permission(name, config::owner_name, 0, - owner, conf.genesis.initial_timestamp ); + owner, initial_timestamp ); const auto& active_permission = authorization.create_permission(name, config::active_name, owner_permission.id, - active, conf.genesis.initial_timestamp ); + active, initial_timestamp ); resource_limits.initialize_account(name); @@ -896,7 +1028,12 @@ struct controller_impl { resource_limits.verify_account_ram_usage(name); } - void initialize_database() { + void initialize_database(const genesis_state& genesis) { + // create the database header sigil + db.create([&]( auto& header ){ + // nothing to do for now + }); + // Initialize block summary index for (int i = 0; i < 0x10000; i++) db.create([&](block_summary_object&) {}); @@ -906,13 +1043,14 @@ struct controller_impl { bs.block_id = head->id; }); - conf.genesis.initial_configuration.validate(); - db.create([&](auto& gpo ){ - gpo.configuration = conf.genesis.initial_configuration; + genesis.initial_configuration.validate(); + db.create([&genesis,&chain_id=this->chain_id](auto& gpo ){ + gpo.configuration = genesis.initial_configuration; + gpo.chain_id = chain_id; }); db.create([&](auto& pso ){ - pso.num_supported_key_types = 2; + pso.num_supported_key_types = config::genesis_num_supported_key_types; for( const auto& i : genesis_intrinsics ) { add_intrinsic_to_whitelist( pso.whitelisted_intrinsics, i ); } @@ -923,26 +1061,26 @@ struct controller_impl { authorization.initialize_database(); resource_limits.initialize_database(); - authority system_auth(conf.genesis.initial_key); - create_native_account( config::system_account_name, system_auth, system_auth, true ); + authority system_auth(genesis.initial_key); + create_native_account( genesis.initial_timestamp, 
config::system_account_name, system_auth, system_auth, true ); auto empty_authority = authority(1, {}, {}); auto active_producers_authority = authority(1, {}, {}); active_producers_authority.accounts.push_back({{config::system_account_name, config::active_name}, 1}); - create_native_account( config::null_account_name, empty_authority, empty_authority ); - create_native_account( config::producers_account_name, empty_authority, active_producers_authority ); + create_native_account( genesis.initial_timestamp, config::null_account_name, empty_authority, empty_authority ); + create_native_account( genesis.initial_timestamp, config::producers_account_name, empty_authority, active_producers_authority ); const auto& active_permission = authorization.get_permission({config::producers_account_name, config::active_name}); const auto& majority_permission = authorization.create_permission( config::producers_account_name, config::majority_producers_permission_name, active_permission.id, active_producers_authority, - conf.genesis.initial_timestamp ); + genesis.initial_timestamp ); const auto& minority_permission = authorization.create_permission( config::producers_account_name, config::minority_producers_permission_name, majority_permission.id, active_producers_authority, - conf.genesis.initial_timestamp ); + genesis.initial_timestamp ); } // The returned scoped_exit should not exceed the lifetime of the pending which existed when make_block_restore_point was called. 
@@ -988,7 +1126,8 @@ struct controller_impl { etrx.set_reference_block( self.head_block_id() ); } - transaction_context trx_context( self, etrx, etrx.id(), start ); + transaction_checktime_timer trx_timer(timer); + transaction_context trx_context( self, etrx, etrx.id(), std::move(trx_timer), start ); trx_context.deadline = deadline; trx_context.explicit_billed_cpu_time = explicit_billed_cpu_time; trx_context.billed_cpu_time_us = billed_cpu_time_us; @@ -1044,7 +1183,8 @@ struct controller_impl { || (code == contract_whitelist_exception::code_value) || (code == contract_blacklist_exception::code_value) || (code == action_blacklist_exception::code_value) - || (code == key_blacklist_exception::code_value); + || (code == key_blacklist_exception::code_value) + || (code == sig_variable_size_limit_exception::code_value); } bool scheduled_failure_is_subjective( const fc::exception& e ) const { @@ -1083,9 +1223,8 @@ struct controller_impl { signed_transaction dtrx; fc::raw::unpack(ds,static_cast(dtrx) ); - transaction_metadata_ptr trx = std::make_shared( dtrx ); + transaction_metadata_ptr trx = transaction_metadata::create_no_recover_keys( packed_transaction( dtrx ), transaction_metadata::trx_type::scheduled ); trx->accepted = true; - trx->scheduled = true; transaction_trace_ptr trace; if( gtrx.expiration < self.pending_block_time() ) { @@ -1110,7 +1249,8 @@ struct controller_impl { uint32_t cpu_time_to_bill_us = billed_cpu_time_us; - transaction_context trx_context( self, dtrx, gtrx.trx_id ); + transaction_checktime_timer trx_timer(timer); + transaction_context trx_context( self, dtrx, gtrx.trx_id, std::move(trx_timer) ); trx_context.leeway = fc::microseconds(0); // avoid stealing cpu resource trx_context.deadline = deadline; trx_context.explicit_billed_cpu_time = explicit_billed_cpu_time; @@ -1262,9 +1402,8 @@ struct controller_impl { try { auto start = fc::time_point::now(); const bool check_auth = !self.skip_auth_check() && !trx->implicit; - // call recover keys so that 
trx->sig_cpu_usage is set correctly - const fc::microseconds sig_cpu_usage = check_auth ? std::get<0>( trx->recover_keys( chain_id ) ) : fc::microseconds(); - const flat_set& recovered_keys = check_auth ? std::get<1>( trx->recover_keys( chain_id ) ) : flat_set(); + const fc::microseconds sig_cpu_usage = trx->signature_cpu_usage(); + if( !explicit_billed_cpu_time ) { fc::microseconds already_consumed_time( EOS_PERCENT(sig_cpu_usage.count(), conf.sig_cpu_bill_pct) ); @@ -1275,8 +1414,9 @@ struct controller_impl { } } - const signed_transaction& trn = trx->packed_trx->get_signed_transaction(); - transaction_context trx_context(self, trn, trx->id, start); + const signed_transaction& trn = trx->packed_trx()->get_signed_transaction(); + transaction_checktime_timer trx_timer(timer); + transaction_context trx_context(self, trn, trx->id(), std::move(trx_timer), start); if ((bool)subjective_cpu_leeway && pending->_block_status == controller::block_status::incomplete) { trx_context.leeway = *subjective_cpu_leeway; } @@ -1290,8 +1430,8 @@ struct controller_impl { trx_context.enforce_whiteblacklist = false; } else { bool skip_recording = replay_head_time && (time_point(trn.expiration) <= *replay_head_time); - trx_context.init_for_input_trx( trx->packed_trx->get_unprunable_size(), - trx->packed_trx->get_prunable_size(), + trx_context.init_for_input_trx( trx->packed_trx()->get_unprunable_size(), + trx->packed_trx()->get_prunable_size(), skip_recording); } @@ -1300,7 +1440,7 @@ struct controller_impl { if( check_auth ) { authorization.check_authorization( trn.actions, - recovered_keys, + trx->recovered_keys(), {}, trx_context.delay, [&trx_context](){ trx_context.checktime(); }, @@ -1316,7 +1456,7 @@ struct controller_impl { transaction_receipt::status_enum s = (trx_context.delay == fc::seconds(0)) ? 
transaction_receipt::executed : transaction_receipt::delayed; - trace->receipt = push_receipt(*trx->packed_trx, s, trx_context.billed_cpu_time_us, trace->net_usage); + trace->receipt = push_receipt(*trx->packed_trx(), s, trx_context.billed_cpu_time_us, trace->net_usage); pending->_block_stage.get()._pending_trx_metas.emplace_back(trx); } else { transaction_receipt_header r; @@ -1345,9 +1485,6 @@ struct controller_impl { trx_context.squash(); } - if (!trx->implicit) { - unapplied_transactions.erase( trx->signed_id ); - } return trace; } catch( const disallowed_transaction_extensions_bad_block_exception& ) { throw; @@ -1359,10 +1496,6 @@ struct controller_impl { trace->except_ptr = std::current_exception(); } - if (!failure_is_subjective(*trace->except)) { - unapplied_transactions.erase( trx->signed_id ); - } - emit( self.accepted_transaction, trx ); emit( self.applied_transaction, std::tie(trace, trn) ); @@ -1479,22 +1612,23 @@ struct controller_impl { ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", ("proposed_num", *gpo.proposed_schedule_block_num)("n", pbhs.block_num) ("lib", pbhs.dpos_irreversible_blocknum) - ("schedule", static_cast(gpo.proposed_schedule) ) ); + ("schedule", producer_authority_schedule::from_shared(gpo.proposed_schedule) ) ); } EOS_ASSERT( gpo.proposed_schedule.version == pbhs.active_schedule_version + 1, producer_schedule_exception, "wrong producer schedule version specified" ); - pending->_block_stage.get()._new_pending_producer_schedule = gpo.proposed_schedule; + pending->_block_stage.get()._new_pending_producer_schedule = producer_authority_schedule::from_shared(gpo.proposed_schedule); db.modify( gpo, [&]( auto& gp ) { gp.proposed_schedule_block_num = optional(); - gp.proposed_schedule.clear(); + gp.proposed_schedule.version=0; + gp.proposed_schedule.producers.clear(); }); } try { - auto onbtrx = std::make_shared( get_on_block_transaction() ); - 
onbtrx->implicit = true; + transaction_metadata_ptr onbtrx = + transaction_metadata::create_no_recover_keys( packed_transaction( get_on_block_transaction() ), transaction_metadata::trx_type::implicit ); auto reset_in_trx_requiring_checks = fc::make_scoped_exit([old_value=in_trx_requiring_checks,this](){ in_trx_requiring_checks = old_value; }); @@ -1545,8 +1679,9 @@ struct controller_impl { auto block_ptr = std::make_shared( pbhs.make_block_header( calculate_trx_merkle(), calculate_action_merkle(), - std::move( bb._new_pending_producer_schedule ), - std::move( bb._new_protocol_feature_activations ) + bb._new_pending_producer_schedule, + std::move( bb._new_protocol_feature_activations ), + protocol_features.get_protocol_feature_set() ) ); block_ptr->transactions = std::move( bb._pending_trx_receipts ); @@ -1574,7 +1709,8 @@ struct controller_impl { id, std::move( bb._pending_block_header_state ), std::move( bb._pending_trx_metas ), - std::move( block_ptr ) + std::move( block_ptr ), + std::move( bb._new_pending_producer_schedule ) }; } FC_CAPTURE_AND_RETHROW() } /// finalize_block @@ -1607,11 +1743,11 @@ struct controller_impl { }); } + emit( self.accepted_block, bsp ); + if( add_to_fork_db ) { log_irreversible(); } - - emit( self.accepted_block, bsp ); } catch (...) 
{ // dont bother resetting pending, instead abort the block reset_pending_on_exit.cancel(); @@ -1684,26 +1820,41 @@ struct controller_impl { } } - void apply_block( const block_state_ptr& bsp, controller::block_status s ) + void apply_block( const block_state_ptr& bsp, controller::block_status s, const trx_meta_cache_lookup& trx_lookup ) { try { try { const signed_block_ptr& b = bsp->block; const auto& new_protocol_feature_activations = bsp->get_new_protocol_feature_activations(); - EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported block extensions" ); auto producer_block_id = b->id(); start_block( b->timestamp, b->confirmed, new_protocol_feature_activations, s, producer_block_id); - std::vector packed_transactions; - packed_transactions.reserve( b->transactions.size() ); - for( const auto& receipt : b->transactions ) { - if( receipt.trx.contains()) { - auto& pt = receipt.trx.get(); - auto mtrx = std::make_shared( std::make_shared( pt ) ); - if( !self.skip_auth_check() ) { - transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), chain_id, microseconds::maximum() ); + const bool existing_trxs_metas = !bsp->trxs_metas().empty(); + const bool pub_keys_recovered = bsp->is_pub_keys_recovered(); + const bool skip_auth_checks = self.skip_auth_check(); + std::vector> trx_metas; + bool use_bsp_cached = false; + if( pub_keys_recovered || (skip_auth_checks && existing_trxs_metas) ) { + use_bsp_cached = true; + } else { + trx_metas.reserve( b->transactions.size() ); + for( const auto& receipt : b->transactions ) { + if( receipt.trx.contains()) { + const auto& pt = receipt.trx.get(); + transaction_metadata_ptr trx_meta_ptr = trx_lookup ? 
trx_lookup( pt.id() ) : transaction_metadata_ptr{}; + if( trx_meta_ptr && ( skip_auth_checks || !trx_meta_ptr->recovered_keys().empty() ) ) { + trx_metas.emplace_back( std::move( trx_meta_ptr ), recover_keys_future{} ); + } else if( skip_auth_checks ) { + trx_metas.emplace_back( + transaction_metadata::create_no_recover_keys( pt, transaction_metadata::trx_type::input ), + recover_keys_future{} ); + } else { + auto ptrx = std::make_shared( pt ); + auto fut = transaction_metadata::start_recover_keys( + std::move( ptrx ), thread_pool.get_executor(), chain_id, microseconds::maximum() ); + trx_metas.emplace_back( transaction_metadata_ptr{}, std::move( fut ) ); + } } - packed_transactions.emplace_back( std::move( mtrx ) ); } } @@ -1714,7 +1865,12 @@ struct controller_impl { const auto& trx_receipts = pending->_block_stage.get()._pending_trx_receipts; auto num_pending_receipts = trx_receipts.size(); if( receipt.trx.contains() ) { - trace = push_transaction( packed_transactions.at(packed_idx++), fc::time_point::maximum(), receipt.cpu_usage_us, true ); + const auto& trx_meta = ( use_bsp_cached ? bsp->trxs_metas().at( packed_idx ) + : ( !!std::get<0>( trx_metas.at( packed_idx ) ) ? 
+ std::get<0>( trx_metas.at( packed_idx ) ) + : std::get<1>( trx_metas.at( packed_idx ) ).get() ) ); + trace = push_transaction( trx_meta, fc::time_point::maximum(), receipt.cpu_usage_us, true ); + ++packed_idx; } else if( receipt.trx.contains() ) { trace = push_scheduled_transaction( receipt.trx.get(), fc::time_point::maximum(), receipt.cpu_usage_us, true ); } else { @@ -1750,17 +1906,10 @@ struct controller_impl { EOS_ASSERT( producer_block_id == ab._id, block_validate_exception, "Block ID does not match", ("producer_block_id",producer_block_id)("validator_block_id",ab._id) ); - auto bsp = std::make_shared( - std::move( ab._pending_block_header_state ), - b, - std::move( ab._trx_metas ), - []( block_timestamp_type timestamp, - const flat_set& cur_features, - const vector& new_features ) - {}, // validation of any new protocol features should have already occurred prior to apply_block - true // signature should have already been verified (assuming untrusted) prior to apply_block - ); - + if( !use_bsp_cached ) { + bsp->set_trxs_metas( std::move( ab._trx_metas ), !skip_auth_checks ); + } + // create completed_block with the existing block_state as we just verified it is the same as assembled_block pending->_block_stage = completed_block{ bsp }; commit_block(false); @@ -1790,6 +1939,7 @@ struct controller_impl { return std::make_shared( *prev, move( b ), + control->protocol_features.get_protocol_feature_set(), [control]( block_timestamp_type timestamp, const flat_set& cur_features, const vector& new_features ) @@ -1799,7 +1949,9 @@ struct controller_impl { } ); } - void push_block( std::future& block_state_future ) { + void push_block( std::future& block_state_future, + const forked_branch_callback& forked_branch_cb, const trx_meta_cache_lookup& trx_lookup ) + { controller::block_status s = controller::block_status::complete; EOS_ASSERT(!pending, block_validate_exception, "it is not valid to push a block when there is a pending block"); @@ -1821,7 +1973,7 @@ struct 
controller_impl { emit( self.accepted_block_header, bsp ); if( read_mode != db_read_mode::IRREVERSIBLE ) { - maybe_switch_forks( fork_db.pending_head(), s ); + maybe_switch_forks( fork_db.pending_head(), s, forked_branch_cb, trx_lookup ); } else { log_irreversible(); } @@ -1845,6 +1997,7 @@ struct controller_impl { auto bsp = std::make_shared( *head, b, + protocol_features.get_protocol_feature_set(), [this]( block_timestamp_type timestamp, const flat_set& cur_features, const vector& new_features ) @@ -1859,7 +2012,7 @@ struct controller_impl { emit( self.accepted_block_header, bsp ); if( s == controller::block_status::irreversible ) { - apply_block( bsp, s ); + apply_block( bsp, s, trx_meta_cache_lookup{} ); head = bsp; // On replay, log_irreversible is not called and so no irreversible_block signal is emittted. @@ -1873,17 +2026,19 @@ struct controller_impl { } else { EOS_ASSERT( read_mode != db_read_mode::IRREVERSIBLE, block_validate_exception, "invariant failure: cannot replay reversible blocks while in irreversible mode" ); - maybe_switch_forks( bsp, s ); + maybe_switch_forks( bsp, s, forked_branch_callback{}, trx_meta_cache_lookup{} ); } } FC_LOG_AND_RETHROW( ) } - void maybe_switch_forks( const block_state_ptr& new_head, controller::block_status s ) { + void maybe_switch_forks( const block_state_ptr& new_head, controller::block_status s, + const forked_branch_callback& forked_branch_cb, const trx_meta_cache_lookup& trx_lookup ) + { bool head_changed = true; if( new_head->header.previous == head->id ) { try { - apply_block( new_head, s ); + apply_block( new_head, s, trx_lookup ); fork_db.mark_valid( new_head ); head = new_head; } catch ( const fc::exception& e ) { @@ -1902,13 +2057,15 @@ struct controller_impl { } EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, "loss of sync between fork_db and chainbase during fork switch" ); // _should_ never fail + + if( forked_branch_cb ) forked_branch_cb( 
branches.second ); } for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) { optional except; try { apply_block( *ritr, (*ritr)->is_valid() ? controller::block_status::validated - : controller::block_status::complete ); + : controller::block_status::complete, trx_lookup ); fork_db.mark_valid( *ritr ); head = *ritr; } catch (const fc::exception& e) { @@ -1921,7 +2078,7 @@ struct controller_impl { // Remove the block that threw and all forks built off it. fork_db.remove( (*ritr)->id ); - // pop all blocks from the bad fork + // pop all blocks from the bad fork, discarding their transactions // ritr base is a forward itr to the last block successfully applied auto applied_itr = ritr.base(); for( auto itr = applied_itr; itr != branches.first.end(); ++itr ) { @@ -1932,7 +2089,7 @@ struct controller_impl { // re-apply good blocks for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { - apply_block( *ritr, controller::block_status::validated /* we previously validated these blocks*/ ); + apply_block( *ritr, controller::block_status::validated /* we previously validated these blocks*/, trx_lookup ); head = *ritr; } throw *except; @@ -1946,22 +2103,17 @@ struct controller_impl { if( head_changed ) log_irreversible(); + } /// push_block - void abort_block() { + vector abort_block() { + vector applied_trxs; if( pending ) { - if ( read_mode == db_read_mode::SPECULATIVE ) { - for( const auto& t : pending->get_trx_metas() ) - unapplied_transactions[t->signed_id] = t; - } + applied_trxs = pending->extract_trx_metas(); pending.reset(); protocol_features.popped_blocks_to( head->block_num ); } - } - - - bool should_enforce_runtime_limits()const { - return false; + return applied_trxs; } checksum256_type calculate_action_merkle() { @@ -2234,13 +2386,13 @@ const protocol_feature_manager& controller::get_protocol_feature_manager()const return my->protocol_features; } -controller::controller( const controller::config& cfg ) 
-:my( new controller_impl( cfg, *this, protocol_feature_set{} ) ) +controller::controller( const controller::config& cfg, const chain_id_type& chain_id ) +:my( new controller_impl( cfg, *this, protocol_feature_set{}, chain_id ) ) { } -controller::controller( const config& cfg, protocol_feature_set&& pfs ) -:my( new controller_impl( cfg, *this, std::move(pfs) ) ) +controller::controller( const config& cfg, protocol_feature_set&& pfs, const chain_id_type& chain_id ) +:my( new controller_impl( cfg, *this, std::move(pfs), chain_id ) ) { } @@ -2259,19 +2411,15 @@ void controller::add_indices() { } void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { - if( snapshot ) { - ilog( "Starting initialization from snapshot, this may take a significant amount of time" ); - } - try { - my->init(shutdown, snapshot); - } catch (boost::interprocess::bad_alloc& e) { - if ( snapshot ) - elog( "db storage not configured to have enough storage for the provided snapshot, please increase and retry snapshot" ); - throw e; - } - if( snapshot ) { - ilog( "Finished initialization from snapshot" ); - } + my->startup(shutdown, snapshot); +} + +void controller::startup( std::function shutdown, const genesis_state& genesis ) { + my->startup(shutdown, genesis); +} + +void controller::startup( std::function shutdown ) { + my->startup(shutdown); } const chainbase::database& controller::db()const { return my->db; } @@ -2439,7 +2587,7 @@ void controller::start_block( block_timestamp_type when, block_status::incomplete, optional() ); } -block_state_ptr controller::finalize_block( const std::function& signer_callback ) { +block_state_ptr controller::finalize_block( const signer_callback_type& signer_callback ) { validate_db_available_size(); my->finalize_block(); @@ -2450,6 +2598,7 @@ block_state_ptr controller::finalize_block( const std::functionprotocol_features.get_protocol_feature_set(), []( block_timestamp_type timestamp, const flat_set& cur_features, const 
vector& new_features ) @@ -2468,8 +2617,8 @@ void controller::commit_block() { my->commit_block(true); } -void controller::abort_block() { - my->abort_block(); +vector controller::abort_block() { + return my->abort_block(); } boost::asio::io_context& controller::get_thread_pool() { @@ -2480,10 +2629,12 @@ std::future controller::create_block_state_future( const signed return my->create_block_state_future( b ); } -void controller::push_block( std::future& block_state_future ) { +void controller::push_block( std::future& block_state_future, + const forked_branch_callback& forked_branch_cb, const trx_meta_cache_lookup& trx_lookup ) +{ validate_db_available_size(); validate_reversible_available_size(); - my->push_block( block_state_future ); + my->push_block( block_state_future, forked_branch_cb, trx_lookup ); } transaction_trace_ptr controller::push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, uint32_t billed_cpu_time_us ) { @@ -2610,13 +2761,13 @@ account_name controller::pending_block_producer()const { return my->pending->get_pending_block_header_state().producer; } -public_key_type controller::pending_block_signing_key()const { +const block_signing_authority& controller::pending_block_signing_authority()const { EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); if( my->pending->_block_stage.contains() ) - return my->pending->_block_stage.get()._block_state->block_signing_key; + return my->pending->_block_stage.get()._block_state->valid_block_signing_authority; - return my->pending->get_pending_block_header_state().block_signing_key; + return my->pending->get_pending_block_header_state().valid_block_signing_authority; } optional controller::pending_producer_block_id()const { @@ -2635,17 +2786,8 @@ uint32_t controller::last_irreversible_block_num() const { block_id_type controller::last_irreversible_block_id() const { auto lib_num = last_irreversible_block_num(); - const auto& tapos_block_summary = 
db().get((uint16_t)lib_num); - - if( block_header::num_from_id(tapos_block_summary.block_id) == lib_num ) - return tapos_block_summary.block_id; - - auto signed_blk = my->blog.read_block_by_num( lib_num ); - EOS_ASSERT( BOOST_LIKELY( signed_blk != nullptr ), unknown_block_exception, - "Could not find block: ${block}", ("block", lib_num) ); - - return signed_blk->id(); + return get_block_id_for_num( lib_num ); } const dynamic_global_property_object& controller::get_dynamic_global_properties()const { @@ -2693,6 +2835,11 @@ block_state_ptr controller::fetch_block_state_by_number( uint32_t block_num )con } FC_CAPTURE_AND_RETHROW( (block_num) ) } block_id_type controller::get_block_id_for_num( uint32_t block_num )const { try { + const auto& tapos_block_summary = db().get((uint16_t)block_num); + + if( block_header::num_from_id(tapos_block_summary.block_id) == block_num ) + return tapos_block_summary.block_id; + const auto& blog_head = my->blog.head(); bool find_in_blog = (blog_head && block_num <= blog_head->block_num()); @@ -2711,12 +2858,12 @@ block_id_type controller::get_block_id_for_num( uint32_t block_num )const { try } } - auto signed_blk = my->blog.read_block_by_num(block_num); + auto id = my->blog.read_block_id_by_num(block_num); - EOS_ASSERT( BOOST_LIKELY( signed_blk != nullptr ), unknown_block_exception, + EOS_ASSERT( BOOST_LIKELY( id != block_id_type() ), unknown_block_exception, "Could not find block: ${block}", ("block", block_num) ); - return signed_blk->id(); + return id; } FC_CAPTURE_AND_RETHROW( (block_num) ) } sha256 controller::calculate_integrity_hash()const { try { @@ -2728,11 +2875,7 @@ void controller::write_snapshot( const snapshot_writer_ptr& snapshot ) const { return my->add_to_snapshot(snapshot); } -void controller::pop_block() { - my->pop_block(); -} - -int64_t controller::set_proposed_producers( vector producers ) { +int64_t controller::set_proposed_producers( vector producers ) { const auto& gpo = get_global_properties(); auto cur_block_num 
= head_block_num() + 1; @@ -2749,7 +2892,7 @@ int64_t controller::set_proposed_producers( vector producers ) { return -1; // the proposed producer schedule does not change } - producer_schedule_type sch; + producer_authority_schedule sch; decltype(sch.producers.cend()) end; decltype(end) begin; @@ -2778,12 +2921,12 @@ int64_t controller::set_proposed_producers( vector producers ) { my->db.modify( gpo, [&]( auto& gp ) { gp.proposed_schedule_block_num = cur_block_num; - gp.proposed_schedule = std::move(sch); + gp.proposed_schedule = sch.to_shared(gp.proposed_schedule.producers.get_allocator()); }); return version; } -const producer_schedule_type& controller::active_producers()const { +const producer_authority_schedule& controller::active_producers()const { if( !(my->pending) ) return my->head->active_schedule; @@ -2793,7 +2936,7 @@ const producer_schedule_type& controller::active_producers()const { return my->pending->get_pending_block_header_state().active_schedule; } -const producer_schedule_type& controller::pending_producers()const { +const producer_authority_schedule& controller::pending_producers()const { if( !(my->pending) ) return my->head->pending_schedule.schedule; @@ -2801,9 +2944,10 @@ const producer_schedule_type& controller::pending_producers()const { return my->pending->_block_stage.get()._block_state->pending_schedule.schedule; if( my->pending->_block_stage.contains() ) { - const auto& np = my->pending->_block_stage.get()._unsigned_block->new_producers; - if( np ) - return *np; + const auto& new_prods_cache = my->pending->_block_stage.get()._new_producer_authority_cache; + if( new_prods_cache ) { + return *new_prods_cache; + } } const auto& bb = my->pending->_block_stage.get(); @@ -2814,12 +2958,12 @@ const producer_schedule_type& controller::pending_producers()const { return bb._pending_block_header_state.prev_pending_schedule.schedule; } -optional controller::proposed_producers()const { +optional controller::proposed_producers()const { const auto& 
gpo = get_global_properties(); if( !gpo.proposed_schedule_block_num.valid() ) - return optional(); + return optional(); - return gpo.proposed_schedule; + return producer_authority_schedule::from_shared(gpo.proposed_schedule); } bool controller::light_validation_allowed(bool replay_opts_disabled_by_policy) const { @@ -2898,14 +3042,6 @@ const account_object& controller::get_account( account_name name )const return my->db.get(name); } FC_CAPTURE_AND_RETHROW( (name) ) } -unapplied_transactions_type& controller::get_unapplied_transactions() { - if ( my->read_mode != db_read_mode::SPECULATIVE ) { - EOS_ASSERT( my->unapplied_transactions.empty(), transaction_exception, - "not empty unapplied_transactions in non-speculative mode" ); //should never happen - } - return my->unapplied_transactions; -} - bool controller::sender_avoids_whitelist_blacklist_enforcement( account_name sender )const { return my->sender_avoids_whitelist_blacklist_enforcement( sender ); } @@ -2940,6 +3076,10 @@ bool controller::is_ram_billing_in_notify_allowed()const { return my->conf.disable_all_subjective_mitigations || !is_producing_block() || my->conf.allow_ram_billing_in_notify; } +uint32_t controller::configured_subjective_signature_length_limit()const { + return my->conf.maximum_variable_signature_length; +} + void controller::validate_expiration( const transaction& trx )const { try { const auto& chain_configuration = get_global_properties().configuration; @@ -3055,6 +3195,12 @@ bool controller::all_subjective_mitigations_disabled()const { return my->conf.disable_all_subjective_mitigations; } +#if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) +vm::wasm_allocator& controller::get_wasm_allocator() { + return my->wasm_alloc; +} +#endif + fc::optional controller::convert_exception_to_error_code( const fc::exception& e ) { const chain_exception* e_ptr = dynamic_cast( &e ); @@ -3065,6 +3211,48 @@ fc::optional controller::convert_exception_to_error_code( const 
fc::ex return e_ptr->error_code; } +chain_id_type controller::extract_chain_id(snapshot_reader& snapshot) { + chain_snapshot_header header; + snapshot.read_section([&header]( auto §ion ){ + section.read_row(header); + header.validate(); + }); + + // check if this is a legacy version of the snapshot, which has a genesis state instead of chain id + fc::optional genesis = controller_impl::extract_legacy_genesis_state(snapshot, header.version); + if (genesis) { + return genesis->compute_chain_id(); + } + + chain_id_type chain_id; + snapshot.read_section([&chain_id]( auto §ion ){ + snapshot_global_property_object global_properties; + section.read_row(global_properties); + chain_id = global_properties.chain_id; + }); + return chain_id; +} + +fc::optional controller::extract_chain_id_from_db( const path& state_dir ) { + try { + chainbase::database db( state_dir, chainbase::database::read_only ); + + db.add_index(); + db.add_index(); + + controller_impl::validate_db_version( db ); + + if( db.revision() < 1 ) return {}; + + return db.get().chain_id; + } catch( const bad_database_version_exception& ) { + throw; + } catch( ... 
) { + } + + return {}; +} + /// Protocol feature activation handlers: template<> @@ -3099,6 +3287,22 @@ void controller_impl::on_activation +void controller_impl::on_activation() { + db.modify( db.get(), [&]( auto& ps ) { + ps.num_supported_key_types = 3; + } ); +} + +template<> +void controller_impl::on_activation() { + db.modify( db.get(), [&]( auto& ps ) { + add_intrinsic_to_whitelist( ps.whitelisted_intrinsics, "set_proposed_producers_ex" ); + } ); +} + + + /// End of protocol feature activation handlers } } /// eosio::chain diff --git a/libraries/chain/eosio_contract.cpp b/libraries/chain/eosio_contract.cpp index 9bd9a022f22..ccb85a82ac2 100644 --- a/libraries/chain/eosio_contract.cpp +++ b/libraries/chain/eosio_contract.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include @@ -313,7 +309,7 @@ void apply_eosio_deleteauth(apply_context& context) { auto range = index.equal_range(boost::make_tuple(remove.account, remove.permission)); EOS_ASSERT(range.first == range.second, action_validate_exception, "Cannot delete a linked authority. Unlink the authority first. This authority is linked to ${code}::${type}.", - ("code", string(range.first->code))("type", string(range.first->message_type))); + ("code", range.first->code)("type", range.first->message_type)); } const auto& permission = authorization.get_permission({remove.account, remove.permission}); diff --git a/libraries/chain/eosio_contract_abi_bin.cpp b/libraries/chain/eosio_contract_abi_bin.cpp new file mode 100644 index 00000000000..68a34ee5584 --- /dev/null +++ b/libraries/chain/eosio_contract_abi_bin.cpp @@ -0,0 +1,190 @@ +namespace eosio { namespace chain { + +// Initial value for eosio ABI. This value affects consensus; if different +// nodeos versions have different versions of the initial eosio ABI, then +// they could potentially fork. See https://github.com/EOSIO/eos/issues/7794 +// +// This is a capture of the ABI produced by versions 1.7 and 1.8. 
+ +unsigned char eosio_abi_bin[2132] = { + 0x0e, 0x65, 0x6f, 0x73, 0x69, 0x6f, 0x3a, 0x3a, 0x61, 0x62, 0x69, 0x2f, + 0x31, 0x2e, 0x30, 0x07, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x0f, 0x70, + 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x0b, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x13, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x32, 0x35, 0x36, + 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x32, + 0x35, 0x36, 0x0b, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x06, 0x75, 0x69, 0x6e, 0x74, 0x31, 0x36, 0x16, 0x10, 0x70, + 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x00, 0x02, 0x05, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x0c, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x0f, + 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x04, + 0x07, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x0c, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5b, 0x5d, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x05, 
0x62, 0x79, 0x74, 0x65, 0x73, 0x09, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x02, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x06, 0x75, 0x69, 0x6e, 0x74, 0x31, 0x36, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x00, 0x06, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x0d, 0x72, 0x65, 0x66, + 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x06, 0x75, + 0x69, 0x6e, 0x74, 0x31, 0x36, 0x10, 0x72, 0x65, 0x66, 0x5f, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x06, 0x75, + 0x69, 0x6e, 0x74, 0x33, 0x32, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x65, + 0x74, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x64, + 0x73, 0x09, 0x76, 0x61, 0x72, 0x75, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x10, + 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x6d, 0x73, 0x05, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x09, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x73, 0x65, 0x63, 0x09, 0x76, 0x61, 0x72, + 0x75, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x03, 0x14, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, + 0x72, 0x65, 0x65, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x08, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5b, 0x5d, 0x07, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x08, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5b, + 0x5d, 0x16, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x0b, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5b, 0x5d, + 0x0c, 0x70, 0x72, 0x6f, 0x64, 
0x75, 0x63, 0x65, 0x72, 0x5f, 0x6b, 0x65, + 0x79, 0x00, 0x02, 0x0d, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x11, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, + 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x11, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x00, 0x02, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x06, 0x75, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x09, 0x70, + 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 0x73, 0x0e, 0x70, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5b, 0x5d, 0x0c, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x00, 0x09, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x06, 0x75, 0x69, 0x6e, 0x74, 0x33, 0x32, 0x08, 0x70, 0x72, 0x6f, 0x64, + 0x75, 0x63, 0x65, 0x72, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, + 0x6d, 0x65, 0x64, 0x06, 0x75, 0x69, 0x6e, 0x74, 0x31, 0x36, 0x08, 0x70, + 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x0d, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x69, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x11, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x72, + 0x6f, 0x6f, 0x74, 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x32, 0x35, 0x36, 0x0c, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x72, 0x6f, 0x6f, 0x74, 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x32, 0x35, 0x36, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, + 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x06, 0x75, 0x69, + 0x6e, 0x74, 0x33, 0x32, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x72, 0x73, 0x12, 0x70, 0x72, 0x6f, 0x64, 0x75, + 0x63, 0x65, 0x72, 0x5f, 0x73, 
0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, + 0x3f, 0x11, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x0b, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5b, 0x5d, 0x0a, 0x6b, 0x65, 0x79, 0x5f, + 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x00, 0x02, 0x03, 0x6b, 0x65, 0x79, + 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x06, + 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x0b, 0x77, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x17, 0x70, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, + 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x00, 0x02, 0x0a, 0x70, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x10, 0x70, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, + 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x0b, 0x77, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x0b, 0x77, 0x61, 0x69, 0x74, + 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x00, 0x02, 0x08, 0x77, 0x61, + 0x69, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x06, 0x75, 0x69, 0x6e, 0x74, 0x33, + 0x32, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x0b, 0x77, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x09, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x00, 0x04, 0x09, 0x74, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x06, 0x75, 0x69, 0x6e, 0x74, 0x33, + 0x32, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x0c, 0x6b, 0x65, 0x79, 0x5f, 0x77, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x5b, 0x5d, 0x08, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x73, 0x19, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x77, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x5b, 0x5d, 0x05, 0x77, 0x61, 0x69, 0x74, 0x73, + 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x5b, 0x5d, 0x0a, 0x6e, 0x65, 0x77, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x00, 0x04, 0x07, 0x63, 
0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x0c, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x06, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x07, 0x73, 0x65, 0x74, 0x63, 0x6f, 0x64, 0x65, 0x00, + 0x04, 0x07, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x0c, 0x61, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x06, 0x76, + 0x6d, 0x74, 0x79, 0x70, 0x65, 0x05, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x09, + 0x76, 0x6d, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x05, 0x75, 0x69, + 0x6e, 0x74, 0x38, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x05, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x06, 0x73, 0x65, 0x74, 0x61, 0x62, 0x69, 0x00, 0x02, 0x07, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x0c, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x03, 0x61, 0x62, 0x69, + 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x61, 0x75, 0x74, 0x68, 0x00, 0x04, 0x07, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x0a, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x0f, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x0f, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x04, 0x61, 0x75, 0x74, 0x68, 0x09, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x0a, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x61, 0x75, 0x74, 0x68, 0x00, 0x02, 0x07, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x0a, 0x70, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 
0x6e, 0x0f, 0x70, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x08, 0x6c, + 0x69, 0x6e, 0x6b, 0x61, 0x75, 0x74, 0x68, 0x00, 0x04, 0x07, 0x61, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x0c, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x0f, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x0a, 0x75, 0x6e, + 0x6c, 0x69, 0x6e, 0x6b, 0x61, 0x75, 0x74, 0x68, 0x00, 0x03, 0x07, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x04, 0x63, 0x6f, 0x64, 0x65, + 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x04, 0x74, 0x79, 0x70, 0x65, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x00, 0x02, 0x0e, 0x63, 0x61, 0x6e, + 0x63, 0x65, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x10, + 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, + 0x65, 0x76, 0x65, 0x6c, 0x06, 0x74, 0x72, 0x78, 0x5f, 0x69, 0x64, 0x13, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x07, 0x6f, 0x6e, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x00, 0x02, 0x09, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x07, 0x75, 0x69, 0x6e, 0x74, 0x31, 0x32, 0x38, 0x08, + 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x72, 0x78, 0x05, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x07, 0x6f, 0x6e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x00, 0x01, + 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x0c, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x68, 0x65, 0x61, 
0x64, 0x65, 0x72, 0x0a, 0x00, 0x40, 0x9e, + 0x9a, 0x22, 0x64, 0xb8, 0x9a, 0x0a, 0x6e, 0x65, 0x77, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x00, 0x00, 0x00, 0x00, 0x40, 0x25, 0x8a, 0xb2, + 0xc2, 0x07, 0x73, 0x65, 0x74, 0x63, 0x6f, 0x64, 0x65, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xb8, 0x63, 0xb2, 0xc2, 0x06, 0x73, 0x65, 0x74, 0x61, 0x62, + 0x69, 0x00, 0x00, 0x40, 0xcb, 0xda, 0xa8, 0x6c, 0x52, 0xd5, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x61, 0x75, 0x74, 0x68, 0x00, 0x00, 0x40, + 0xcb, 0xda, 0xa8, 0xac, 0xa2, 0x4a, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x61, 0x75, 0x74, 0x68, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x6b, 0x03, + 0xa7, 0x8b, 0x08, 0x6c, 0x69, 0x6e, 0x6b, 0x61, 0x75, 0x74, 0x68, 0x00, + 0x00, 0x40, 0xcb, 0xda, 0xc0, 0xe9, 0xe2, 0xd4, 0x0a, 0x75, 0x6e, 0x6c, + 0x69, 0x6e, 0x6b, 0x61, 0x75, 0x74, 0x68, 0x00, 0x00, 0xbc, 0x89, 0x2a, + 0x45, 0x85, 0xa6, 0x41, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xd2, 0x7b, 0xd5, + 0xa4, 0x07, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x22, 0x1a, 0xcf, 0xa4, 0x07, 0x6f, 0x6e, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +} } /// eosio::chain diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 749ec6404d5..5fe1c05118e 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -18,13 +19,17 @@ namespace eosio { namespace chain { const uint32_t fork_database::min_supported_version = 1; const uint32_t fork_database::max_supported_version = 1; + // work around block_state::is_valid being private + inline bool block_state_is_valid( const block_state& bs ) { + return bs.is_valid(); + } + /** * History: * Version 1: initial version of the new refactored fork database portable format */ struct by_block_id; - struct by_block_num; struct by_lib_block_num; struct 
by_prev; typedef multi_index_container< @@ -34,7 +39,7 @@ namespace eosio { namespace chain { ordered_non_unique< tag, const_mem_fun >, ordered_unique< tag, composite_key< block_state, - member, + global_fun, member, member, member @@ -125,12 +130,7 @@ namespace eosio { namespace chain { for( uint32_t i = 0, n = size.value; i < n; ++i ) { block_state s; fc::raw::unpack( ds, s ); - for( const auto& receipt : s.block->transactions ) { - if( receipt.trx.contains() ) { - const auto& pt = receipt.trx.get(); - s.trxs.push_back( std::make_shared( std::make_shared(pt) ) ); - } - } + // do not populate transaction_metadatas, they will be created as needed in apply_block with appropriate key recovery s.header_exts = s.block->validate_and_extract_header_extensions(); my->add( std::make_shared( move( s ) ), false, true, validator ); } @@ -314,8 +314,8 @@ namespace eosio { namespace chain { try { const auto& exts = n->header_exts; - if( exts.size() > 0 ) { - const auto& new_protocol_features = exts.front().get().protocol_features; + if( exts.count(protocol_feature_activation::extension_id()) > 0 ) { + const auto& new_protocol_features = exts.lower_bound(protocol_feature_activation::extension_id())->second.get().protocol_features; validator( n->header.timestamp, prev_bh->activated_protocol_features->protocol_features, new_protocol_features ); } } EOS_RETHROW_EXCEPTIONS( fork_database_exception, "serialized fork database is incompatible with configured protocol features" ) diff --git a/libraries/chain/genesis_intrinsics.cpp b/libraries/chain/genesis_intrinsics.cpp index be6077acbb1..8a0c13ec165 100644 --- a/libraries/chain/genesis_intrinsics.cpp +++ b/libraries/chain/genesis_intrinsics.cpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #include namespace eosio { namespace chain { diff --git a/libraries/chain/genesis_state.cpp b/libraries/chain/genesis_state.cpp index 23d8986e5bd..3a6e1650b70 100644 --- a/libraries/chain/genesis_state.cpp +++ 
b/libraries/chain/genesis_state.cpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #include // these are required to serialize a genesis_state diff --git a/libraries/chain/genesis_state_root_key.cpp.in b/libraries/chain/genesis_state_root_key.cpp.in index 11bbde26397..91c751519ef 100644 --- a/libraries/chain/genesis_state_root_key.cpp.in +++ b/libraries/chain/genesis_state_root_key.cpp.in @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #include namespace eosio { namespace chain { diff --git a/libraries/chain/include/eosio/chain/abi_def.hpp b/libraries/chain/include/eosio/chain/abi_def.hpp index 9b1d211d623..dcace005520 100644 --- a/libraries/chain/include/eosio/chain/abi_def.hpp +++ b/libraries/chain/include/eosio/chain/abi_def.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -129,6 +125,8 @@ struct abi_def { abi_def eosio_contract_abi(const abi_def& eosio_system_abi); vector common_type_defs(); +extern unsigned char eosio_abi_bin[2132]; + } } /// namespace eosio::chain namespace fc { diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index 9c3ba8368de..c8c2efc1c8d 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -377,6 +373,8 @@ namespace impl { /** * overload of to_variant_object for actions + * + * This matches the FC_REFLECT for this type, but this is provided to extract the contents of act.data * @tparam Resolver * @param act * @param resolver @@ -385,6 +383,7 @@ namespace impl { template static void add( mutable_variant_object &out, const char* name, const action& act, Resolver resolver, abi_traverse_context& ctx ) { + static_assert(fc::reflector::total_member_count == 4); auto h = 
ctx.enter_scope(); mutable_variant_object mvo; mvo("account", act.account); @@ -419,6 +418,8 @@ namespace impl { /** * overload of to_variant_object for packed_transaction + * + * This matches the FC_REFLECT for this type, but this is provided to allow extracting the contents of ptrx.transaction * @tparam Resolver * @param act * @param resolver @@ -427,6 +428,7 @@ namespace impl { template static void add( mutable_variant_object &out, const char* name, const packed_transaction& ptrx, Resolver resolver, abi_traverse_context& ctx ) { + static_assert(fc::reflector::total_member_count == 4); auto h = ctx.enter_scope(); mutable_variant_object mvo; auto trx = ptrx.get_transaction(); @@ -440,6 +442,88 @@ namespace impl { out(name, std::move(mvo)); } + + /** + * overload of to_variant_object for transaction + * + * This matches the FC_REFLECT for this type, but this is provided to allow extracting the contents of trx.transaction_extensions + */ + template + static void add( mutable_variant_object &out, const char* name, const transaction& trx, Resolver resolver, abi_traverse_context& ctx ) + { + static_assert(fc::reflector::total_member_count == 9); + auto h = ctx.enter_scope(); + mutable_variant_object mvo; + mvo("expiration", trx.expiration); + mvo("ref_block_num", trx.ref_block_num); + mvo("ref_block_prefix", trx.ref_block_prefix); + mvo("max_net_usage_words", trx.max_net_usage_words); + mvo("max_cpu_usage_ms", trx.max_cpu_usage_ms); + mvo("delay_sec", trx.delay_sec); + add(mvo, "context_free_actions", trx.context_free_actions, resolver, ctx); + add(mvo, "actions", trx.actions, resolver, ctx); + + // process contents of block.transaction_extensions + auto exts = trx.validate_and_extract_extensions(); + if (exts.count(deferred_transaction_generation_context::extension_id()) > 0) { + const auto& deferred_transaction_generation = exts.lower_bound(deferred_transaction_generation_context::extension_id())->second.get(); + mvo("deferred_transaction_generation", 
deferred_transaction_generation); + } + + out(name, std::move(mvo)); + } + + /** + * overload of to_variant_object for signed_block + * + * This matches the FC_REFLECT for this type, but this is provided to allow extracting the contents of + * block.header_extensions and block.block_extensions + */ + template + static void add( mutable_variant_object &out, const char* name, const signed_block& block, Resolver resolver, abi_traverse_context& ctx ) + { + static_assert(fc::reflector::total_member_count == 12); + auto h = ctx.enter_scope(); + mutable_variant_object mvo; + mvo("timestamp", block.timestamp); + mvo("producer", block.producer); + mvo("confirmed", block.confirmed); + mvo("previous", block.previous); + mvo("transaction_mroot", block.transaction_mroot); + mvo("action_mroot", block.action_mroot); + mvo("schedule_version", block.schedule_version); + mvo("new_producers", block.new_producers); + + // process contents of block.header_extensions + flat_multimap header_exts = block.validate_and_extract_header_extensions(); + if ( header_exts.count(protocol_feature_activation::extension_id() > 0) ) { + const auto& new_protocol_features = header_exts.lower_bound(protocol_feature_activation::extension_id())->second.get().protocol_features; + vector pf_array; + pf_array.reserve(new_protocol_features.size()); + for (auto feature : new_protocol_features) { + mutable_variant_object feature_mvo; + add(feature_mvo, "feature_digest", feature, resolver, ctx); + pf_array.push_back(feature_mvo); + } + mvo("new_protocol_features", pf_array); + } + if ( header_exts.count(producer_schedule_change_extension::extension_id())) { + const auto& new_producer_schedule = header_exts.lower_bound(producer_schedule_change_extension::extension_id())->second.get(); + mvo("new_producer_schedule", new_producer_schedule); + } + + mvo("producer_signature", block.producer_signature); + add(mvo, "transactions", block.transactions, resolver, ctx); + + // process contents of block.block_extensions + 
auto block_exts = block.validate_and_extract_extensions(); + if ( block_exts.count(additional_block_signatures_extension::extension_id()) > 0) { + const auto& additional_signatures = block_exts.lower_bound(additional_block_signatures_extension::extension_id())->second.get(); + mvo("additional_signatures", additional_signatures); + } + + out(name, std::move(mvo)); + } }; /** diff --git a/libraries/chain/include/eosio/chain/account_object.hpp b/libraries/chain/include/eosio/chain/account_object.hpp index 235c3b91284..8c8da161a24 100644 --- a/libraries/chain/include/eosio/chain/account_object.hpp +++ b/libraries/chain/include/eosio/chain/account_object.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/libraries/chain/include/eosio/chain/action.hpp b/libraries/chain/include/eosio/chain/action.hpp index 9912e2f31b7..1e593447955 100644 --- a/libraries/chain/include/eosio/chain/action.hpp +++ b/libraries/chain/include/eosio/chain/action.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/action_receipt.hpp b/libraries/chain/include/eosio/chain/action_receipt.hpp index 17fe4f46713..78ef25c7c00 100644 --- a/libraries/chain/include/eosio/chain/action_receipt.hpp +++ b/libraries/chain/include/eosio/chain/action_receipt.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index ce7ae2bdae7..3a5889248bf 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -184,7 +180,7 @@ class apply_context { // context.require_write_lock( scope ); - const auto& tab = 
context.find_or_create_table( context.receiver, scope, table, payer ); + const auto& tab = context.find_or_create_table( context.receiver, name(scope), name(table), payer ); const auto& obj = context.db.create( [&]( auto& o ){ o.t_id = tab.id; @@ -248,7 +244,7 @@ class apply_context { } int find_secondary( uint64_t code, uint64_t scope, uint64_t table, secondary_key_proxy_const_type secondary, uint64_t& primary ) { - auto tab = context.find_table( code, scope, table ); + auto tab = context.find_table( name(code), name(scope), name(table) ); if( !tab ) return -1; auto table_end_itr = itr_cache.cache_table( *tab ); @@ -262,7 +258,7 @@ class apply_context { } int lowerbound_secondary( uint64_t code, uint64_t scope, uint64_t table, secondary_key_proxy_type secondary, uint64_t& primary ) { - auto tab = context.find_table( code, scope, table ); + auto tab = context.find_table( name(code), name(scope), name(table) ); if( !tab ) return -1; auto table_end_itr = itr_cache.cache_table( *tab ); @@ -279,7 +275,7 @@ class apply_context { } int upperbound_secondary( uint64_t code, uint64_t scope, uint64_t table, secondary_key_proxy_type secondary, uint64_t& primary ) { - auto tab = context.find_table( code, scope, table ); + auto tab = context.find_table( name(code), name(scope), name(table) ); if( !tab ) return -1; auto table_end_itr = itr_cache.cache_table( *tab ); @@ -296,7 +292,7 @@ class apply_context { } int end_secondary( uint64_t code, uint64_t scope, uint64_t table ) { - auto tab = context.find_table( code, scope, table ); + auto tab = context.find_table( name(code), name(scope), name(table) ); if( !tab ) return -1; return itr_cache.cache_table( *tab ); @@ -350,7 +346,7 @@ class apply_context { } int find_primary( uint64_t code, uint64_t scope, uint64_t table, secondary_key_proxy_type secondary, uint64_t primary ) { - auto tab = context.find_table( code, scope, table ); + auto tab = context.find_table( name(code), name(scope), name(table) ); if( !tab ) return -1; auto 
table_end_itr = itr_cache.cache_table( *tab ); @@ -363,7 +359,7 @@ class apply_context { } int lowerbound_primary( uint64_t code, uint64_t scope, uint64_t table, uint64_t primary ) { - auto tab = context.find_table( code, scope, table ); + auto tab = context.find_table( name(code), name(scope), name(table) ); if (!tab) return -1; auto table_end_itr = itr_cache.cache_table( *tab ); @@ -377,7 +373,7 @@ class apply_context { } int upperbound_primary( uint64_t code, uint64_t scope, uint64_t table, uint64_t primary ) { - auto tab = context.find_table( code, scope, table ); + auto tab = context.find_table( name(code), name(scope), name(table) ); if ( !tab ) return -1; auto table_end_itr = itr_cache.cache_table( *tab ); @@ -514,16 +510,16 @@ class apply_context { void update_db_usage( const account_name& payer, int64_t delta ); - int db_store_i64( uint64_t scope, uint64_t table, const account_name& payer, uint64_t id, const char* buffer, size_t buffer_size ); + int db_store_i64( name scope, name table, const account_name& payer, uint64_t id, const char* buffer, size_t buffer_size ); void db_update_i64( int iterator, account_name payer, const char* buffer, size_t buffer_size ); void db_remove_i64( int iterator ); int db_get_i64( int iterator, char* buffer, size_t buffer_size ); int db_next_i64( int iterator, uint64_t& primary ); int db_previous_i64( int iterator, uint64_t& primary ); - int db_find_i64( uint64_t code, uint64_t scope, uint64_t table, uint64_t id ); - int db_lowerbound_i64( uint64_t code, uint64_t scope, uint64_t table, uint64_t id ); - int db_upperbound_i64( uint64_t code, uint64_t scope, uint64_t table, uint64_t id ); - int db_end_i64( uint64_t code, uint64_t scope, uint64_t table ); + int db_find_i64( name code, name scope, name table, uint64_t id ); + int db_lowerbound_i64( name code, name scope, name table, uint64_t id ); + int db_upperbound_i64( name code, name scope, name table, uint64_t id ); + int db_end_i64( name code, name scope, name table ); 
private: @@ -531,12 +527,13 @@ class apply_context { const table_id_object& find_or_create_table( name code, name scope, name table, const account_name &payer ); void remove_table( const table_id_object& tid ); - int db_store_i64( uint64_t code, uint64_t scope, uint64_t table, const account_name& payer, uint64_t id, const char* buffer, size_t buffer_size ); + int db_store_i64( name code, name scope, name table, const account_name& payer, uint64_t id, const char* buffer, size_t buffer_size ); /// Misc methods: public: + int get_action( uint32_t type, uint32_t index, char* buffer, size_t buffer_size )const; int get_context_free_data( uint32_t index, char* buffer, size_t buffer_size )const; vector get_active_producers() const; diff --git a/libraries/chain/include/eosio/chain/asset.hpp b/libraries/chain/include/eosio/chain/asset.hpp index 85222652a02..dd427e3976d 100644 --- a/libraries/chain/include/eosio/chain/asset.hpp +++ b/libraries/chain/include/eosio/chain/asset.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -114,5 +110,19 @@ inline void from_variant(const fc::variant& var, eosio::chain::asset& vo) { } } +namespace fc { +inline void from_variant(const fc::variant& var, eosio::chain::extended_asset& vo) { + if( var.is_array() ) { + const auto& va = var.get_array(); + from_variant(va.at(0), vo.quantity); + from_variant(va.at(1), vo.contract); + } else { + const auto& vars = var.get_object(); + from_variant(vars["quantity"], vo.quantity); + from_variant(vars["contract"], vo.contract); + } +} +} + FC_REFLECT(eosio::chain::asset, (amount)(sym)) FC_REFLECT(eosio::chain::extended_asset, (quantity)(contract) ) diff --git a/libraries/chain/include/eosio/chain/authority.hpp b/libraries/chain/include/eosio/chain/authority.hpp index e7a7307bb56..492fd9b404f 100644 --- a/libraries/chain/include/eosio/chain/authority.hpp +++ b/libraries/chain/include/eosio/chain/authority.hpp @@ -1,7 +1,3 @@ -/** - * @file - * 
@copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -11,6 +7,75 @@ namespace eosio { namespace chain { +using shared_public_key_data = fc::static_variant; + +struct shared_public_key { + shared_public_key( shared_public_key_data&& p ) : + pubkey(std::move(p)) {} + + operator public_key_type() const { + fc::crypto::public_key::storage_type public_key_storage; + pubkey.visit(overloaded { + [&](const auto& k1r1) { + public_key_storage = k1r1; + }, + [&](const shared_string& wa) { + fc::datastream ds(wa.data(), wa.size()); + fc::crypto::webauthn::public_key pub; + fc::raw::unpack(ds, pub); + public_key_storage = pub; + } + }); + return std::move(public_key_storage); + } + + operator string() const { + return (string)this->operator public_key_type(); + } + + shared_public_key_data pubkey; + + friend bool operator == ( const shared_public_key& lhs, const shared_public_key& rhs ) { + if(lhs.pubkey.which() != rhs.pubkey.which()) + return false; + + return lhs.pubkey.visit(overloaded { + [&](const fc::ecc::public_key_shim& k1) { + return k1._data == rhs.pubkey.get()._data; + }, + [&](const fc::crypto::r1::public_key_shim& r1) { + return r1._data == rhs.pubkey.get()._data; + }, + [&](const shared_string& wa) { + return wa == rhs.pubkey.get(); + } + }); + } + + friend bool operator==(const shared_public_key& l, const public_key_type& r) { + if(l.pubkey.which() != r._storage.which()) + return false; + + return l.pubkey.visit(overloaded { + [&](const fc::ecc::public_key_shim& k1) { + return k1._data == r._storage.get()._data; + }, + [&](const fc::crypto::r1::public_key_shim& r1) { + return r1._data == r._storage.get()._data; + }, + [&](const shared_string& wa) { + fc::datastream ds(wa.data(), wa.size()); + fc::crypto::webauthn::public_key pub; + fc::raw::unpack(ds, pub); + return pub == r._storage.get(); + } + }); + } + + friend bool operator==(const public_key_type& l, const shared_public_key& r) { + return r == l; + } +}; struct permission_level_weight { 
permission_level permission; @@ -30,6 +95,39 @@ struct key_weight { } }; + +struct shared_key_weight { + shared_key_weight(shared_public_key_data&& k, const weight_type& w) : + key(std::move(k)), weight(w) {} + + operator key_weight() const { + return key_weight{key, weight}; + } + + static shared_key_weight convert(chainbase::allocator allocator, const key_weight& k) { + return k.key._storage.visit(overloaded { + [&](const auto& k1r1) { + return shared_key_weight(k1r1, k.weight); + }, + [&](const fc::crypto::webauthn::public_key& wa) { + size_t psz = fc::raw::pack_size(wa); + shared_string wa_ss(psz, boost::container::default_init, std::move(allocator)); + fc::datastream ds(wa_ss.data(), wa_ss.size()); + fc::raw::pack(ds, wa); + + return shared_key_weight(std::move(wa_ss), k.weight); + } + }); + } + + shared_public_key key; + weight_type weight; + + friend bool operator == ( const shared_key_weight& lhs, const shared_key_weight& rhs ) { + return tie( lhs.key, lhs.weight ) == tie( rhs.key, rhs.weight ); + } +}; + struct wait_weight { uint32_t wait_sec; weight_type weight; @@ -100,14 +198,18 @@ struct shared_authority { shared_authority& operator=(const authority& a) { threshold = a.threshold; - keys = decltype(keys)(a.keys.begin(), a.keys.end(), keys.get_allocator()); + keys.clear(); + keys.reserve(a.keys.size()); + for(const key_weight& k : a.keys) { + keys.emplace_back(shared_key_weight::convert(keys.get_allocator(), k)); + } accounts = decltype(accounts)(a.accounts.begin(), a.accounts.end(), accounts.get_allocator()); waits = decltype(waits)(a.waits.begin(), a.waits.end(), waits.get_allocator()); return *this; } uint32_t threshold = 0; - shared_vector keys; + shared_vector keys; shared_vector accounts; shared_vector waits; @@ -202,4 +304,6 @@ FC_REFLECT(eosio::chain::permission_level_weight, (permission)(weight) ) FC_REFLECT(eosio::chain::key_weight, (key)(weight) ) FC_REFLECT(eosio::chain::wait_weight, (wait_sec)(weight) ) FC_REFLECT(eosio::chain::authority, 
(threshold)(keys)(accounts)(waits) ) +FC_REFLECT(eosio::chain::shared_key_weight, (key)(weight) ) FC_REFLECT(eosio::chain::shared_authority, (threshold)(keys)(accounts)(waits) ) +FC_REFLECT(eosio::chain::shared_public_key, (pubkey)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/authority_checker.hpp b/libraries/chain/include/eosio/chain/authority_checker.hpp index f17eeac10bf..e90bd5a49c3 100644 --- a/libraries/chain/include/eosio/chain/authority_checker.hpp +++ b/libraries/chain/include/eosio/chain/authority_checker.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -19,31 +15,9 @@ namespace eosio { namespace chain { namespace detail { - - // Order of the template types in the static_variant matters to meta_permission_comparator. - using meta_permission = static_variant; - - struct get_weight_visitor { - using result_type = uint32_t; - - template - uint32_t operator()( const Permission& permission ) { return permission.weight; } - }; - - // Orders permissions descending by weight, and breaks ties with Wait permissions being less than - // Key permissions which are in turn less than Account permissions - struct meta_permission_comparator { - bool operator()( const meta_permission& lhs, const meta_permission& rhs ) const { - get_weight_visitor scale; - auto lhs_weight = lhs.visit(scale); - auto lhs_type = lhs.which(); - auto rhs_weight = rhs.visit(scale); - auto rhs_type = rhs.which(); - return std::tie( lhs_weight, lhs_type ) > std::tie( rhs_weight, rhs_type ); - } - }; - - using meta_permission_set = boost::container::flat_multiset; + using meta_permission_key = std::tuple; + using meta_permission_value = std::function; + using meta_permission_map = boost::container::flat_multimap>; } /// namespace detail @@ -186,17 +160,27 @@ namespace detail { }); // Sort key permissions and account permissions together into a single set of meta_permissions - detail::meta_permission_set 
permissions; + detail::meta_permission_map permissions; - permissions.insert(authority.waits.begin(), authority.waits.end()); - permissions.insert(authority.keys.begin(), authority.keys.end()); - permissions.insert(authority.accounts.begin(), authority.accounts.end()); + weight_tally_visitor visitor(*this, cached_permissions, depth); + auto emplace_permission = [&permissions, &visitor](int priority, const auto& mp) { + permissions.emplace( + std::make_tuple(mp.weight, priority), + [&mp, &visitor]() { + return visitor(mp); + } + ); + }; + + permissions.reserve(authority.waits.size() + authority.keys.size() + authority.accounts.size()); + std::for_each(authority.accounts.begin(), authority.accounts.end(), boost::bind(emplace_permission, 1, _1)); + std::for_each(authority.keys.begin(), authority.keys.end(), boost::bind(emplace_permission, 2, _1)); + std::for_each(authority.waits.begin(), authority.waits.end(), boost::bind(emplace_permission, 3, _1)); // Check all permissions, from highest weight to lowest, seeing if provided authorization factors satisfies them or not - weight_tally_visitor visitor(*this, cached_permissions, depth); - for( const auto& permission : permissions ) + for( const auto& p: permissions ) // If we've got enough weight, to satisfy the authority, return! 
- if( permission.visit(visitor) >= authority.threshold ) { + if( p.second() >= authority.threshold ) { KeyReverter.cancel(); return true; } @@ -224,7 +208,8 @@ namespace detail { return total_weight; } - uint32_t operator()(const key_weight& permission) { + template>> + uint32_t operator()(const KeyWeight& permission) { auto itr = boost::find( checker.provided_keys, permission.key ); if( itr != checker.provided_keys.end() ) { checker._used_keys[itr - checker.provided_keys.begin()] = true; diff --git a/libraries/chain/include/eosio/chain/authorization_manager.hpp b/libraries/chain/include/eosio/chain/authorization_manager.hpp index 3e9ffd0bb42..2d67cc7adab 100644 --- a/libraries/chain/include/eosio/chain/authorization_manager.hpp +++ b/libraries/chain/include/eosio/chain/authorization_manager.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/block.hpp b/libraries/chain/include/eosio/chain/block.hpp index eee489e2961..6229c6760c0 100644 --- a/libraries/chain/include/eosio/chain/block.hpp +++ b/libraries/chain/include/eosio/chain/block.hpp @@ -51,13 +51,45 @@ namespace eosio { namespace chain { } }; + struct additional_block_signatures_extension : fc::reflect_init { + static constexpr uint16_t extension_id() { return 2; } + static constexpr bool enforce_unique() { return true; } + + additional_block_signatures_extension() = default; + + additional_block_signatures_extension( const vector& signatures ) + :signatures( signatures ) + {} + + additional_block_signatures_extension( vector&& signatures ) + :signatures( std::move(signatures) ) + {} + + void reflector_init(); + + vector signatures; + }; + + namespace detail { + template + struct block_extension_types { + using block_extension_t = fc::static_variant< Ts... >; + using decompose_t = decompose< Ts... 
>; + }; + } + + using block_extension_types = detail::block_extension_types< + additional_block_signatures_extension + >; + + using block_extension = block_extension_types::block_extension_t; /** */ - struct signed_block : public signed_block_header { - private: + struct signed_block : public signed_block_header{ + private: signed_block( const signed_block& ) = default; - public: + public: signed_block() = default; explicit signed_block( const signed_block_header& h ):signed_block_header(h){} signed_block( signed_block&& ) = default; @@ -66,6 +98,8 @@ namespace eosio { namespace chain { vector transactions; /// new or generated transactions extensions_type block_extensions; + + flat_multimap validate_and_extract_extensions()const; }; using signed_block_ptr = std::shared_ptr; @@ -83,4 +117,5 @@ FC_REFLECT_ENUM( eosio::chain::transaction_receipt::status_enum, FC_REFLECT(eosio::chain::transaction_receipt_header, (status)(cpu_usage_us)(net_usage_words) ) FC_REFLECT_DERIVED(eosio::chain::transaction_receipt, (eosio::chain::transaction_receipt_header), (trx) ) +FC_REFLECT(eosio::chain::additional_block_signatures_extension, (signatures)); FC_REFLECT_DERIVED(eosio::chain::signed_block, (eosio::chain::signed_block_header), (transactions)(block_extensions) ) diff --git a/libraries/chain/include/eosio/chain/block_header.hpp b/libraries/chain/include/eosio/chain/block_header.hpp index fc751826d95..286ecf528c1 100644 --- a/libraries/chain/include/eosio/chain/block_header.hpp +++ b/libraries/chain/include/eosio/chain/block_header.hpp @@ -10,16 +10,17 @@ namespace eosio { namespace chain { namespace detail { template struct block_header_extension_types { - using block_header_extensions_t = fc::static_variant< Ts... >; + using block_header_extension_t = fc::static_variant< Ts... >; using decompose_t = decompose< Ts... 
>; }; } using block_header_extension_types = detail::block_header_extension_types< - protocol_feature_activation + protocol_feature_activation, + producer_schedule_change_extension >; - using block_header_extensions = block_header_extension_types::block_header_extensions_t; + using block_header_extension = block_header_extension_types::block_header_extension_t; struct block_header { @@ -42,13 +43,20 @@ namespace eosio { namespace chain { checksum256_type transaction_mroot; /// mroot of cycles_summary checksum256_type action_mroot; /// mroot of all delivered action receipts - - /** The producer schedule version that should validate this block, this is used to + /** + * LEGACY SUPPORT - After enabling the wtmsig-blocks extension this field is deprecated and must be empty + * + * Prior to that activation this carries: + * + * The producer schedule version that should validate this block, this is used to * indicate that the prior block which included new_producers->version has been marked * irreversible and that it the new producer schedule takes effect this block. 
*/ + + using new_producers_type = optional; + uint32_t schedule_version = 0; - optional new_producers; + new_producers_type new_producers; extensions_type header_extensions; @@ -59,7 +67,7 @@ namespace eosio { namespace chain { uint32_t block_num() const { return num_from_id(previous) + 1; } static uint32_t num_from_id(const block_id_type& id); - vector validate_and_extract_header_extensions()const; + flat_multimap validate_and_extract_header_extensions()const; }; diff --git a/libraries/chain/include/eosio/chain/block_header_state.hpp b/libraries/chain/include/eosio/chain/block_header_state.hpp index 41e19253138..956d66ba422 100644 --- a/libraries/chain/include/eosio/chain/block_header_state.hpp +++ b/libraries/chain/include/eosio/chain/block_header_state.hpp @@ -1,10 +1,50 @@ #pragma once #include #include +#include +#include #include namespace eosio { namespace chain { +namespace legacy { + + /** + * a fc::raw::unpack compatible version of the old block_state structure stored in + * version 2 snapshots + */ + struct snapshot_block_header_state_v2 { + static constexpr uint32_t minimum_version = 0; + static constexpr uint32_t maximum_version = 2; + static_assert(chain_snapshot_header::minimum_compatible_version <= maximum_version, "snapshot_block_header_state_v2 is no longer needed"); + + struct schedule_info { + uint32_t schedule_lib_num = 0; /// last irr block num + digest_type schedule_hash; + producer_schedule_type schedule; + }; + + /// from block_header_state_common + uint32_t block_num = 0; + uint32_t dpos_proposed_irreversible_blocknum = 0; + uint32_t dpos_irreversible_blocknum = 0; + producer_schedule_type active_schedule; + incremental_merkle blockroot_merkle; + flat_map producer_to_last_produced; + flat_map producer_to_last_implied_irb; + public_key_type block_signing_key; + vector confirm_count; + + // from block_header_state + block_id_type id; + signed_block_header header; + schedule_info pending_schedule; + protocol_feature_activation_set_ptr 
activated_protocol_features; + }; +} + +using signer_callback_type = std::function(const digest_type&)>; + struct block_header_state; namespace detail { @@ -12,19 +52,23 @@ namespace detail { uint32_t block_num = 0; uint32_t dpos_proposed_irreversible_blocknum = 0; uint32_t dpos_irreversible_blocknum = 0; - producer_schedule_type active_schedule; + producer_authority_schedule active_schedule; incremental_merkle blockroot_merkle; flat_map producer_to_last_produced; flat_map producer_to_last_implied_irb; - public_key_type block_signing_key; + block_signing_authority valid_block_signing_authority; vector confirm_count; }; struct schedule_info { uint32_t schedule_lib_num = 0; /// last irr block num digest_type schedule_hash; - producer_schedule_type schedule; + producer_authority_schedule schedule; }; + + bool is_builtin_activated( const protocol_feature_activation_set_ptr& pfa, + const protocol_feature_set& pfs, + builtin_protocol_feature_t feature_codename ); } struct pending_block_header_state : public detail::block_header_state_common { @@ -39,29 +83,33 @@ struct pending_block_header_state : public detail::block_header_state_common { signed_block_header make_block_header( const checksum256_type& transaction_mroot, const checksum256_type& action_mroot, - optional&& new_producers, - vector&& new_protocol_feature_activations )const; + const optional& new_producers, + vector&& new_protocol_feature_activations, + const protocol_feature_set& pfs)const; block_header_state finish_next( const signed_block_header& h, + vector&& additional_signatures, + const protocol_feature_set& pfs, const std::function&, const vector& )>& validator, bool skip_validate_signee = false )&&; block_header_state finish_next( signed_block_header& h, + const protocol_feature_set& pfs, const std::function&, const vector& )>& validator, - const std::function& signer )&&; + const signer_callback_type& signer )&&; protected: block_header_state _finish_next( const signed_block_header& h, + const 
protocol_feature_set& pfs, const std::function&, const vector& )>& validator )&&; }; - /** * @struct block_header_state * @brief defines the minimum state necessary to validate transaction headers @@ -71,10 +119,11 @@ struct block_header_state : public detail::block_header_state_common { signed_block_header header; detail::schedule_info pending_schedule; protocol_feature_activation_set_ptr activated_protocol_features; + vector additional_signatures; /// this data is redundant with the data stored in header, but it acts as a cache that avoids /// duplication of work - vector header_exts; + flat_multimap header_exts; block_header_state() = default; @@ -82,9 +131,13 @@ struct block_header_state : public detail::block_header_state_common { :detail::block_header_state_common( std::move(base) ) {} + explicit block_header_state( legacy::snapshot_block_header_state_v2&& snapshot ); + pending_block_header_state next( block_timestamp_type when, uint16_t num_prev_blocks_to_confirm )const; block_header_state next( const signed_block_header& h, + vector&& additional_signatures, + const protocol_feature_set& pfs, const std::function&, const vector& )>& validator, @@ -92,14 +145,12 @@ struct block_header_state : public detail::block_header_state_common { bool has_pending_producers()const { return pending_schedule.schedule.producers.size(); } uint32_t calc_dpos_last_irreversible( account_name producer_of_next_block )const; - bool is_active_producer( account_name n )const; - producer_key get_scheduled_producer( block_timestamp_type t )const; - const block_id_type& prev()const { return header.previous; } - digest_type sig_digest()const; - void sign( const std::function& signer ); - public_key_type signee()const; - void verify_signee(const public_key_type& signee)const; + producer_authority get_scheduled_producer( block_timestamp_type t )const; + const block_id_type& prev()const { return header.previous; } + digest_type sig_digest()const; + void sign( const signer_callback_type& 
signer ); + void verify_signee()const; const vector& get_new_protocol_feature_activations()const; }; @@ -116,7 +167,7 @@ FC_REFLECT( eosio::chain::detail::block_header_state_common, (blockroot_merkle) (producer_to_last_produced) (producer_to_last_implied_irb) - (block_signing_key) + (valid_block_signing_authority) (confirm_count) ) @@ -131,4 +182,29 @@ FC_REFLECT_DERIVED( eosio::chain::block_header_state, (eosio::chain::detail::bl (header) (pending_schedule) (activated_protocol_features) + (additional_signatures) +) + + +FC_REFLECT( eosio::chain::legacy::snapshot_block_header_state_v2::schedule_info, + ( schedule_lib_num ) + ( schedule_hash ) + ( schedule ) +) + + +FC_REFLECT( eosio::chain::legacy::snapshot_block_header_state_v2, + ( block_num ) + ( dpos_proposed_irreversible_blocknum ) + ( dpos_irreversible_blocknum ) + ( active_schedule ) + ( blockroot_merkle ) + ( producer_to_last_produced ) + ( producer_to_last_implied_irb ) + ( block_signing_key ) + ( confirm_count ) + ( id ) + ( header ) + ( pending_schedule ) + ( activated_protocol_features ) ) diff --git a/libraries/chain/include/eosio/chain/block_log.hpp b/libraries/chain/include/eosio/chain/block_log.hpp index 26e1dcb41fa..5fbe3e771a5 100644 --- a/libraries/chain/include/eosio/chain/block_log.hpp +++ b/libraries/chain/include/eosio/chain/block_log.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -44,10 +40,13 @@ namespace eosio { namespace chain { uint64_t append(const signed_block_ptr& b); void flush(); - void reset( const genesis_state& gs, const signed_block_ptr& genesis_block, uint32_t first_block_num = 1 ); + void reset( const genesis_state& gs, const signed_block_ptr& genesis_block ); + void reset( const chain_id_type& chain_id, uint32_t first_block_num ); - std::pair read_block(uint64_t file_pos)const; + signed_block_ptr read_block(uint64_t file_pos)const; + void read_block_header(block_header& bh, uint64_t file_pos)const; 
signed_block_ptr read_block_by_num(uint32_t block_num)const; + block_id_type read_block_id_by_num(uint32_t block_num)const; signed_block_ptr read_block_by_id(const block_id_type& id)const { return read_block_by_num(block_header::num_from_id(id)); } @@ -58,6 +57,7 @@ namespace eosio { namespace chain { uint64_t get_block_pos(uint32_t block_num) const; signed_block_ptr read_head()const; const signed_block_ptr& head()const; + const block_id_type& head_id()const; uint32_t first_block_num() const; static const uint64_t npos = std::numeric_limits::max(); @@ -67,13 +67,48 @@ namespace eosio { namespace chain { static fc::path repair_log( const fc::path& data_dir, uint32_t truncate_at_block = 0 ); - static genesis_state extract_genesis_state( const fc::path& data_dir ); + static fc::optional extract_genesis_state( const fc::path& data_dir ); - private: + static chain_id_type extract_chain_id( const fc::path& data_dir ); + + static void construct_index(const fc::path& block_file_name, const fc::path& index_file_name); + + static bool contains_genesis_state(uint32_t version, uint32_t first_block_num); + + static bool contains_chain_id(uint32_t version, uint32_t first_block_num); + + static bool is_supported_version(uint32_t version); + + static bool trim_blocklog_front(const fc::path& block_dir, const fc::path& temp_dir, uint32_t truncate_at_block); + + private: void open(const fc::path& data_dir); void construct_index(); std::unique_ptr my; }; +//to derive blknum_offset==14 see block_header.hpp and note on disk struct is packed +// block_timestamp_type timestamp; //bytes 0:3 +// account_name producer; //bytes 4:11 +// uint16_t confirmed; //bytes 12:13 +// block_id_type previous; //bytes 14:45, low 4 bytes is big endian block number of previous block + + struct trim_data { //used by trim_blocklog_front(), trim_blocklog_end(), and smoke_test() + trim_data(fc::path block_dir); + ~trim_data(); + uint64_t block_index(uint32_t n) const; + uint64_t block_pos(uint32_t n); + 
fc::path block_file_name, index_file_name; //full pathname for blocks.log and blocks.index + uint32_t version = 0; //blocklog version + uint32_t first_block = 0; //first block in blocks.log + uint32_t last_block = 0; //last block in blocks.log + FILE* blk_in = nullptr; //C style files for reading blocks.log and blocks.index + FILE* ind_in = nullptr; //C style files for reading blocks.log and blocks.index + //we use low level file IO because it is distinctly faster than C++ filebuf or iostream + uint64_t first_block_pos = 0; //file position in blocks.log for the first block in the log + chain_id_type chain_id; + + static constexpr int blknum_offset{14}; //offset from start of block to 4 byte block number, valid for the only allowed versions + }; } } diff --git a/libraries/chain/include/eosio/chain/block_state.hpp b/libraries/chain/include/eosio/chain/block_state.hpp index e91161cf716..6385500e5d2 100644 --- a/libraries/chain/include/eosio/chain/block_state.hpp +++ b/libraries/chain/include/eosio/chain/block_state.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -14,6 +10,7 @@ namespace eosio { namespace chain { struct block_state : public block_header_state { block_state( const block_header_state& prev, signed_block_ptr b, + const protocol_feature_set& pfs, const std::function&, const vector& )>& validator, @@ -23,35 +20,52 @@ namespace eosio { namespace chain { block_state( pending_block_header_state&& cur, signed_block_ptr&& b, // unsigned block vector&& trx_metas, + const protocol_feature_set& pfs, const std::function&, const vector& )>& validator, - const std::function& signer + const signer_callback_type& signer ); - block_state( pending_block_header_state&& cur, - const signed_block_ptr& b, // signed block - vector&& trx_metas, - const std::function&, - const vector& )>& validator, - bool skip_validate_signee - ); - block_state() = default; + + signed_block_ptr block; + + private: // internal use only, not 
thread safe + friend struct fc::reflector; + friend bool block_state_is_valid( const block_state& ); // work-around for multi-index access + friend struct controller_impl; + friend class fork_database; + friend struct fork_database_impl; + friend class unapplied_transaction_queue; + friend struct pending_state; + bool is_valid()const { return validated; } + bool is_pub_keys_recovered()const { return _pub_keys_recovered; } + vector extract_trxs_metas() { + _pub_keys_recovered = false; + auto result = std::move( _cached_trxs ); + _cached_trxs.clear(); + return result; + } + void set_trxs_metas( vector&& trxs_metas, bool keys_recovered ) { + _pub_keys_recovered = keys_recovered; + _cached_trxs = std::move( trxs_metas ); + } + const vector& trxs_metas()const { return _cached_trxs; } - signed_block_ptr block; bool validated = false; + bool _pub_keys_recovered = false; /// this data is redundant with the data stored in block, but facilitates /// recapturing transactions when we pop a block - vector trxs; + vector _cached_trxs; }; using block_state_ptr = std::shared_ptr; + using branch_type = std::vector; } } /// namespace eosio::chain diff --git a/libraries/chain/include/eosio/chain/block_summary_object.hpp b/libraries/chain/include/eosio/chain/block_summary_object.hpp index 0bb24ee7976..7ec03910a1f 100644 --- a/libraries/chain/include/eosio/chain/block_summary_object.hpp +++ b/libraries/chain/include/eosio/chain/block_summary_object.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/chain_config.hpp b/libraries/chain/include/eosio/chain/chain_config.hpp index 37b73cb51fe..9e83b09779f 100644 --- a/libraries/chain/include/eosio/chain/chain_config.hpp +++ b/libraries/chain/include/eosio/chain/chain_config.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/chain_id_type.hpp 
b/libraries/chain/include/eosio/chain/chain_id_type.hpp index 59ab8f248b0..5a1cc52de86 100644 --- a/libraries/chain/include/eosio/chain/chain_id_type.hpp +++ b/libraries/chain/include/eosio/chain/chain_id_type.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -17,6 +13,8 @@ namespace eosio { class read_only; } + class chain_plugin; + namespace chain { struct chain_id_type : public fc::sha256 { @@ -47,6 +45,12 @@ namespace chain { friend class eosio::net_plugin_impl; friend struct eosio::handshake_message; + friend class block_log; + friend struct trim_data; + friend class controller; + friend struct controller_impl; + friend class global_property_object; + friend struct snapshot_global_property_object; }; } } // namespace eosio::chain diff --git a/libraries/chain/include/eosio/chain/chain_snapshot.hpp b/libraries/chain/include/eosio/chain/chain_snapshot.hpp index 5546b301999..a92e9be8695 100644 --- a/libraries/chain/include/eosio/chain/chain_snapshot.hpp +++ b/libraries/chain/include/eosio/chain/chain_snapshot.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -15,10 +11,15 @@ struct chain_snapshot_header { * 2: Updated chain snapshot for v1.8.0 initial protocol features release: * - Incompatible with version 1. 
* - Adds new indices for: protocol_state_object and account_ram_correction_object + * 3: Updated for v2.0.0 protocol features: + * - forwards compatible with version 2 + * - WebAuthn keys + * - wtmsig block siganatures: the block header state changed to include producer authorities and additional signatures + * - removed genesis_state and added chain ID to global_property_object */ static constexpr uint32_t minimum_compatible_version = 2; - static constexpr uint32_t current_version = 2; + static constexpr uint32_t current_version = 3; uint32_t version = current_version; diff --git a/libraries/chain/include/eosio/chain/code_object.hpp b/libraries/chain/include/eosio/chain/code_object.hpp index 3309704a335..043d9855d9b 100644 --- a/libraries/chain/include/eosio/chain/code_object.hpp +++ b/libraries/chain/include/eosio/chain/code_object.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/libraries/chain/include/eosio/chain/config.hpp b/libraries/chain/include/eosio/chain/config.hpp index d6e1a8121f9..2931ba1053c 100644 --- a/libraries/chain/include/eosio/chain/config.hpp +++ b/libraries/chain/include/eosio/chain/config.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -23,25 +19,26 @@ const static auto default_state_size = 1*1024*1024*1024ll; const static auto default_state_guard_size = 128*1024*1024ll; -const static uint64_t system_account_name = N(eosio); -const static uint64_t null_account_name = N(eosio.null); -const static uint64_t producers_account_name = N(eosio.prods); +const static name system_account_name { N(eosio) }; +const static name null_account_name { N(eosio.null) }; +const static name producers_account_name { N(eosio.prods) }; // Active permission of producers account requires greater than 2/3 of the producers to authorize -const static uint64_t majority_producers_permission_name = N(prod.major); // greater than 1/2 of 
producers needed to authorize -const static uint64_t minority_producers_permission_name = N(prod.minor); // greater than 1/3 of producers needed to authorize0 +const static name majority_producers_permission_name { N(prod.major) }; // greater than 1/2 of producers needed to authorize +const static name minority_producers_permission_name { N(prod.minor) }; // greater than 1/3 of producers needed to authorize0 -const static uint64_t eosio_auth_scope = N(eosio.auth); -const static uint64_t eosio_all_scope = N(eosio.all); +const static name eosio_auth_scope { N(eosio.auth) }; +const static name eosio_all_scope { N(eosio.all) }; -const static uint64_t active_name = N(active); -const static uint64_t owner_name = N(owner); -const static uint64_t eosio_any_name = N(eosio.any); -const static uint64_t eosio_code_name = N(eosio.code); +const static name active_name { N(active) }; +const static name owner_name { N(owner) }; +const static name eosio_any_name { N(eosio.any) }; +const static name eosio_code_name { N(eosio.code) }; const static int block_interval_ms = 500; const static int block_interval_us = block_interval_ms*1000; const static uint64_t block_timestamp_epoch = 946684800000ll; // epoch is year 2000. 
+const static uint32_t genesis_num_supported_key_types = 2; /** Percentages are fixed point with a denominator of 10,000 */ const static int percent_100 = 10000; @@ -83,6 +80,7 @@ const static uint16_t default_max_inline_action_depth = 4; const static uint16_t default_max_auth_depth = 6; const static uint32_t default_sig_cpu_bill_pct = 50 * percent_1; // billable percentage of signature recovery const static uint16_t default_controller_thread_pool_size = 2; +const static uint32_t default_max_variable_signature_length = 16384u; const static uint32_t min_net_usage_delta_between_base_and_max_for_trx = 10*1024; // Should be large enough to allow recovery from badly set blockchain parameters without a hard fork diff --git a/libraries/chain/include/eosio/chain/contract_table_objects.hpp b/libraries/chain/include/eosio/chain/contract_table_objects.hpp index bc58cb3e6d9..baf549a2b13 100644 --- a/libraries/chain/include/eosio/chain/contract_table_objects.hpp +++ b/libraries/chain/include/eosio/chain/contract_table_objects.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -61,7 +57,7 @@ namespace eosio { namespace chain { id_type id; table_id t_id; //< t_id should not be changed within a chainbase modifier lambda uint64_t primary_key; //< primary_key should not be changed within a chainbase modifier lambda - account_name payer = 0; + account_name payer; shared_blob value; }; @@ -92,7 +88,7 @@ namespace eosio { namespace chain { typename chainbase::object::id_type id; table_id t_id; //< t_id should not be changed within a chainbase modifier lambda uint64_t primary_key; //< primary_key should not be changed within a chainbase modifier lambda - account_name payer = 0; + account_name payer; SecondaryKey secondary_key; //< secondary_key should not be changed within a chainbase modifier lambda }; diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 
1f9ff9bf871..b17afd01da5 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -9,6 +9,7 @@ #include #include #include +#include namespace chainbase { class database; @@ -36,7 +37,9 @@ namespace eosio { namespace chain { class account_object; using resource_limits::resource_limits_manager; using apply_handler = std::function; - using unapplied_transactions_type = map; + using forked_branch_callback = std::function; + // lookup transaction_metadata via supplied function to avoid re-creation + using trx_meta_cache_lookup = std::function; class fork_database; @@ -54,7 +57,6 @@ namespace eosio { namespace chain { class controller { public: - struct config { flat_set sender_bypass_whiteblacklist; flat_set actor_whitelist; @@ -76,10 +78,12 @@ namespace eosio { namespace chain { bool disable_replay_opts = false; bool contracts_console = false; bool allow_ram_billing_in_notify = false; + uint32_t maximum_variable_signature_length = chain::config::default_max_variable_signature_length; bool disable_all_subjective_mitigations = false; //< for testing purposes only - genesis_state genesis; wasm_interface::vm_type wasm_runtime = chain::config::default_wasm_runtime; + eosvmoc::config eosvmoc_config; + bool eosvmoc_tierup = false; db_read_mode read_mode = db_read_mode::SPECULATIVE; validation_mode block_validation_mode = validation_mode::FULL; @@ -99,12 +103,14 @@ namespace eosio { namespace chain { incomplete = 3, ///< this is an incomplete block (either being produced by a producer or speculatively produced by a node) }; - explicit controller( const config& cfg ); - controller( const config& cfg, protocol_feature_set&& pfs ); + controller( const config& cfg, const chain_id_type& chain_id ); + controller( const config& cfg, protocol_feature_set&& pfs, const chain_id_type& chain_id ); ~controller(); void add_indices(); - void startup( std::function shutdown, const snapshot_reader_ptr& snapshot = nullptr ); + 
void startup( std::function shutdown, const snapshot_reader_ptr& snapshot); + void startup( std::function shutdown, const genesis_state& genesis); + void startup( std::function shutdown); void preactivate_feature( const digest_type& feature_digest ); @@ -128,18 +134,10 @@ namespace eosio { namespace chain { uint16_t confirm_block_count, const vector& new_protocol_feature_activations ); - void abort_block(); - /** - * These transactions were previously pushed by have since been unapplied, recalling push_transaction - * with the transaction_metadata_ptr will remove them from the source of this data IFF it succeeds. - * - * The caller is responsible for calling drop_unapplied_transaction on a failing transaction that - * they never intend to retry - * - * @return map of transactions which have been unapplied + * @return transactions applied in aborted block */ - unapplied_transactions_type& get_unapplied_transactions(); + vector abort_block(); /** * @@ -152,13 +150,20 @@ namespace eosio { namespace chain { */ transaction_trace_ptr push_scheduled_transaction( const transaction_id_type& scheduled, fc::time_point deadline, uint32_t billed_cpu_time_us = 0 ); - block_state_ptr finalize_block( const std::function& signer_callback ); - void sign_block( const std::function& signer_callback ); + block_state_ptr finalize_block( const signer_callback_type& signer_callback ); + void sign_block( const signer_callback_type& signer_callback ); void commit_block(); - void pop_block(); std::future create_block_state_future( const signed_block_ptr& b ); - void push_block( std::future& block_state_future ); + + /** + * @param block_state_future provide from call to create_block_state_future + * @param cb calls cb with forked applied transactions for each forked block + * @param trx_lookup user provided lookup function for externally cached transaction_metadata + */ + void push_block( std::future& block_state_future, + const forked_branch_callback& cb, + const trx_meta_cache_lookup& 
trx_lookup ); boost::asio::io_context& get_thread_pool(); @@ -206,16 +211,16 @@ namespace eosio { namespace chain { time_point fork_db_pending_head_block_time()const; account_name fork_db_pending_head_block_producer()const; - time_point pending_block_time()const; - account_name pending_block_producer()const; - public_key_type pending_block_signing_key()const; - optional pending_producer_block_id()const; + time_point pending_block_time()const; + account_name pending_block_producer()const; + const block_signing_authority& pending_block_signing_authority()const; + optional pending_producer_block_id()const; const vector& get_pending_trx_receipts()const; - const producer_schedule_type& active_producers()const; - const producer_schedule_type& pending_producers()const; - optional proposed_producers()const; + const producer_authority_schedule& active_producers()const; + const producer_authority_schedule& pending_producers()const; + optional proposed_producers()const; uint32_t last_irreversible_block_num() const; block_id_type last_irreversible_block_id() const; @@ -241,6 +246,11 @@ namespace eosio { namespace chain { bool is_ram_billing_in_notify_allowed()const; + //This is only an accessor to the user configured subjective limit: i.e. 
it does not do a + // check similar to is_ram_billing_in_notify_allowed() to check if controller is currently + // producing a block + uint32_t configured_subjective_signature_length_limit()const; + void add_resource_greylist(const account_name &name); void remove_resource_greylist(const account_name &name); bool is_resource_greylisted(const account_name &name) const; @@ -256,7 +266,7 @@ namespace eosio { namespace chain { bool is_known_unexpired_transaction( const transaction_id_type& id) const; - int64_t set_proposed_producers( vector producers ); + int64_t set_proposed_producers( vector producers ); bool light_validation_allowed(bool replay_opts_disabled_by_policy) const; bool skip_auth_check()const; @@ -279,6 +289,10 @@ namespace eosio { namespace chain { void add_to_ram_correction( account_name account, uint64_t ram_bytes ); bool all_subjective_mitigations_disabled()const; +#if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) + vm::wasm_allocator& get_wasm_allocator(); +#endif + static fc::optional convert_exception_to_error_code( const fc::exception& e ); signal pre_accepted_block; @@ -324,6 +338,10 @@ namespace eosio { namespace chain { return pretty_output; } + static chain_id_type extract_chain_id(snapshot_reader& snapshot); + + static fc::optional extract_chain_id_from_db( const path& state_dir ); + private: friend class apply_context; friend class transaction_context; diff --git a/libraries/chain/include/eosio/chain/core_symbol.hpp.in b/libraries/chain/include/eosio/chain/core_symbol.hpp.in index c07f1ae267d..bb15ae8189d 100644 --- a/libraries/chain/include/eosio/chain/core_symbol.hpp.in +++ b/libraries/chain/include/eosio/chain/core_symbol.hpp.in @@ -1,8 +1,6 @@ -/** @file - * @copyright defined in eos/LICENSE - * +/** * \warning This file is machine generated. DO NOT EDIT. See core_symbol.hpp.in for changes. 
*/ #define CORE_SYMBOL SY(4,${CORE_SYMBOL_NAME}) -#define CORE_SYMBOL_NAME "${CORE_SYMBOL_NAME}" \ No newline at end of file +#define CORE_SYMBOL_NAME "${CORE_SYMBOL_NAME}" diff --git a/libraries/chain/include/eosio/chain/database_header_object.hpp b/libraries/chain/include/eosio/chain/database_header_object.hpp new file mode 100644 index 00000000000..41dade55734 --- /dev/null +++ b/libraries/chain/include/eosio/chain/database_header_object.hpp @@ -0,0 +1,54 @@ +#pragma once +#include + +#include "multi_index_includes.hpp" + +namespace eosio { namespace chain { + /** + * @brief tracks the version of the application data stored in the database + * @ingroup object + * + * the in-memory database expects that binay structures of data do not shift between executions. Some + * upgrades will bump this version to indicate that the expectations of the binary application data + * have changed. When it is safe to directly use an older version that will be allowed though cases + * where this is possible may be rare. 
+ */ + class database_header_object : public chainbase::object + { + OBJECT_CTOR(database_header_object) + + /** + * VERSION HISTORY + * - 0 : implied version when this header is absent + * - 1 : initial version, prior to this no `database_header_object` existed in the shared memory file but + * no changes to its format were made so it can be safely added to existing databases + * - 2 : shared_authority now holds shared_key_weights & shared_public_keys + * change from producer_key to producer_authority for many in-memory structures + */ + + static constexpr uint32_t current_version = 2; + static constexpr uint32_t minimum_version = 2; + + id_type id; + uint32_t version = current_version; + + void validate() const { + EOS_ASSERT(std::clamp(version, minimum_version, current_version) == version, bad_database_version_exception, + "state database version is incompatible, please restore from a compatible snapshot or replay!", + ("version", version)("minimum_version", minimum_version)("maximum_version", current_version)); + } + }; + + struct by_block_id; + using database_header_multi_index = chainbase::shared_multi_index_container< + database_header_object, + indexed_by< + ordered_unique, BOOST_MULTI_INDEX_MEMBER(database_header_object, database_header_object::id_type, id)> + > + >; + + } } + +CHAINBASE_SET_INDEX_TYPE(eosio::chain::database_header_object, eosio::chain::database_header_multi_index) + +FC_REFLECT( eosio::chain::database_header_object, (version) ) diff --git a/libraries/chain/include/eosio/chain/database_utils.hpp b/libraries/chain/include/eosio/chain/database_utils.hpp index 19de97bd3da..84c90842795 100644 --- a/libraries/chain/include/eosio/chain/database_utils.hpp +++ b/libraries/chain/include/eosio/chain/database_utils.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -122,12 +118,23 @@ namespace fc { inline void to_variant( const float128_t& f, variant& v ) { - v = variant(*reinterpret_cast(&f)); + // 
Assumes platform is little endian and hex representation of 128-bit integer is in little endian order. + const eosio::chain::uint128_t as_bytes = *reinterpret_cast(&f); + std::string s = "0x"; + s.append( to_hex( reinterpret_cast(&as_bytes), sizeof(as_bytes) ) ); + v = s; } inline void from_variant( const variant& v, float128_t& f ) { - from_variant(v, *reinterpret_cast(&f)); + // Temporarily hold the binary in uint128_t before casting it to float128_t + eosio::chain::uint128_t temp = 0; + auto s = v.as_string(); + FC_ASSERT( s.size() == 2 + 2 * sizeof(temp) && s.find("0x") == 0, "Failure in converting hex data into a float128_t"); + auto sz = from_hex( s.substr(2), reinterpret_cast(&temp), sizeof(temp) ); + // Assumes platform is little endian and hex representation of 128-bit integer is in little endian order. + FC_ASSERT( sz == sizeof(temp), "Failure in converting hex data into a float128_t" ); + f = *reinterpret_cast(&temp); } inline diff --git a/libraries/chain/include/eosio/chain/eosio_contract.hpp b/libraries/chain/include/eosio/chain/eosio_contract.hpp index 1bf9163827e..df881f5462c 100644 --- a/libraries/chain/include/eosio/chain/eosio_contract.hpp +++ b/libraries/chain/include/eosio/chain/eosio_contract.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index f67e9c85a7c..b6ddb73ca7e 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -23,7 +19,11 @@ * This macro will rethrow the exception as the specified "exception_type" */ #define EOS_RETHROW_EXCEPTIONS(exception_type, FORMAT, ... 
) \ - catch (eosio::chain::chain_exception& e) { \ + catch( const std::bad_alloc& ) {\ + throw;\ + } catch( const boost::interprocess::bad_alloc& ) {\ + throw;\ + } catch (eosio::chain::chain_exception& e) { \ FC_RETHROW_EXCEPTION( e, warn, FORMAT, __VA_ARGS__ ); \ } catch (fc::exception& e) { \ exception_type new_exception(FC_LOG_MESSAGE( warn, FORMAT, __VA_ARGS__ )); \ @@ -46,7 +46,11 @@ * This macro will rethrow the exception as the specified "exception_type" */ #define EOS_CAPTURE_AND_RETHROW( exception_type, ... ) \ - catch (eosio::chain::chain_exception& e) { \ + catch( const std::bad_alloc& ) {\ + throw;\ + } catch( const boost::interprocess::bad_alloc& ) {\ + throw;\ + } catch (eosio::chain::chain_exception& e) { \ FC_RETHROW_EXCEPTION( e, warn, "", FC_FORMAT_ARG_PARAMS(__VA_ARGS__) ); \ } catch (fc::exception& e) { \ exception_type new_exception(e.get_log()); \ @@ -63,6 +67,26 @@ std::current_exception() ); \ } +/** + * Capture all exceptions and pass to NEXT function + */ +#define CATCH_AND_CALL(NEXT)\ + catch ( const fc::exception& err ) {\ + NEXT(err.dynamic_copy_exception());\ + } catch ( const std::exception& e ) {\ + fc::exception fce( \ + FC_LOG_MESSAGE( warn, "rethrow ${what}: ", ("what",e.what())),\ + fc::std_exception_code,\ + BOOST_CORE_TYPEID(e).name(),\ + e.what() ) ;\ + NEXT(fce.dynamic_copy_exception());\ + } catch( ... 
) {\ + fc::unhandled_exception e(\ + FC_LOG_MESSAGE(warn, "rethrow"),\ + std::current_exception());\ + NEXT(e.dynamic_copy_exception());\ + } + #define EOS_RECODE_EXC( cause_type, effect_type ) \ catch( const cause_type& e ) \ { throw( effect_type( e.what(), e.get_log() ) ); } @@ -170,6 +194,10 @@ namespace eosio { namespace chain { 3010013, "Invalid fixed key" ) FC_DECLARE_DERIVED_EXCEPTION( symbol_type_exception, chain_type_exception, 3010014, "Invalid symbol" ) + FC_DECLARE_DERIVED_EXCEPTION( unactivated_key_type, chain_type_exception, + 3010015, "Key type is not a currently activated type" ) + FC_DECLARE_DERIVED_EXCEPTION( unactivated_signature_type, chain_type_exception, + 3010016, "Signature type is not a currently activated type" ) FC_DECLARE_DERIVED_EXCEPTION( fork_database_exception, chain_exception, @@ -204,7 +232,10 @@ namespace eosio { namespace chain { 3030010, "Invalid block header extension" ) FC_DECLARE_DERIVED_EXCEPTION( ill_formed_protocol_feature_activation, block_validate_exception, 3030011, "Block includes an ill-formed protocol feature activation extension" ) - + FC_DECLARE_DERIVED_EXCEPTION( invalid_block_extension, block_validate_exception, + 3030012, "Invalid block extension" ) + FC_DECLARE_DERIVED_EXCEPTION( ill_formed_additional_block_signatures_extension, block_validate_exception, + 3030013, "Block includes an ill-formed additional block signature extension" ) FC_DECLARE_DERIVED_EXCEPTION( transaction_exception, chain_exception, @@ -244,6 +275,8 @@ namespace eosio { namespace chain { 3040016, "Transaction includes an ill-formed deferred transaction generation context extension" ) FC_DECLARE_DERIVED_EXCEPTION( disallowed_transaction_extensions_bad_block_exception, transaction_exception, 3040017, "Transaction includes disallowed extensions (invalid block)" ) + FC_DECLARE_DERIVED_EXCEPTION( tx_resource_exhaustion, transaction_exception, + 3040018, "Transaction exceeded transient resource limit" ) FC_DECLARE_DERIVED_EXCEPTION( 
action_validate_exception, chain_exception, @@ -283,6 +316,8 @@ namespace eosio { namespace chain { 3060003, "Contract Table Query Exception" ) FC_DECLARE_DERIVED_EXCEPTION( contract_query_exception, database_exception, 3060004, "Contract Query Exception" ) + FC_DECLARE_DERIVED_EXCEPTION( bad_database_version_exception, database_exception, + 3060005, "Database is an unknown or unsupported version" ) FC_DECLARE_DERIVED_EXCEPTION( guard_exception, database_exception, 3060100, "Guard Exception" ) @@ -371,7 +406,10 @@ namespace eosio { namespace chain { 3100008, "Feature is currently unsupported" ) FC_DECLARE_DERIVED_EXCEPTION( node_management_success, misc_exception, 3100009, "Node management operation successfully executed" ) - + FC_DECLARE_DERIVED_EXCEPTION( json_parse_exception, misc_exception, + 3100010, "JSON parse exception" ) + FC_DECLARE_DERIVED_EXCEPTION( sig_variable_size_limit_exception, misc_exception, + 3100011, "Variable length component of signature too large" ) FC_DECLARE_DERIVED_EXCEPTION( plugin_exception, chain_exception, @@ -520,6 +558,10 @@ namespace eosio { namespace chain { 3170009, "Snapshot Finalization Exception" ) FC_DECLARE_DERIVED_EXCEPTION( invalid_protocol_features_to_activate, producer_exception, 3170010, "The protocol features to be activated were not valid" ) + FC_DECLARE_DERIVED_EXCEPTION( no_block_signatures, producer_exception, + 3170011, "The signer returned no valid block signatures" ) + FC_DECLARE_DERIVED_EXCEPTION( unsupported_multiple_block_signatures, producer_exception, + 3170012, "The signer returned multiple signatures but that is not supported" ) FC_DECLARE_DERIVED_EXCEPTION( reversible_blocks_exception, chain_exception, 3180000, "Reversible Blocks exception" ) @@ -540,6 +582,8 @@ namespace eosio { namespace chain { 3190003, "block log can not be found" ) FC_DECLARE_DERIVED_EXCEPTION( block_log_backup_dir_exist, block_log_exception, 3190004, "block log backup dir already exists" ) + FC_DECLARE_DERIVED_EXCEPTION( 
block_index_not_found, block_log_exception, + 3190005, "block index can not be found" ) FC_DECLARE_DERIVED_EXCEPTION( http_exception, chain_exception, 3200000, "http exception" ) diff --git a/libraries/chain/include/eosio/chain/fixed_bytes.hpp b/libraries/chain/include/eosio/chain/fixed_bytes.hpp new file mode 100644 index 00000000000..df659935bda --- /dev/null +++ b/libraries/chain/include/eosio/chain/fixed_bytes.hpp @@ -0,0 +1,392 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +#include +#include +#include +#include + +namespace eosio { + + /// @cond IMPLEMENTATIONS + + using chain::uint128_t; + + template + class fixed_bytes; + + template + bool operator ==(const fixed_bytes &c1, const fixed_bytes &c2); + + template + bool operator !=(const fixed_bytes &c1, const fixed_bytes &c2); + + template + bool operator >(const fixed_bytes &c1, const fixed_bytes &c2); + + template + bool operator <(const fixed_bytes &c1, const fixed_bytes &c2); + + template + bool operator >=(const fixed_bytes &c1, const fixed_bytes &c2); + + template + bool operator <=(const fixed_bytes &c1, const fixed_bytes &c2); + + /// @endcond + + /** + * @defgroup fixed_bytes Fixed Size Byte Array + * @ingroup core + * @ingroup types + * @brief Fixed size array of bytes sorted lexicographically + */ + + /** + * Fixed size byte array sorted lexicographically + * + * @ingroup fixed_bytes + * @tparam Size - Size of the fixed_bytes object + */ + template + class fixed_bytes { + private: + + template struct bool_pack; + template + using all_true = std::is_same< bool_pack, bool_pack >; + + template + static void set_from_word_sequence(const Word* arr_begin, const Word* arr_end, fixed_bytes& key) + { + auto itr = key._data.begin(); + word_t temp_word = 0; + const size_t sub_word_shift = 8 * sizeof(Word); + const size_t num_sub_words = sizeof(word_t) / sizeof(Word); + auto sub_words_left = num_sub_words; + for( auto w_itr = arr_begin; w_itr != arr_end; ++w_itr ) { + 
if( sub_words_left > 1 ) { + temp_word |= static_cast(*w_itr); + temp_word <<= sub_word_shift; + --sub_words_left; + continue; + } + + FC_ASSERT( sub_words_left == 1, "unexpected error in fixed_bytes constructor" ); + temp_word |= static_cast(*w_itr); + sub_words_left = num_sub_words; + + *itr = temp_word; + temp_word = 0; + ++itr; + } + if( sub_words_left != num_sub_words ) { + if( sub_words_left > 1 ) + temp_word <<= 8 * (sub_words_left-1); + *itr = temp_word; + } + } + + public: + typedef uint128_t word_t; + + /** + * Get number of words contained in this fixed_bytes object. A word is defined to be 16 bytes in size + */ + + static constexpr size_t num_words() { return (Size + sizeof(word_t) - 1) / sizeof(word_t); } + + /** + * Get number of padded bytes contained in this fixed_bytes object. Padded bytes are the remaining bytes + * inside the fixed_bytes object after all the words are allocated + */ + static constexpr size_t padded_bytes() { return num_words() * sizeof(word_t) - Size; } + + /** + * Default constructor to fixed_bytes object which initializes all bytes to zero + */ + constexpr fixed_bytes() : _data() {} + + /** + * Constructor to fixed_bytes object from std::array of num_words() word_t types + * + * @param arr data + */ + fixed_bytes(const std::array& arr) + { + std::copy(arr.begin(), arr.end(), _data.begin()); + } + + /** + * Constructor to fixed_bytes object from std::array of Word types smaller in size than word_t + * + * @param arr - Source data + */ + template::value && + std::is_unsigned::value && + !std::is_same::value && + std::less{}(sizeof(Word), sizeof(word_t))>::type > + fixed_bytes(const std::array& arr) + { + static_assert( sizeof(word_t) == (sizeof(word_t)/sizeof(Word)) * sizeof(Word), + "size of the backing word size is not divisible by the size of the array element" ); + static_assert( sizeof(Word) * NumWords <= Size, "too many words supplied to fixed_bytes constructor" ); + + set_from_word_sequence(arr.data(), arr.data() + 
arr.size(), *this); + } + + /** + * Constructor to fixed_bytes object from fixed-sized C array of Word types smaller in size than word_t + * + * @param arr - Source data + */ + template::value && + std::is_unsigned::value && + !std::is_same::value && + std::less{}(sizeof(Word), sizeof(word_t))>::type > + fixed_bytes(const Word(&arr)[NumWords]) + { + static_assert( sizeof(word_t) == (sizeof(word_t)/sizeof(Word)) * sizeof(Word), + "size of the backing word size is not divisible by the size of the array element" ); + static_assert( sizeof(Word) * NumWords <= Size, "too many words supplied to fixed_bytes constructor" ); + + set_from_word_sequence(arr, arr + NumWords, *this); + } + + /** + * Create a new fixed_bytes object from a sequence of words + * + * @tparam FirstWord - The type of the first word in the sequence + * @tparam Rest - The type of the remaining words in the sequence + * @param first_word - The first word in the sequence + * @param rest - The remaining words in the sequence + */ + template + static + fixed_bytes + make_from_word_sequence(typename std::enable_if::value && + std::is_unsigned::value && + !std::is_same::value && + sizeof(FirstWord) <= sizeof(word_t) && + all_true<(std::is_same::value)...>::value, + FirstWord>::type first_word, + Rest... rest) + { + static_assert( sizeof(word_t) == (sizeof(word_t)/sizeof(FirstWord)) * sizeof(FirstWord), + "size of the backing word size is not divisible by the size of the words supplied as arguments" ); + static_assert( sizeof(FirstWord) * (1 + sizeof...(Rest)) <= Size, "too many words supplied to make_from_word_sequence" ); + + fixed_bytes key; + std::array arr{{ first_word, rest... 
}}; + set_from_word_sequence(arr.data(), arr.data() + arr.size(), key); + return key; + } + + /** + * Get the contained std::array + */ + const auto& get_array()const { return _data; } + + /** + * Get the underlying data of the contained std::array + */ + auto data() { return _data.data(); } + + /// @cond INTERNAL + + /** + * Get the underlying data of the contained std::array + */ + auto data()const { return _data.data(); } + + /// @endcond + + /** + * Get the size of the contained std::array + */ + auto size()const { return _data.size(); } + + + /** + * Extract the contained data as an array of bytes + * + * @return - the extracted data as array of bytes + */ + std::array extract_as_byte_array()const { + std::array arr; + + const size_t num_sub_words = sizeof(word_t); + + auto arr_itr = arr.begin(); + auto data_itr = _data.begin(); + + for( size_t counter = _data.size(); counter > 0; --counter, ++data_itr ) { + size_t sub_words_left = num_sub_words; + + auto temp_word = *data_itr; + if( counter == 1 ) { // If last word in _data array... 
+ sub_words_left -= padded_bytes(); + temp_word >>= 8*padded_bytes(); + } + for( ; sub_words_left > 0; --sub_words_left ) { + *(arr_itr + sub_words_left - 1) = static_cast(temp_word & 0xFF); + temp_word >>= 8; + } + arr_itr += num_sub_words; + } + + return arr; + } + + /** + * Prints fixed_bytes as a hexadecimal string + * + * @param val to be printed + */ + inline void print()const { + auto arr = extract_as_byte_array(); + printhex(static_cast(arr.data()), arr.size()); + } + + /// @cond OPERATORS + + friend bool operator == <>(const fixed_bytes &c1, const fixed_bytes &c2); + + friend bool operator != <>(const fixed_bytes &c1, const fixed_bytes &c2); + + friend bool operator > <>(const fixed_bytes &c1, const fixed_bytes &c2); + + friend bool operator < <>(const fixed_bytes &c1, const fixed_bytes &c2); + + friend bool operator >= <>(const fixed_bytes &c1, const fixed_bytes &c2); + + friend bool operator <= <>(const fixed_bytes &c1, const fixed_bytes &c2); + + /// @endcond + + private: + + std::array _data; + }; + + /// @cond IMPLEMENTATIONS + + /** + * Lexicographically compares two fixed_bytes variables c1 and c2 + * + * @param c1 - First fixed_bytes object to compare + * @param c2 - Second fixed_bytes object to compare + * @return if c1 == c2, return true, otherwise false + */ + template + bool operator ==(const fixed_bytes &c1, const fixed_bytes &c2) { + return c1._data == c2._data; + } + + /** + * Lexicographically compares two fixed_bytes variables c1 and c2 + * + * @param c1 - First fixed_bytes object to compare + * @param c2 - Second fixed_bytes object to compare + * @return if c1 != c2, return true, otherwise false + */ + template + bool operator !=(const fixed_bytes &c1, const fixed_bytes &c2) { + return c1._data != c2._data; + } + + /** + * Lexicographically compares two fixed_bytes variables c1 and c2 + * + * @param c1 - First fixed_bytes object to compare + * @param c2 - Second fixed_bytes object to compare + * @return if c1 > c2, return true, otherwise 
false + */ + template + bool operator >(const fixed_bytes& c1, const fixed_bytes& c2) { + return c1._data > c2._data; + } + + /** + * Lexicographically compares two fixed_bytes variables c1 and c2 + * + * @param c1 - First fixed_bytes object to compare + * @param c2 - Second fixed_bytes object to compare + * @return if c1 < c2, return true, otherwise false + */ + template + bool operator <(const fixed_bytes &c1, const fixed_bytes &c2) { + return c1._data < c2._data; + } + + /** + * Lexicographically compares two fixed_bytes variables c1 and c2 + * + * @param c1 - First fixed_bytes object to compare + * @param c2 - Second fixed_bytes object to compare + * @return if c1 >= c2, return true, otherwise false + */ + template + bool operator >=(const fixed_bytes& c1, const fixed_bytes& c2) { + return c1._data >= c2._data; + } + + /** + * Lexicographically compares two fixed_bytes variables c1 and c2 + * + * @param c1 - First fixed_bytes object to compare + * @param c2 - Second fixed_bytes object to compare + * @return if c1 <= c2, return true, otherwise false + */ + template + bool operator <=(const fixed_bytes &c1, const fixed_bytes &c2) { + return c1._data <= c2._data; + } + + + using checksum160 = fixed_bytes<20>; + using checksum256 = fixed_bytes<32>; + using checksum512 = fixed_bytes<64>; + + /** + * Serialize a fixed_bytes into a stream + * + * @brief Serialize a fixed_bytes + * @param ds - The stream to write + * @param d - The value to serialize + * @tparam DataStream - Type of datastream buffer + * @return DataStream& - Reference to the datastream + */ + template + inline DataStream& operator<<(DataStream& ds, const fixed_bytes& d) { + auto arr = d.extract_as_byte_array(); + ds.write( (const char*)arr.data(), arr.size() ); + return ds; + } + + /** + * Deserialize a fixed_bytes from a stream + * + * @brief Deserialize a fixed_bytes + * @param ds - The stream to read + * @param d - The destination for deserialized value + * @tparam DataStream - Type of datastream 
buffer + * @return DataStream& - Reference to the datastream + */ + template + inline DataStream& operator>>(DataStream& ds, fixed_bytes& d) { + std::array arr; + ds.read( (char*)arr.data(), arr.size() ); + d = fixed_bytes( arr ); + return ds; + } + + /// @endcond +} diff --git a/libraries/chain/include/eosio/chain/fixed_key.hpp b/libraries/chain/include/eosio/chain/fixed_key.hpp index 5dd90f901ad..f2a48162c99 100644 --- a/libraries/chain/include/eosio/chain/fixed_key.hpp +++ b/libraries/chain/include/eosio/chain/fixed_key.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index e225d0fdcd1..bd774f2f71c 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -8,8 +8,6 @@ namespace eosio { namespace chain { struct fork_database_impl; - typedef vector branch_type; - /** * @class fork_database * @brief manages light-weight state for all potential unconfirmed forks diff --git a/libraries/chain/include/eosio/chain/generated_transaction_object.hpp b/libraries/chain/include/eosio/chain/generated_transaction_object.hpp index 74e07a4d9e9..a93e65296e4 100644 --- a/libraries/chain/include/eosio/chain/generated_transaction_object.hpp +++ b/libraries/chain/include/eosio/chain/generated_transaction_object.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/libraries/chain/include/eosio/chain/genesis_intrinsics.hpp b/libraries/chain/include/eosio/chain/genesis_intrinsics.hpp index bd736d6a285..10eb4f8858a 100644 --- a/libraries/chain/include/eosio/chain/genesis_intrinsics.hpp +++ b/libraries/chain/include/eosio/chain/genesis_intrinsics.hpp @@ -1,8 +1,3 @@ - -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git 
a/libraries/chain/include/eosio/chain/genesis_state.hpp b/libraries/chain/include/eosio/chain/genesis_state.hpp index 364f505629e..5e5b643a1b5 100644 --- a/libraries/chain/include/eosio/chain/genesis_state.hpp +++ b/libraries/chain/include/eosio/chain/genesis_state.hpp @@ -1,8 +1,3 @@ - -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/global_property_object.hpp b/libraries/chain/include/eosio/chain/global_property_object.hpp index 14ed594c0bd..57d6c745fe2 100644 --- a/libraries/chain/include/eosio/chain/global_property_object.hpp +++ b/libraries/chain/include/eosio/chain/global_property_object.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -9,13 +5,31 @@ #include #include #include +#include #include #include +#include #include #include "multi_index_includes.hpp" namespace eosio { namespace chain { + /** + * a fc::raw::unpack compatible version of the old global_property_object structure stored in + * version 2 snapshots and before + */ + namespace legacy { + struct snapshot_global_property_object_v2 { + static constexpr uint32_t minimum_version = 0; + static constexpr uint32_t maximum_version = 2; + static_assert(chain_snapshot_header::minimum_compatible_version <= maximum_version, "snapshot_global_property_object_v2 is no longer needed"); + + optional proposed_schedule_block_num; + producer_schedule_type proposed_schedule; + chain_config configuration; + }; + } + /** * @class global_property_object * @brief Maintains global state information about block producer schedules and chain configuration parameters @@ -27,10 +41,18 @@ namespace eosio { namespace chain { OBJECT_CTOR(global_property_object, (proposed_schedule)) public: - id_type id; - optional proposed_schedule_block_num; - shared_producer_schedule_type proposed_schedule; - chain_config configuration; + id_type id; + optional proposed_schedule_block_num; + 
shared_producer_authority_schedule proposed_schedule; + chain_config configuration; + chain_id_type chain_id; + + void initalize_from( const legacy::snapshot_global_property_object_v2& legacy, const chain_id_type& chain_id_val ) { + proposed_schedule_block_num = legacy.proposed_schedule_block_num; + proposed_schedule = producer_authority_schedule(legacy.proposed_schedule).to_shared(proposed_schedule.producers.get_allocator()); + configuration = legacy.configuration; + chain_id = chain_id_val; + } }; @@ -43,6 +65,32 @@ namespace eosio { namespace chain { > >; + struct snapshot_global_property_object { + optional proposed_schedule_block_num; + producer_authority_schedule proposed_schedule; + chain_config configuration; + chain_id_type chain_id; + }; + + namespace detail { + template<> + struct snapshot_row_traits { + using value_type = global_property_object; + using snapshot_type = snapshot_global_property_object; + + static snapshot_global_property_object to_snapshot_row( const global_property_object& value, const chainbase::database& ) { + return {value.proposed_schedule_block_num, producer_authority_schedule::from_shared(value.proposed_schedule), value.configuration, value.chain_id}; + } + + static void from_snapshot_row( snapshot_global_property_object&& row, global_property_object& value, chainbase::database& ) { + value.proposed_schedule_block_num = row.proposed_schedule_block_num; + value.proposed_schedule = row.proposed_schedule.to_shared(value.proposed_schedule.producers.get_allocator()); + value.configuration = row.configuration; + value.chain_id = row.chain_id; + } + }; + } + /** * @class dynamic_global_property_object * @brief Maintains global state information that frequently change @@ -73,9 +121,17 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::dynamic_global_property_object, eosio::chain::dynamic_global_property_multi_index) FC_REFLECT(eosio::chain::global_property_object, + (proposed_schedule_block_num)(proposed_schedule)(configuration)(chain_id) + ) + 
+FC_REFLECT(eosio::chain::legacy::snapshot_global_property_object_v2, (proposed_schedule_block_num)(proposed_schedule)(configuration) ) +FC_REFLECT(eosio::chain::snapshot_global_property_object, + (proposed_schedule_block_num)(proposed_schedule)(configuration)(chain_id) + ) + FC_REFLECT(eosio::chain::dynamic_global_property_object, (global_action_sequence) ) diff --git a/libraries/chain/include/eosio/chain/multi_index_includes.hpp b/libraries/chain/include/eosio/chain/multi_index_includes.hpp index 2bb5485194c..3fbc55a2a1d 100644 --- a/libraries/chain/include/eosio/chain/multi_index_includes.hpp +++ b/libraries/chain/include/eosio/chain/multi_index_includes.hpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #pragma once #include diff --git a/libraries/chain/include/eosio/chain/name.hpp b/libraries/chain/include/eosio/chain/name.hpp index 81c13145dde..eef06b81ece 100644 --- a/libraries/chain/include/eosio/chain/name.hpp +++ b/libraries/chain/include/eosio/chain/name.hpp @@ -3,9 +3,16 @@ #include #include -namespace eosio { namespace chain { - using std::string; +namespace eosio::chain { + struct name; +} +namespace fc { + class variant; + void to_variant(const eosio::chain::name& c, fc::variant& v); + void from_variant(const fc::variant& v, eosio::chain::name& check); +} // fc +namespace eosio::chain { static constexpr uint64_t char_to_symbol( char c ) { if( c >= 'a' && c <= 'z' ) return (c - 'a') + 6; @@ -14,101 +21,83 @@ namespace eosio { namespace chain { return 0; } - // Each char of the string is encoded into 5-bit chunk and left-shifted - // to its 5-bit slot starting with the highest slot for the first char. - // The 13th char, if str is long enough, is encoded into 4-bit chunk - // and placed in the lowest 4 bits. 
64 = 12 * 5 + 4 - static constexpr uint64_t string_to_name( const char* str ) - { - uint64_t name = 0; + static constexpr uint64_t string_to_uint64_t( std::string_view str ) { + uint64_t n = 0; int i = 0; for ( ; str[i] && i < 12; ++i) { - // NOTE: char_to_symbol() returns char type, and without this explicit - // expansion to uint64 type, the compilation fails at the point of usage - // of string_to_name(), where the usage requires constant (compile time) expression. - name |= (char_to_symbol(str[i]) & 0x1f) << (64 - 5 * (i + 1)); - } + // NOTE: char_to_symbol() returns char type, and without this explicit + // expansion to uint64 type, the compilation fails at the point of usage + // of string_to_name(), where the usage requires constant (compile time) expression. + n |= (char_to_symbol(str[i]) & 0x1f) << (64 - 5 * (i + 1)); + } // The for-loop encoded up to 60 high bits into uint64 'name' variable, // if (strlen(str) > 12) then encode str[12] into the low (remaining) // 4 bits of 'name' if (i == 12) - name |= char_to_symbol(str[12]) & 0x0F; - return name; + n |= char_to_symbol(str[12]) & 0x0F; + return n; } -#define N(X) eosio::chain::string_to_name(#X) - + /// Immutable except for fc::from_variant. 
struct name { + private: uint64_t value = 0; - bool empty()const { return 0 == value; } - bool good()const { return !empty(); } - - name( const char* str ) { set(str); } - name( const string& str ) { set( str.c_str() ); } - void set( const char* str ); + friend struct fc::reflector; + friend void fc::from_variant(const fc::variant& v, eosio::chain::name& check); - template - name( T v ):value(v){} - name(){} + void set( std::string_view str ); - explicit operator string()const; + public: + constexpr bool empty()const { return 0 == value; } + constexpr bool good()const { return !empty(); } - string to_string() const { return string(*this); } + explicit name( std::string_view str ) { set( str ); } + constexpr explicit name( uint64_t v ) : value(v) {} + constexpr name() = default; - name& operator=( uint64_t v ) { - value = v; - return *this; - } - - name& operator=( const string& n ) { - value = name(n).value; - return *this; - } - name& operator=( const char* n ) { - value = name(n).value; - return *this; - } + std::string to_string()const; + constexpr uint64_t to_uint64_t()const { return value; } friend std::ostream& operator << ( std::ostream& out, const name& n ) { - return out << string(n); + return out << n.to_string(); } - friend bool operator < ( const name& a, const name& b ) { return a.value < b.value; } - friend bool operator <= ( const name& a, const name& b ) { return a.value <= b.value; } - friend bool operator > ( const name& a, const name& b ) { return a.value > b.value; } - friend bool operator >=( const name& a, const name& b ) { return a.value >= b.value; } - friend bool operator == ( const name& a, const name& b ) { return a.value == b.value; } + friend constexpr bool operator < ( const name& a, const name& b ) { return a.value < b.value; } + friend constexpr bool operator > ( const name& a, const name& b ) { return a.value > b.value; } + friend constexpr bool operator <= ( const name& a, const name& b ) { return a.value <= b.value; } + friend 
constexpr bool operator >= ( const name& a, const name& b ) { return a.value >= b.value; } + friend constexpr bool operator == ( const name& a, const name& b ) { return a.value == b.value; } + friend constexpr bool operator != ( const name& a, const name& b ) { return a.value != b.value; } - friend bool operator == ( const name& a, uint64_t b ) { return a.value == b; } - friend bool operator != ( const name& a, uint64_t b ) { return a.value != b; } + friend constexpr bool operator == ( const name& a, uint64_t b ) { return a.value == b; } + friend constexpr bool operator != ( const name& a, uint64_t b ) { return a.value != b; } - friend bool operator != ( const name& a, const name& b ) { return a.value != b.value; } - - operator bool()const { return value; } - operator uint64_t()const { return value; } - operator unsigned __int128()const { return value; } + constexpr explicit operator bool()const { return value != 0; } }; -} } // eosio::chain + // Each char of the string is encoded into 5-bit chunk and left-shifted + // to its 5-bit slot starting with the highest slot for the first char. + // The 13th char, if str is long enough, is encoded into 4-bit chunk + // and placed in the lowest 4 bits. 
64 = 12 * 5 + 4 + static constexpr name string_to_name( std::string_view str ) + { + return name( string_to_uint64_t( str ) ); + } + +#define N(X) eosio::chain::string_to_name(#X) + +} // eosio::chain namespace std { template<> struct hash : private hash { typedef eosio::chain::name argument_type; - typedef typename hash::result_type result_type; - result_type operator()(const argument_type& name) const noexcept + size_t operator()(const argument_type& name) const noexcept { - return hash::operator()(name.value); + return hash::operator()(name.to_uint64_t()); } }; }; -namespace fc { - class variant; - void to_variant(const eosio::chain::name& c, fc::variant& v); - void from_variant(const fc::variant& v, eosio::chain::name& check); -} // fc - - FC_REFLECT( eosio::chain::name, (value) ) diff --git a/libraries/chain/include/eosio/chain/parallel_markers.hpp b/libraries/chain/include/eosio/chain/parallel_markers.hpp index 7436f970630..349bc90d352 100644 --- a/libraries/chain/include/eosio/chain/parallel_markers.hpp +++ b/libraries/chain/include/eosio/chain/parallel_markers.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/permission_link_object.hpp b/libraries/chain/include/eosio/chain/permission_link_object.hpp index ee5b1287b18..acd6f45e33f 100644 --- a/libraries/chain/include/eosio/chain/permission_link_object.hpp +++ b/libraries/chain/include/eosio/chain/permission_link_object.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/permission_object.hpp b/libraries/chain/include/eosio/chain/permission_object.hpp index 335ce754907..d323a1ef899 100644 --- a/libraries/chain/include/eosio/chain/permission_object.hpp +++ b/libraries/chain/include/eosio/chain/permission_object.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include 
diff --git a/libraries/chain/include/eosio/chain/platform_timer.hpp b/libraries/chain/include/eosio/chain/platform_timer.hpp new file mode 100644 index 00000000000..29a8d62d46f --- /dev/null +++ b/libraries/chain/include/eosio/chain/platform_timer.hpp @@ -0,0 +1,61 @@ +#pragma once +#include +#include +#include + +#include + +#include + +#include + +namespace eosio { namespace chain { + +struct platform_timer { + platform_timer(); + ~platform_timer(); + + void start(fc::time_point tp); + void stop(); + + /* Sets a callback for when timer expires. Be aware this might fire from a signal handling context and/or + on any particular thread. Only a single callback can be registered at once; trying to register more will + result in an exception. Setting to nullptr disables any currently set callback */ + void set_expiration_callback(void(*func)(void*), void* user) { + bool expect_false = false; + while(!atomic_compare_exchange_strong(&_callback_variables_busy, &expect_false, true)) + expect_false = false; + auto reset_busy = fc::make_scoped_exit([this]() { + _callback_variables_busy.store(false, std::memory_order_release); + }); + EOS_ASSERT(!(func && _expiration_callback), misc_exception, "Setting a platform_timer callback when one already exists"); + + _expiration_callback = func; + _expiration_callback_data = user; + } + + std::atomic_bool expired = true; + +private: + struct impl; + constexpr static size_t fwd_size = 8; + fc::fwd my; + + void call_expiration_callback() { + bool expect_false = false; + if(atomic_compare_exchange_strong(&_callback_variables_busy, &expect_false, true)) { + void(*cb)(void*) = _expiration_callback; + void* cb_data = _expiration_callback_data; + if(cb) { + cb(cb_data); + } + _callback_variables_busy.store(false, std::memory_order_release); + } + } + + std::atomic_bool _callback_variables_busy = false; + void(*_expiration_callback)(void*) = nullptr; + void* _expiration_callback_data; +}; + +}} diff --git 
a/libraries/chain/include/eosio/chain/platform_timer_accuracy.hpp b/libraries/chain/include/eosio/chain/platform_timer_accuracy.hpp new file mode 100755 index 00000000000..cc83b6a554f --- /dev/null +++ b/libraries/chain/include/eosio/chain/platform_timer_accuracy.hpp @@ -0,0 +1,8 @@ +#pragma once + +namespace eosio { namespace chain { + +struct platform_timer; +void compute_and_print_timer_accuracy(platform_timer& t); + +}} \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/producer_schedule.hpp b/libraries/chain/include/eosio/chain/producer_schedule.hpp index 09528d7b809..ec0d5bc79dd 100644 --- a/libraries/chain/include/eosio/chain/producer_schedule.hpp +++ b/libraries/chain/include/eosio/chain/producer_schedule.hpp @@ -2,86 +2,334 @@ #include #include #include +#include +#include namespace eosio { namespace chain { + namespace legacy { + /** + * Used as part of the producer_schedule_type, maps the producer name to their key. + */ + struct producer_key { + account_name producer_name; + public_key_type block_signing_key; + + friend bool operator == ( const producer_key& lhs, const producer_key& rhs ) { + return tie( lhs.producer_name, lhs.block_signing_key ) == tie( rhs.producer_name, rhs.block_signing_key ); + } + friend bool operator != ( const producer_key& lhs, const producer_key& rhs ) { + return tie( lhs.producer_name, lhs.block_signing_key ) != tie( rhs.producer_name, rhs.block_signing_key ); + } + }; + + /** + * Defines both the order, account name, and signing keys of the active set of producers. 
+ */ + struct producer_schedule_type { + uint32_t version = 0; ///< sequentially incrementing version number + vector producers; + + friend bool operator == ( const producer_schedule_type& a, const producer_schedule_type& b ) + { + if( a.version != b.version ) return false; + if ( a.producers.size() != b.producers.size() ) return false; + for( uint32_t i = 0; i < a.producers.size(); ++i ) + if( a.producers[i] != b.producers[i] ) return false; + return true; + } + + friend bool operator != ( const producer_schedule_type& a, const producer_schedule_type& b ) + { + return !(a==b); + } + }; + } + + struct shared_block_signing_authority_v0 { + shared_block_signing_authority_v0() = delete; + shared_block_signing_authority_v0( const shared_block_signing_authority_v0& ) = default; + shared_block_signing_authority_v0( shared_block_signing_authority_v0&& ) = default; + shared_block_signing_authority_v0& operator= ( shared_block_signing_authority_v0 && ) = default; + shared_block_signing_authority_v0& operator= ( const shared_block_signing_authority_v0 & ) = default; + + explicit shared_block_signing_authority_v0( chainbase::allocator alloc ) + :keys(alloc){} + + uint32_t threshold = 0; + shared_vector keys; + }; + + using shared_block_signing_authority = static_variant; + + struct shared_producer_authority { + shared_producer_authority() = delete; + shared_producer_authority( const shared_producer_authority& ) = default; + shared_producer_authority( shared_producer_authority&& ) = default; + shared_producer_authority& operator= ( shared_producer_authority && ) = default; + shared_producer_authority& operator= ( const shared_producer_authority & ) = default; + + shared_producer_authority( const name& producer_name, shared_block_signing_authority&& authority ) + :producer_name(producer_name) + ,authority(std::move(authority)) + {} + + name producer_name; + shared_block_signing_authority authority; + }; + + struct shared_producer_authority_schedule { + 
shared_producer_authority_schedule() = delete; + + explicit shared_producer_authority_schedule( chainbase::allocator alloc ) + :producers(alloc){} + + shared_producer_authority_schedule( const shared_producer_authority_schedule& ) = default; + shared_producer_authority_schedule( shared_producer_authority_schedule&& ) = default; + shared_producer_authority_schedule& operator= ( shared_producer_authority_schedule && ) = default; + shared_producer_authority_schedule& operator= ( const shared_producer_authority_schedule & ) = default; + + uint32_t version = 0; ///< sequentially incrementing version number + shared_vector producers; + }; + /** - * Used as part of the producer_schedule_type, mapps the producer name to their key. + * block signing authority version 0 + * this authority allows for a weighted threshold multi-sig per-producer */ - struct producer_key { - account_name producer_name; - public_key_type block_signing_key; + struct block_signing_authority_v0 { + static constexpr std::string_view abi_type_name() { return "block_signing_authority_v0"; } + + uint32_t threshold = 0; + vector keys; + + template + void for_each_key( Op&& op ) const { + for (const auto& kw : keys ) { + op(kw.key); + } + } + + std::pair keys_satisfy_and_relevant( const std::set& presented_keys ) const { + size_t num_relevant_keys = 0; + uint32_t total_weight = 0; + for (const auto& kw : keys ) { + const auto& iter = presented_keys.find(kw.key); + if (iter != presented_keys.end()) { + ++num_relevant_keys; + + if( total_weight < threshold ) { + total_weight += std::min(std::numeric_limits::max() - total_weight, kw.weight); + } + } + } + + return {total_weight >= threshold, num_relevant_keys}; + } + + auto to_shared(chainbase::allocator alloc) const { + shared_block_signing_authority_v0 result(alloc); + result.threshold = threshold; + result.keys.clear(); + result.keys.reserve(keys.size()); + for (const auto& k: keys) { + result.keys.emplace_back(shared_key_weight::convert(alloc, k)); + } + 
+ return result; + } + + static auto from_shared(const shared_block_signing_authority_v0& src) { + block_signing_authority_v0 result; + result.threshold = src.threshold; + result.keys.reserve(src.keys.size()); + for (const auto& k: src.keys) { + result.keys.push_back(k); + } + + return result; + } - friend bool operator == ( const producer_key& lhs, const producer_key& rhs ) { - return tie( lhs.producer_name, lhs.block_signing_key ) == tie( rhs.producer_name, rhs.block_signing_key ); + friend bool operator == ( const block_signing_authority_v0& lhs, const block_signing_authority_v0& rhs ) { + return tie( lhs.threshold, lhs.keys ) == tie( rhs.threshold, rhs.keys ); } - friend bool operator != ( const producer_key& lhs, const producer_key& rhs ) { - return tie( lhs.producer_name, lhs.block_signing_key ) != tie( rhs.producer_name, rhs.block_signing_key ); + friend bool operator != ( const block_signing_authority_v0& lhs, const block_signing_authority_v0& rhs ) { + return tie( lhs.threshold, lhs.keys ) != tie( rhs.threshold, rhs.keys ); } }; - /** - * Defines both the order, account name, and signing keys of the active set of producers. 
- */ - struct producer_schedule_type { - uint32_t version = 0; ///< sequentially incrementing version number - vector producers; - public_key_type get_producer_key( account_name p )const { - for( const auto& i : producers ) - if( i.producer_name == p ) - return i.block_signing_key; - return public_key_type(); + using block_signing_authority = static_variant; + + struct producer_authority { + name producer_name; + block_signing_authority authority; + + template + static void for_each_key( const block_signing_authority& authority, Op&& op ) { + authority.visit([&op](const auto &a){ + a.for_each_key(std::forward(op)); + }); + } + + template + void for_each_key( Op&& op ) const { + for_each_key(authority, std::forward(op)); + } + + static std::pair keys_satisfy_and_relevant( const std::set& keys, const block_signing_authority& authority ) { + return authority.visit([&keys](const auto &a){ + return a.keys_satisfy_and_relevant(keys); + }); + } + + std::pair keys_satisfy_and_relevant( const std::set& presented_keys ) const { + return keys_satisfy_and_relevant(presented_keys, authority); + } + + auto to_shared(chainbase::allocator alloc) const { + auto shared_auth = authority.visit([&alloc](const auto& a) { + return a.to_shared(alloc); + }); + + return shared_producer_authority(producer_name, std::move(shared_auth)); + } + + static auto from_shared( const shared_producer_authority& src ) { + producer_authority result; + result.producer_name = src.producer_name; + result.authority = src.authority.visit(overloaded { + [](const shared_block_signing_authority_v0& a) { + return block_signing_authority_v0::from_shared(a); + } + }); + + return result; + } + + /** + * ABI's for contracts expect variants to be serialized as a 2 entry array of + * [type-name, value]. 
+ * + * This is incompatible with standard FC rules for + * static_variants which produce + * + * [ordinal, value] + * + * this method produces an appropriate variant for contracts where the authority field + * is correctly formatted + */ + fc::variant get_abi_variant() const; + + friend bool operator == ( const producer_authority& lhs, const producer_authority& rhs ) { + return tie( lhs.producer_name, lhs.authority ) == tie( rhs.producer_name, rhs.authority ); + } + friend bool operator != ( const producer_authority& lhs, const producer_authority& rhs ) { + return tie( lhs.producer_name, lhs.authority ) != tie( rhs.producer_name, rhs.authority ); } }; - struct shared_producer_schedule_type { - shared_producer_schedule_type( chainbase::allocator alloc ) - :producers(alloc){} + struct producer_authority_schedule { + producer_authority_schedule() = default; - shared_producer_schedule_type& operator=( const producer_schedule_type& a ) { - version = a.version; - producers.clear(); - producers.reserve( a.producers.size() ); - for( const auto& p : a.producers ) - producers.push_back(p); - return *this; + /** + * Up-convert a legacy producer schedule + */ + explicit producer_authority_schedule( const legacy::producer_schedule_type& old ) + :version(old.version) + { + producers.reserve( old.producers.size() ); + for( const auto& p : old.producers ) + producers.emplace_back(producer_authority{ p.producer_name, block_signing_authority_v0{ 1, {{p.block_signing_key, 1}} } }); } - operator producer_schedule_type()const { - producer_schedule_type result; + producer_authority_schedule( uint32_t version, std::initializer_list producers ) + :version(version) + ,producers(producers) + {} + + auto to_shared(chainbase::allocator alloc) const { + auto result = shared_producer_authority_schedule(alloc); result.version = version; - result.producers.reserve(producers.size()); - for( const auto& p : producers ) - result.producers.push_back(p); + result.producers.clear(); + 
result.producers.reserve( producers.size() ); + for( const auto& p : producers ) { + result.producers.emplace_back(p.to_shared(alloc)); + } return result; } - void clear() { - version = 0; - producers.clear(); + static auto from_shared( const shared_producer_authority_schedule& src ) { + producer_authority_schedule result; + result.version = src.version; + result.producers.reserve(src.producers.size()); + for( const auto& p : src.producers ) { + result.producers.emplace_back(producer_authority::from_shared(p)); + } + + return result; } uint32_t version = 0; ///< sequentially incrementing version number - shared_vector producers; + vector producers; + + friend bool operator == ( const producer_authority_schedule& a, const producer_authority_schedule& b ) + { + if( a.version != b.version ) return false; + if ( a.producers.size() != b.producers.size() ) return false; + for( uint32_t i = 0; i < a.producers.size(); ++i ) + if( ! (a.producers[i] == b.producers[i]) ) return false; + return true; + } + + friend bool operator != ( const producer_authority_schedule& a, const producer_authority_schedule& b ) + { + return !(a==b); + } + }; + + /** + * Block Header Extension Compatibility + */ + struct producer_schedule_change_extension : producer_authority_schedule { + + static constexpr uint16_t extension_id() { return 1; } + static constexpr bool enforce_unique() { return true; } + + producer_schedule_change_extension() = default; + producer_schedule_change_extension(const producer_schedule_change_extension&) = default; + producer_schedule_change_extension( producer_schedule_change_extension&& ) = default; + + producer_schedule_change_extension( const producer_authority_schedule& sched ) + :producer_authority_schedule(sched) {} }; - inline bool operator == ( const producer_schedule_type& a, const producer_schedule_type& b ) + inline bool operator == ( const producer_authority& pa, const shared_producer_authority& pb ) { - if( a.version != b.version ) return false; - if ( 
a.producers.size() != b.producers.size() ) return false; - for( uint32_t i = 0; i < a.producers.size(); ++i ) - if( a.producers[i] != b.producers[i] ) return false; + if(pa.producer_name != pb.producer_name) return false; + if(pa.authority.which() != pb.authority.which()) return false; + + bool authority_matches = pa.authority.visit([&pb]( const auto& lhs ){ + return pb.authority.visit( [&lhs](const auto& rhs ) { + if (lhs.threshold != rhs.threshold) return false; + return std::equal(lhs.keys.cbegin(), lhs.keys.cend(), rhs.keys.cbegin(), rhs.keys.cend()); + }); + }); + + if (!authority_matches) return false; return true; } - inline bool operator != ( const producer_schedule_type& a, const producer_schedule_type& b ) - { - return !(a==b); - } - } } /// eosio::chain -FC_REFLECT( eosio::chain::producer_key, (producer_name)(block_signing_key) ) -FC_REFLECT( eosio::chain::producer_schedule_type, (version)(producers) ) -FC_REFLECT( eosio::chain::shared_producer_schedule_type, (version)(producers) ) +FC_REFLECT( eosio::chain::legacy::producer_key, (producer_name)(block_signing_key) ) +FC_REFLECT( eosio::chain::legacy::producer_schedule_type, (version)(producers) ) +FC_REFLECT( eosio::chain::block_signing_authority_v0, (threshold)(keys)) +FC_REFLECT( eosio::chain::producer_authority, (producer_name)(authority) ) +FC_REFLECT( eosio::chain::producer_authority_schedule, (version)(producers) ) +FC_REFLECT_DERIVED( eosio::chain::producer_schedule_change_extension, (eosio::chain::producer_authority_schedule), ) + +FC_REFLECT( eosio::chain::shared_block_signing_authority_v0, (threshold)(keys)) +FC_REFLECT( eosio::chain::shared_producer_authority, (producer_name)(authority) ) +FC_REFLECT( eosio::chain::shared_producer_authority_schedule, (version)(producers) ) + diff --git a/libraries/chain/include/eosio/chain/protocol.hpp b/libraries/chain/include/eosio/chain/protocol.hpp index 389e9c08723..ab03127c12b 100644 --- a/libraries/chain/include/eosio/chain/protocol.hpp +++ 
b/libraries/chain/include/eosio/chain/protocol.hpp @@ -1,6 +1,2 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp b/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp index 03ab31be131..8201b76115e 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index c2f0140433f..81d22c15b0e 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -24,7 +20,9 @@ enum class builtin_protocol_feature_t : uint32_t { only_bill_first_authorizer, forward_setcode, get_sender, - ram_restrictions + ram_restrictions, + webauthn_key, + wtmsig_block_signatures, }; struct protocol_feature_subjective_restrictions { diff --git a/libraries/chain/include/eosio/chain/protocol_state_object.hpp b/libraries/chain/include/eosio/chain/protocol_state_object.hpp index dfbb4373a8a..dc0eabd1f1b 100644 --- a/libraries/chain/include/eosio/chain/protocol_state_object.hpp +++ b/libraries/chain/include/eosio/chain/protocol_state_object.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/reversible_block_object.hpp b/libraries/chain/include/eosio/chain/reversible_block_object.hpp index c493a4915e2..690d3c1953c 100644 --- a/libraries/chain/include/eosio/chain/reversible_block_object.hpp +++ b/libraries/chain/include/eosio/chain/reversible_block_object.hpp @@ -1,8 
+1,3 @@ - -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/libraries/chain/include/eosio/chain/snapshot.hpp b/libraries/chain/include/eosio/chain/snapshot.hpp index 499fbe29960..69f89987912 100644 --- a/libraries/chain/include/eosio/chain/snapshot.hpp +++ b/libraries/chain/include/eosio/chain/snapshot.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -283,6 +279,8 @@ namespace eosio { namespace chain { virtual void validate() const = 0; + virtual void return_to_header() = 0; + virtual ~snapshot_reader(){}; protected: @@ -320,6 +318,7 @@ namespace eosio { namespace chain { bool read_row( detail::abstract_snapshot_row_reader& row_reader ) override; bool empty ( ) override; void clear_section() override; + void return_to_header() override; private: const fc::variant& snapshot; @@ -356,6 +355,7 @@ namespace eosio { namespace chain { bool read_row( detail::abstract_snapshot_row_reader& row_reader ) override; bool empty ( ) override; void clear_section() override; + void return_to_header() override; private: bool validate_section() const; diff --git a/libraries/chain/include/eosio/chain/symbol.hpp b/libraries/chain/include/eosio/chain/symbol.hpp index de4a37514a6..1faa3038921 100644 --- a/libraries/chain/include/eosio/chain/symbol.hpp +++ b/libraries/chain/include/eosio/chain/symbol.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/libraries/chain/include/eosio/chain/thread_utils.hpp b/libraries/chain/include/eosio/chain/thread_utils.hpp index b3aea3085f5..3ec462d5402 100644 --- a/libraries/chain/include/eosio/chain/thread_utils.hpp +++ b/libraries/chain/include/eosio/chain/thread_utils.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp 
index 0db1be762ff..a7a069bde38 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/transaction.hpp b/libraries/chain/include/eosio/chain/transaction.hpp index 8f103eb6601..a9d523a9e0a 100644 --- a/libraries/chain/include/eosio/chain/transaction.hpp +++ b/libraries/chain/include/eosio/chain/transaction.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -31,7 +27,7 @@ namespace eosio { namespace chain { namespace detail { template struct transaction_extension_types { - using transaction_extensions_t = fc::static_variant< Ts... >; + using transaction_extension_t = fc::static_variant< Ts... >; using decompose_t = decompose< Ts... >; }; } @@ -40,7 +36,7 @@ namespace eosio { namespace chain { deferred_transaction_generation_context >; - using transaction_extensions = transaction_extension_types::transaction_extensions_t; + using transaction_extension = transaction_extension_types::transaction_extension_t; /** * The transaction header contains the fixed-sized data @@ -99,7 +95,7 @@ namespace eosio { namespace chain { bool allow_duplicate_keys = false) const; uint32_t total_actions()const { return context_free_actions.size() + actions.size(); } - + account_name first_authorizer()const { for( const auto& a : actions ) { for( const auto& u : a.authorization ) @@ -108,7 +104,7 @@ namespace eosio { namespace chain { return account_name(); } - vector validate_and_extract_extensions()const; + flat_multimap validate_and_extract_extensions()const; }; struct signed_transaction : public transaction @@ -138,7 +134,7 @@ namespace eosio { namespace chain { }; struct packed_transaction : fc::reflect_init { - enum compression_type { + enum class compression_type { none = 0, zlib = 1, }; @@ -149,15 +145,15 @@ namespace eosio { namespace 
chain { packed_transaction& operator=(const packed_transaction&) = delete; packed_transaction& operator=(packed_transaction&&) = default; - explicit packed_transaction(const signed_transaction& t, compression_type _compression = none) - :signatures(t.signatures), compression(_compression), unpacked_trx(t) + explicit packed_transaction(const signed_transaction& t, compression_type _compression = compression_type::none) + :signatures(t.signatures), compression(_compression), unpacked_trx(t), trx_id(unpacked_trx.id()) { local_pack_transaction(); local_pack_context_free_data(); } - explicit packed_transaction(signed_transaction&& t, compression_type _compression = none) - :signatures(t.signatures), compression(_compression), unpacked_trx(std::move(t)) + explicit packed_transaction(signed_transaction&& t, compression_type _compression = compression_type::none) + :signatures(t.signatures), compression(_compression), unpacked_trx(std::move(t)), trx_id(unpacked_trx.id()) { local_pack_transaction(); local_pack_context_free_data(); @@ -173,7 +169,7 @@ namespace eosio { namespace chain { digest_type packed_digest()const; - transaction_id_type id()const { return unpacked_trx.id(); } + const transaction_id_type& id()const { return trx_id; } bytes get_raw_transaction()const; time_point_sec expiration()const { return unpacked_trx.expiration; } @@ -204,6 +200,7 @@ namespace eosio { namespace chain { private: // cache unpacked trx, for thread safety do not modify after construction signed_transaction unpacked_trx; + transaction_id_type trx_id; }; using packed_transaction_ptr = std::shared_ptr; diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 1c262ccfe1a..01cdb7d732f 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -1,21 +1,32 @@ #pragma once #include #include +#include #include namespace eosio { 
namespace chain { - struct deadline_timer { - deadline_timer(); - ~deadline_timer(); + struct transaction_checktime_timer { + public: + transaction_checktime_timer() = delete; + transaction_checktime_timer(const transaction_checktime_timer&) = delete; + transaction_checktime_timer(transaction_checktime_timer&&) = default; + ~transaction_checktime_timer(); void start(fc::time_point tp); void stop(); - static volatile sig_atomic_t expired; + /* Sets a callback for when timer expires. Be aware this could might fire from a signal handling context and/or + on any particular thread. Only a single callback can be registered at once; trying to register more will + result in an exception. Use nullptr to disable a previously set callback. */ + void set_expiration_callback(void(*func)(void*), void* user); + + std::atomic_bool& expired; private: - static void timer_expired(int); - static bool initialized; + platform_timer& _timer; + + transaction_checktime_timer(platform_timer& timer); + friend controller_impl; }; class transaction_context { @@ -27,6 +38,7 @@ namespace eosio { namespace chain { transaction_context( controller& c, const signed_transaction& t, const transaction_id_type& trx_id, + transaction_checktime_timer&& timer, fc::time_point start = fc::time_point::now() ); void init_for_implicit_trx( uint64_t initial_net_usage = 0 ); @@ -118,6 +130,8 @@ namespace eosio { namespace chain { int64_t billed_cpu_time_us = 0; bool explicit_billed_cpu_time = false; + transaction_checktime_timer transaction_timer; + private: bool is_initialized = false; @@ -138,8 +152,6 @@ namespace eosio { namespace chain { fc::time_point pseudo_start; fc::microseconds billed_time; fc::microseconds billing_timer_duration_limit; - - deadline_timer _deadline_timer; }; } } diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index bba816ab651..945dd9af5e7 100644 --- 
a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -16,9 +12,7 @@ namespace eosio { namespace chain { class transaction_metadata; using transaction_metadata_ptr = std::shared_ptr; -using signing_keys_future_value_type = std::tuple>; -using signing_keys_future_type = std::shared_future; -using recovery_keys_type = std::pair&>; +using recover_keys_future = std::future; /** * This data structure should store context-free cached data about a transaction such as @@ -26,13 +20,42 @@ using recovery_keys_type = std::pair _recovered_pub_keys; + + public: + const bool implicit; + const bool scheduled; + bool accepted = false; // not thread safe + + private: + struct private_type{}; + + static void check_variable_sig_size(const packed_transaction_ptr& trx, uint32_t max) { + for(const signature_type& sig : trx->get_signed_transaction().signatures) + EOS_ASSERT(sig.variable_size() <= max, sig_variable_size_limit_exception, + "signature variable length component size (${s}) greater than subjective maximum (${m})", ("s", sig.variable_size())("m", max)); + } + + public: + // creation of tranaction_metadata restricted to start_recover_keys and create_no_recover_keys below, public for make_shared + explicit transaction_metadata( const private_type& pt, packed_transaction_ptr ptrx, + fc::microseconds sig_cpu_usage, flat_set recovered_pub_keys, + bool _implicit = false, bool _scheduled = false) + : _packed_trx( std::move( ptrx ) ) + , _sig_cpu_usage( sig_cpu_usage ) + , _recovered_pub_keys( std::move( recovered_pub_keys ) ) + , implicit( _implicit ) + , scheduled( _scheduled ) { + } transaction_metadata() = delete; transaction_metadata(const transaction_metadata&) = delete; @@ -40,25 +63,27 @@ class transaction_metadata { transaction_metadata operator=(transaction_metadata&) = delete; 
transaction_metadata operator=(transaction_metadata&&) = delete; - explicit transaction_metadata( const signed_transaction& t, packed_transaction::compression_type c = packed_transaction::none ) - :id(t.id()), packed_trx(std::make_shared(t, c)) { - //raw_packed = fc::raw::pack( static_cast(trx) ); - signed_id = digest_type::hash(*packed_trx); - } - explicit transaction_metadata( const packed_transaction_ptr& ptrx ) - :id(ptrx->id()), packed_trx(ptrx) { - //raw_packed = fc::raw::pack( static_cast(trx) ); - signed_id = digest_type::hash(*packed_trx); - } + const packed_transaction_ptr& packed_trx()const { return _packed_trx; } + const transaction_id_type& id()const { return _packed_trx->id(); } + fc::microseconds signature_cpu_usage()const { return _sig_cpu_usage; } + const flat_set& recovered_keys()const { return _recovered_pub_keys; } + + /// Thread safe. + /// @returns transaction_metadata_ptr or exception via future + static recover_keys_future + start_recover_keys( packed_transaction_ptr trx, boost::asio::io_context& thread_pool, + const chain_id_type& chain_id, fc::microseconds time_limit, + uint32_t max_variable_sig_size = UINT32_MAX ); - // must be called from main application thread - static signing_keys_future_type - start_recover_keys( const transaction_metadata_ptr& mtrx, boost::asio::io_context& thread_pool, - const chain_id_type& chain_id, fc::microseconds time_limit ); + /// @returns constructed transaction_metadata with no key recovery (sig_cpu_usage=0, recovered_pub_keys=empty) + static transaction_metadata_ptr + create_no_recover_keys( const packed_transaction& trx, trx_type t ) { + return std::make_shared( private_type(), + std::make_shared( trx ), fc::microseconds(), flat_set(), + t == trx_type::implicit, t == trx_type::scheduled ); + } - // start_recover_keys must be called first - recovery_keys_type recover_keys( const chain_id_type& chain_id ); }; } } // eosio::chain diff --git a/libraries/chain/include/eosio/chain/transaction_object.hpp 
b/libraries/chain/include/eosio/chain/transaction_object.hpp index cf87e11b5e9..d43b449be48 100644 --- a/libraries/chain/include/eosio/chain/transaction_object.hpp +++ b/libraries/chain/include/eosio/chain/transaction_object.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index d6323fbd23d..6f259f73b02 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -77,6 +73,7 @@ namespace eosio { namespace chain { using fc::time_point; using fc::safe; using fc::flat_map; + using fc::flat_multimap; using fc::flat_set; using fc::static_variant; using fc::ecc::range_proof_type; @@ -192,6 +189,7 @@ namespace eosio { namespace chain { protocol_state_object_type, account_ram_correction_object_type, code_object_type, + database_header_object_type, OBJECT_TYPE_COUNT ///< Sentry value which contains the number of different object types }; @@ -273,6 +271,18 @@ namespace eosio { namespace chain { */ typedef vector>> extensions_type; + /** + * emplace an extension into the extensions type such that it is properly ordered by extension id + * this assumes exts is already sorted by extension id + */ + inline auto emplace_extension( extensions_type& exts, uint16_t eid, vector&& data) { + auto insert_itr = std::upper_bound(exts.begin(), exts.end(), eid, [](uint16_t id, const auto& ext){ + return id < ext.first; + }); + + return exts.emplace(insert_itr, eid, std::move(data)); + } + template class end_insert_iterator : public std::iterator< std::output_iterator_tag, void, void, void, void > @@ -351,6 +361,14 @@ namespace eosio { namespace chain { return tail_t::template extract( id, data, result ); } }; + + template + struct is_any_of { + static constexpr bool value = 
std::disjunction_v...>; + }; + + template + constexpr bool is_any_of_v = is_any_of::value; } template @@ -372,6 +390,9 @@ namespace eosio { namespace chain { return ( flags & ~static_cast(field) ); } + template struct overloaded : Ts... { using Ts::operator()...; }; + template overloaded(Ts...) -> overloaded; + } } // eosio::chain -FC_REFLECT( eosio::chain::void_t, ) +FC_REFLECT_EMPTY( eosio::chain::void_t ) diff --git a/libraries/chain/include/eosio/chain/unapplied_transaction_queue.hpp b/libraries/chain/include/eosio/chain/unapplied_transaction_queue.hpp new file mode 100644 index 00000000000..f32498dffc4 --- /dev/null +++ b/libraries/chain/include/eosio/chain/unapplied_transaction_queue.hpp @@ -0,0 +1,184 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +namespace fc { + inline std::size_t hash_value( const fc::sha256& v ) { + return v._hash[3]; + } +} + +namespace eosio { namespace chain { + +using namespace boost::multi_index; + +enum class trx_enum_type { + unknown = 0, + persisted = 1, + forked = 2, + aborted = 3 +}; + +struct unapplied_transaction { + const transaction_metadata_ptr trx_meta; + const fc::time_point expiry; + trx_enum_type trx_type = trx_enum_type::unknown; + + const transaction_id_type& id()const { return trx_meta->id(); } + + unapplied_transaction(const unapplied_transaction&) = delete; + unapplied_transaction() = delete; + unapplied_transaction& operator=(const unapplied_transaction&) = delete; + unapplied_transaction(unapplied_transaction&&) = default; +}; + +/** + * Track unapplied transactions for persisted, forked blocks, and aborted blocks. + * Persisted are first so that they can be applied in each block until expired. 
+ */ +class unapplied_transaction_queue { +public: + enum class process_mode { + non_speculative, // HEAD, READ_ONLY, IRREVERSIBLE + speculative_non_producer, // will never produce + speculative_producer // can produce + }; + +private: + struct by_trx_id; + struct by_type; + struct by_expiry; + + typedef multi_index_container< unapplied_transaction, + indexed_by< + hashed_unique< tag, + const_mem_fun + >, + ordered_non_unique< tag, member >, + ordered_non_unique< tag, member > + > + > unapplied_trx_queue_type; + + unapplied_trx_queue_type queue; + process_mode mode = process_mode::speculative_producer; + +public: + + void set_mode( process_mode new_mode ) { + if( new_mode != mode ) { + FC_ASSERT( empty(), "set_mode, queue required to be empty" ); + } + mode = new_mode; + } + + bool empty() const { + return queue.empty(); + } + + size_t size() const { + return queue.size(); + } + + void clear() { + queue.clear(); + } + + bool contains_persisted()const { + return queue.get().find( trx_enum_type::persisted ) != queue.get().end(); + } + + bool is_persisted(const transaction_metadata_ptr& trx)const { + auto itr = queue.get().find( trx->id() ); + if( itr == queue.get().end() ) return false; + return itr->trx_type == trx_enum_type::persisted; + } + + transaction_metadata_ptr get_trx( const transaction_id_type& id ) const { + auto itr = queue.get().find( id ); + if( itr == queue.get().end() ) return {}; + return itr->trx_meta; + } + + template + bool clear_expired( const time_point& pending_block_time, const time_point& deadline, Func&& callback ) { + auto& persisted_by_expiry = queue.get(); + while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pending_block_time) { + if (deadline <= fc::time_point::now()) { + return false; + } + callback( persisted_by_expiry.begin()->id(), persisted_by_expiry.begin()->trx_type ); + persisted_by_expiry.erase( persisted_by_expiry.begin() ); + } + return true; + } + + void clear_applied( const block_state_ptr& bs ) { 
+ if( empty() ) return; + auto& idx = queue.get(); + for( const auto& receipt : bs->block->transactions ) { + if( receipt.trx.contains() ) { + const auto& pt = receipt.trx.get(); + auto itr = queue.get().find( pt.id() ); + if( itr != queue.get().end() ) { + if( itr->trx_type != trx_enum_type::persisted ) { + idx.erase( pt.id() ); + } + } + } + } + } + + void add_forked( const branch_type& forked_branch ) { + if( mode == process_mode::non_speculative || mode == process_mode::speculative_non_producer ) return; + // forked_branch is in reverse order + for( auto ritr = forked_branch.rbegin(), rend = forked_branch.rend(); ritr != rend; ++ritr ) { + const block_state_ptr& bsptr = *ritr; + for( auto itr = bsptr->trxs_metas().begin(), end = bsptr->trxs_metas().end(); itr != end; ++itr ) { + const auto& trx = *itr; + fc::time_point expiry = trx->packed_trx()->expiration(); + queue.insert( { trx, expiry, trx_enum_type::forked } ); + } + } + } + + void add_aborted( std::vector aborted_trxs ) { + if( mode == process_mode::non_speculative || mode == process_mode::speculative_non_producer ) return; + for( auto& trx : aborted_trxs ) { + fc::time_point expiry = trx->packed_trx()->expiration(); + queue.insert( { std::move( trx ), expiry, trx_enum_type::aborted } ); + } + } + + void add_persisted( const transaction_metadata_ptr& trx ) { + if( mode == process_mode::non_speculative ) return; + auto itr = queue.get().find( trx->id() ); + if( itr == queue.get().end() ) { + fc::time_point expiry = trx->packed_trx()->expiration(); + queue.insert( { trx, expiry, trx_enum_type::persisted } ); + } else if( itr->trx_type != trx_enum_type::persisted ) { + queue.get().modify( itr, [](auto& un){ + un.trx_type = trx_enum_type::persisted; + } ); + } + } + + using iterator = unapplied_trx_queue_type::index::type::iterator; + + iterator begin() { return queue.get().begin(); } + iterator end() { return queue.get().end(); } + + iterator persisted_begin() { return queue.get().lower_bound( 
trx_enum_type::persisted ); } + iterator persisted_end() { return queue.get().upper_bound( trx_enum_type::persisted ); } + + iterator erase( iterator itr ) { return queue.get().erase( itr ); } + +}; + +} } //eosio::chain diff --git a/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp b/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp index 65cfa7b552c..71ab4ab677f 100644 --- a/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp +++ b/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp @@ -677,9 +677,6 @@ namespace eosio { namespace chain { namespace wasm_injections { }; struct pre_op_injectors : wasm_ops::op_types { - using call_t = wasm_ops::call ; - using call_indirect_t = wasm_ops::call_indirect ; - // float binops using f32_add_t = wasm_ops::f32_add >; using f32_sub_t = wasm_ops::f32_sub >; @@ -752,10 +749,17 @@ namespace eosio { namespace chain { namespace wasm_injections { using f64_convert_u_i64 = wasm_ops::f64_convert_u_i64 >; }; // pre_op_injectors + struct pre_op_full_injectors : pre_op_injectors { + using call_t = wasm_ops::call ; + using call_indirect_t = wasm_ops::call_indirect ; + }; struct post_op_injectors : wasm_ops::op_types { + using call_t = wasm_ops::call ; + }; + + struct post_op_full_injectors : post_op_injectors { using loop_t = wasm_ops::loop ; - using call_t = wasm_ops::call ; using grow_memory_t = wasm_ops::grow_memory ; }; @@ -774,7 +778,9 @@ namespace eosio { namespace chain { namespace wasm_injections { } }; - // inherit from this class and define your own injectors + // "full injection" gives checktime & depth counting along with softfloat injection. 
+ // Otherwise you'll just get softfloat injection + template class wasm_binary_injection { using standard_module_injectors = module_injectors< max_memory_injection_visitor >; @@ -790,10 +796,11 @@ namespace eosio { namespace chain { namespace wasm_injections { void inject() { _module_injectors.inject( *_module ); // inject checktime first - injector_utils::add_import( *_module, u8"checktime", checktime_injection::chktm_idx ); + if constexpr (full_injection) + injector_utils::add_import( *_module, u8"checktime", checktime_injection::chktm_idx ); for ( auto& fd : _module->functions.defs ) { - wasm_ops::EOSIO_OperatorDecoderStream pre_decoder(fd.code); + wasm_ops::EOSIO_OperatorDecoderStream> pre_decoder(fd.code); wasm_ops::instruction_stream pre_code(fd.code.size()*2); while ( pre_decoder ) { @@ -811,12 +818,14 @@ namespace eosio { namespace chain { namespace wasm_injections { fd.code = pre_code.get(); } for ( auto& fd : _module->functions.defs ) { - wasm_ops::EOSIO_OperatorDecoderStream post_decoder(fd.code); + wasm_ops::EOSIO_OperatorDecoderStream> post_decoder(fd.code); wasm_ops::instruction_stream post_code(fd.code.size()*2); - wasm_ops::op_types<>::call_t chktm; - chktm.field = injector_utils::injected_index_mapping.find(checktime_injection::chktm_idx)->second; - chktm.pack(&post_code); + if constexpr (full_injection) { + wasm_ops::op_types<>::call_t chktm; + chktm.field = injector_utils::injected_index_mapping.find(checktime_injection::chktm_idx)->second; + chktm.pack(&post_code); + } while ( post_decoder ) { auto op = post_decoder.decodeOp(); @@ -835,8 +844,7 @@ namespace eosio { namespace chain { namespace wasm_injections { } private: IR::Module* _module; - static std::string op_string; - static standard_module_injectors _module_injectors; + standard_module_injectors _module_injectors; }; }}} // namespace wasm_constraints, chain, eosio diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp 
index 341331989e5..82cbe001108 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -3,6 +3,9 @@ #include #include #include +#if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) +#include +#endif #include "Runtime/Linker.h" #include "Runtime/Runtime.h" @@ -11,6 +14,7 @@ namespace eosio { namespace chain { class apply_context; class wasm_runtime_interface; class controller; + namespace eosvmoc { struct config; } struct wasm_exit { int32_t code = 0; @@ -72,11 +76,13 @@ namespace eosio { namespace chain { class wasm_interface { public: enum class vm_type { - wavm, - wabt + wabt, + eos_vm, + eos_vm_jit, + eos_vm_oc }; - wasm_interface(vm_type vm, const chainbase::database& db); + wasm_interface(vm_type vm, bool eosvmoc_tierup, const chainbase::database& d, const boost::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config); ~wasm_interface(); //call before dtor to skip what can be minutes of dtor overhead with some runtimes; can cause leaks @@ -108,4 +114,4 @@ namespace eosio{ namespace chain { std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime); }} -FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (wavm)(wabt) ) +FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (wabt)(eos_vm)(eos_vm_jit)(eos_vm_oc) ) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index 45f70460b02..f891602bed4 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -3,6 +3,12 @@ #include #include #include +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED +#include +#else +#define _REGISTER_EOSVMOC_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) +#endif +#include #include #include #include @@ -16,14 +22,21 @@ #include "WAST/WAST.h" #include "IR/Validate.h" +#if 
defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) +#include +#endif + using namespace fc; using namespace eosio::chain::webassembly; using namespace IR; using namespace Runtime; + using boost::multi_index_container; namespace eosio { namespace chain { + namespace eosvmoc { struct config; } + struct wasm_interface_impl { struct wasm_cache_entry { digest_type code_hash; @@ -37,13 +50,39 @@ namespace eosio { namespace chain { struct by_first_block_num; struct by_last_block_num; - wasm_interface_impl(wasm_interface::vm_type vm, const chainbase::database& d) : db(d) { - if(vm == wasm_interface::vm_type::wavm) - runtime_interface = std::make_unique(); - else if(vm == wasm_interface::vm_type::wabt) +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + struct eosvmoc_tier { + eosvmoc_tier(const boost::filesystem::path& d, const eosvmoc::config& c, const chainbase::database& db) : cc(d, c, db), exec(cc) {} + eosvmoc::code_cache_async cc; + eosvmoc::executor exec; + eosvmoc::memory mem; + }; +#endif + + wasm_interface_impl(wasm_interface::vm_type vm, bool eosvmoc_tierup, const chainbase::database& d, const boost::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config) : db(d), wasm_runtime_time(vm) { + if(vm == wasm_interface::vm_type::wabt) runtime_interface = std::make_unique(); - else - EOS_THROW(wasm_exception, "wasm_interface_impl fall through"); +#ifdef EOSIO_EOS_VM_RUNTIME_ENABLED + if(vm == wasm_interface::vm_type::eos_vm) + runtime_interface = std::make_unique>(); +#endif +#ifdef EOSIO_EOS_VM_JIT_RUNTIME_ENABLED + if(vm == wasm_interface::vm_type::eos_vm_jit) + runtime_interface = std::make_unique>(); +#endif +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if(vm == wasm_interface::vm_type::eos_vm_oc) + runtime_interface = std::make_unique(data_dir, eosvmoc_config, d); +#endif + if(!runtime_interface) + EOS_THROW(wasm_exception, "${r} wasm runtime not supported on this platform and/or configuration", ("r", vm)); + +#ifdef 
EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if(eosvmoc_tierup) { + EOS_ASSERT(vm != wasm_interface::vm_type::eos_vm_oc, wasm_exception, "You can't use EOS VM OC as the base runtime when tier up is activated"); + eosvmoc.emplace(data_dir, eosvmoc_config, d); + } +#endif } ~wasm_interface_impl() { @@ -82,7 +121,13 @@ namespace eosio { namespace chain { void current_lib(uint32_t lib) { //anything last used before or on the LIB can be evicted - wasm_instantiation_cache.get().erase(wasm_instantiation_cache.get().begin(), wasm_instantiation_cache.get().upper_bound(lib)); + const auto first_it = wasm_instantiation_cache.get().begin(); + const auto last_it = wasm_instantiation_cache.get().upper_bound(lib); +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if(eosvmoc) for(auto it = first_it; it != last_it; it++) + eosvmoc->cc.free_code(it->code_hash, it->vm_version); +#endif + wasm_instantiation_cache.get().erase(first_it, last_it); } const std::unique_ptr& get_instantiated_module( const digest_type& code_hash, const uint8_t& vm_type, @@ -113,32 +158,35 @@ namespace eosio { namespace chain { }); trx_context.pause_billing_timer(); IR::Module module; + std::vector bytes = { + (const U8*)codeobject->code.data(), + (const U8*)codeobject->code.data() + codeobject->code.size()}; try { - Serialization::MemoryInputStream stream((const U8*)codeobject->code.data(), codeobject->code.size()); + Serialization::MemoryInputStream stream((const U8*)bytes.data(), + bytes.size()); WASM::serialize(stream, module); module.userSections.clear(); - } catch(const Serialization::FatalSerializationException& e) { + } catch (const Serialization::FatalSerializationException& e) { EOS_ASSERT(false, wasm_serialization_error, e.message.c_str()); - } catch(const IR::ValidationException& e) { + } catch (const IR::ValidationException& e) { EOS_ASSERT(false, wasm_serialization_error, e.message.c_str()); } - - wasm_injections::wasm_binary_injection injector(module); - injector.inject(); - - std::vector bytes; - try { - 
Serialization::ArrayOutputStream outstream; - WASM::serialize(outstream, module); - bytes = outstream.getBytes(); - } catch(const Serialization::FatalSerializationException& e) { - EOS_ASSERT(false, wasm_serialization_error, e.message.c_str()); - } catch(const IR::ValidationException& e) { - EOS_ASSERT(false, wasm_serialization_error, e.message.c_str()); + if (runtime_interface->inject_module(module)) { + try { + Serialization::ArrayOutputStream outstream; + WASM::serialize(outstream, module); + bytes = outstream.getBytes(); + } catch (const Serialization::FatalSerializationException& e) { + EOS_ASSERT(false, wasm_serialization_error, + e.message.c_str()); + } catch (const IR::ValidationException& e) { + EOS_ASSERT(false, wasm_serialization_error, + e.message.c_str()); + } } wasm_instantiation_cache.modify(it, [&](auto& c) { - c.module = runtime_interface->instantiate_module((const char*)bytes.data(), bytes.size(), parse_initial_memory(module)); + c.module = runtime_interface->instantiate_module((const char*)bytes.data(), bytes.size(), parse_initial_memory(module), code_hash, vm_type, vm_version); }); } return it->module; @@ -164,11 +212,24 @@ namespace eosio { namespace chain { wasm_cache_index wasm_instantiation_cache; const chainbase::database& db; + const wasm_interface::vm_type wasm_runtime_time; + +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + fc::optional eosvmoc; +#endif }; +#define _ADD_PAREN_1(...) ((__VA_ARGS__)) _ADD_PAREN_2 +#define _ADD_PAREN_2(...) 
((__VA_ARGS__)) _ADD_PAREN_1 +#define _ADD_PAREN_1_END +#define _ADD_PAREN_2_END +#define _WRAPPED_SEQ(SEQ) BOOST_PP_CAT(_ADD_PAREN_1 SEQ, _END) + #define _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ - _REGISTER_WAVM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ - _REGISTER_WABT_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) + _REGISTER_WAVM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) \ + _REGISTER_WABT_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) \ + _REGISTER_EOS_VM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) \ + _REGISTER_EOSVMOC_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) #define _REGISTER_INTRINSIC4(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG ) diff --git a/libraries/chain/include/eosio/chain/wast_to_wasm.hpp b/libraries/chain/include/eosio/chain/wast_to_wasm.hpp index 1493f2f99f7..eb478d7653a 100644 --- a/libraries/chain/include/eosio/chain/wast_to_wasm.hpp +++ b/libraries/chain/include/eosio/chain/wast_to_wasm.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/libraries/chain/include/eosio/chain/webassembly/common.hpp b/libraries/chain/include/eosio/chain/webassembly/common.hpp index 723bd3039b6..7f441be8649 100644 --- a/libraries/chain/include/eosio/chain/webassembly/common.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/common.hpp @@ -7,7 +7,7 @@ using namespace fc; -namespace eosio { namespace chain { +namespace eosio { namespace chain { class apply_context; class transaction_context; @@ -23,7 +23,7 @@ namespace eosio { namespace chain { return T(ctx); } }; - + template<> struct class_from_wasm { /** @@ -68,13 +68,12 @@ namespace eosio { namespace chain { return value; } - template - operator U *() const { - return static_cast(value); + operator T *() const { + return value; } T *value; - }; + }; /** * class to represent an in-wasm-memory char array that must be 
null terminated @@ -90,9 +89,8 @@ namespace eosio { namespace chain { return value; } - template - operator U *() const { - return static_cast(value); + operator char *() const { + return value; } char *value; diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc.hpp new file mode 100644 index 00000000000..000b8cef33c --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc.hpp @@ -0,0 +1,799 @@ +#pragma once + +#include +#include +#include +#include +#include +#include "IR/Types.h" + +#include +#include +#include +#include +#include +#include + +#include + +namespace eosio { namespace chain { namespace webassembly { namespace eosvmoc { + +using namespace IR; +using namespace Runtime; +using namespace fc; +using namespace eosio::chain::webassembly::common; + +using namespace eosio::chain::eosvmoc; + +class eosvmoc_instantiated_module; + +class eosvmoc_runtime : public eosio::chain::wasm_runtime_interface { + public: + eosvmoc_runtime(const boost::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db); + ~eosvmoc_runtime(); + bool inject_module(IR::Module&) override { return false; } + std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory, + const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) override; + + void immediately_exit_currently_running_module() override; + + friend eosvmoc_instantiated_module; + eosvmoc::code_cache_sync cc; + eosvmoc::executor exec; + eosvmoc::memory mem; +}; + +/** + * class to represent an in-wasm-memory array + * it is a hint to the transcriber that the next parameter will + * be a size (in Ts) and that the pair are validated together + * This triggers the template specialization of intrinsic_invoker_impl + * @tparam T + */ +template +inline array_ptr array_ptr_impl (size_t ptr, size_t length) +{ + constexpr int 
cb_full_linear_memory_start_segment_offset = OFFSET_OF_CONTROL_BLOCK_MEMBER(full_linear_memory_start); + constexpr int cb_first_invalid_memory_address_segment_offset = OFFSET_OF_CONTROL_BLOCK_MEMBER(first_invalid_memory_address); + + size_t end = ptr + length*sizeof(T); + + asm volatile("cmp %%gs:%c[firstInvalidMemory], %[End]\n" + "jle 1f\n" + "mov %%gs:%c[maximumEosioWasmMemory], %[Ptr]\n" //always invalid address + "1:\n" + "add %%gs:%c[linearMemoryStart], %[Ptr]\n" + : [Ptr] "+r" (ptr), + [End] "+r" (end) + : [linearMemoryStart] "i" (cb_full_linear_memory_start_segment_offset), + [firstInvalidMemory] "i" (cb_first_invalid_memory_address_segment_offset), + [maximumEosioWasmMemory] "i" (wasm_constraints::maximum_linear_memory) + : "cc" + ); + + + return array_ptr((T*)ptr); +} + +/** + * class to represent an in-wasm-memory char array that must be null terminated + */ +inline null_terminated_ptr null_terminated_ptr_impl(uint64_t ptr) +{ + constexpr int cb_full_linear_memory_start_segment_offset = OFFSET_OF_CONTROL_BLOCK_MEMBER(full_linear_memory_start); + constexpr int cb_first_invalid_memory_address_segment_offset = OFFSET_OF_CONTROL_BLOCK_MEMBER(first_invalid_memory_address); + + char dumpster; + uint64_t scratch; + + asm volatile("mov %%gs:(%[Ptr]), %[Dumpster]\n" //probe memory location at ptr to see if valid + "mov %%gs:%c[firstInvalidMemory], %[Scratch]\n" //get first invalid memory address + "cmpb $0, %%gs:-1(%[Scratch])\n" //is last byte in valid linear memory 0? 
+ "je 2f\n" //if so, this will be a null terminated string one way or another + "mov %[Ptr],%[Scratch]\n" + "1:\n" //start loop looking for either 0, or until we SEGV + "inc %[Scratch]\n" + "cmpb $0,%%gs:(%[Scratch])\n" + "jne 1b\n" + "2:\n" + "add %%gs:%c[linearMemoryStart], %[Ptr]\n" //add address of linear memory 0 to ptr + : [Ptr] "+r" (ptr), + [Dumpster] "=r" (dumpster), + [Scratch] "=r" (scratch) + : [linearMemoryStart] "i" (cb_full_linear_memory_start_segment_offset), + [firstInvalidMemory] "i" (cb_first_invalid_memory_address_segment_offset) + : "cc" + ); + + return null_terminated_ptr((char*)ptr); +} + +template +struct void_ret_wrapper { + using type = T; +}; + +template<> +struct void_ret_wrapper { + using type = char; +}; + +template +using void_ret_wrapper_t = typename void_ret_wrapper::type; + +/** + * template that maps native types to WASM VM types + * @tparam T the native type + */ +template +struct native_to_wasm { + using type = void; +}; + +/** + * specialization for mapping pointers to int32's + */ +template +struct native_to_wasm { + using type = I32; +}; + +/** + * Mappings for native types + */ +template<> +struct native_to_wasm { + using type = F32; +}; +template<> +struct native_to_wasm { + using type = F64; +}; +template<> +struct native_to_wasm { + using type = I32; +}; +template<> +struct native_to_wasm { + using type = I32; +}; +template<> +struct native_to_wasm { + using type = I64; +}; +template<> +struct native_to_wasm { + using type = I64; +}; +template<> +struct native_to_wasm { + using type = I32; +}; +template<> +struct native_to_wasm { + using type = I64; +}; +template<> +struct native_to_wasm { + using type = I64; +}; +template<> +struct native_to_wasm { + using type = I32; +}; + +template<> +struct native_to_wasm { + using type = I32; +}; + +// convenience alias +template +using native_to_wasm_t = typename native_to_wasm::type; + +template +inline auto convert_native_to_wasm(T val) { + return native_to_wasm_t(val); +} + 
+inline auto convert_native_to_wasm(const name &val) { + return native_to_wasm_t(val.to_uint64_t()); +} + +inline auto convert_native_to_wasm(const fc::time_point_sec& val) { + return native_to_wasm_t(val.sec_since_epoch()); +} + +inline auto convert_native_to_wasm(char* ptr) { + constexpr int cb_full_linear_memory_start_offset = OFFSET_OF_CONTROL_BLOCK_MEMBER(full_linear_memory_start); + char* full_linear_memory_start; + asm("mov %%gs:%c[fullLinearMemOffset], %[fullLinearMem]\n" + : [fullLinearMem] "=r" (full_linear_memory_start) + : [fullLinearMemOffset] "i" (cb_full_linear_memory_start_offset) + ); + U64 delta = (U64)(ptr - full_linear_memory_start); + array_ptr_impl(delta, 1); + return (U32)delta; +} + +template +inline auto convert_wasm_to_native(native_to_wasm_t val) { + return T(val); +} + +template +struct wasm_to_value_type; + +template<> +struct wasm_to_value_type { + static constexpr auto value = ValueType::f32; +}; + +template<> +struct wasm_to_value_type { + static constexpr auto value = ValueType::f64; +}; +template<> +struct wasm_to_value_type { + static constexpr auto value = ValueType::i32; +}; +template<> +struct wasm_to_value_type { + static constexpr auto value = ValueType::i64; +}; + +template +constexpr auto wasm_to_value_type_v = wasm_to_value_type::value; + +template +struct wasm_to_rvalue_type; +template<> +struct wasm_to_rvalue_type { + static constexpr auto value = ResultType::f32; +}; +template<> +struct wasm_to_rvalue_type { + static constexpr auto value = ResultType::f64; +}; +template<> +struct wasm_to_rvalue_type { + static constexpr auto value = ResultType::i32; +}; +template<> +struct wasm_to_rvalue_type { + static constexpr auto value = ResultType::i64; +}; +template<> +struct wasm_to_rvalue_type { + static constexpr auto value = ResultType::none; +}; +template<> +struct wasm_to_rvalue_type { + static constexpr auto value = ResultType::i64; +}; +template<> +struct wasm_to_rvalue_type { + static constexpr auto value = 
ResultType::i64; +}; + +template<> +struct wasm_to_rvalue_type { + static constexpr auto value = ResultType::i32; +}; + +template<> +struct wasm_to_rvalue_type { + static constexpr auto value = ResultType::i32; +}; + + +template +constexpr auto wasm_to_rvalue_type_v = wasm_to_rvalue_type::value; + +template +struct is_reference_from_value { + static constexpr bool value = false; +}; + +template<> +struct is_reference_from_value { + static constexpr bool value = true; +}; + +template<> +struct is_reference_from_value { + static constexpr bool value = true; +}; + +template +constexpr bool is_reference_from_value_v = is_reference_from_value::value; + + + +struct void_type { +}; + +/** + * Forward declaration of provider for FunctionType given a desired C ABI signature + */ +template +struct wasm_function_type_provider; + +/** + * specialization to destructure return and arguments + */ +template +struct wasm_function_type_provider { + static const FunctionType *type() { + return FunctionType::get(wasm_to_rvalue_type_v, {wasm_to_value_type_v ...}); + } +}; + +/** + * Forward declaration of the invoker type which transcribes arguments to/from a native method + * and injects the appropriate checks + * + * @tparam Ret - the return type of the native function + * @tparam NativeParameters - a std::tuple of the remaining native parameters to transcribe + * @tparam WasmParameters - a std::tuple of the transribed parameters + */ +template +struct intrinsic_invoker_impl; + +/** + * Specialization for the fully transcribed signature + * @tparam Ret - the return type of the native function + * @tparam Translated - the arguments to the wasm function + */ +template +struct intrinsic_invoker_impl, std::tuple> { + using next_method_type = Ret (*)(Translated...); + + template + static native_to_wasm_t invoke(Translated... 
translated) { + try { + if constexpr(!is_injected) { + constexpr int cb_current_call_depth_remaining_segment_offset = OFFSET_OF_CONTROL_BLOCK_MEMBER(current_call_depth_remaining); + constexpr int depth_assertion_intrinsic_offset = OFFSET_OF_FIRST_INTRINSIC - (int)boost::hana::index_if(intrinsic_table, ::boost::hana::equal.to(BOOST_HANA_STRING("eosvmoc_internal.depth_assert"))).value()*8; + asm volatile("cmpl $1,%%gs:%c[callDepthRemainOffset]\n" + "jne 1f\n" + "callq *%%gs:%c[depthAssertionIntrinsicOffset]\n" + "1:\n" + : + : [callDepthRemainOffset] "i" (cb_current_call_depth_remaining_segment_offset), + [depthAssertionIntrinsicOffset] "i" (depth_assertion_intrinsic_offset) + : "cc"); + } + return convert_native_to_wasm(Method(translated...)); + } + catch(...) { + *reinterpret_cast(eos_vm_oc_get_exception_ptr()) = std::current_exception(); + } + siglongjmp(*eos_vm_oc_get_jmp_buf(), EOSVMOC_EXIT_EXCEPTION); + __builtin_unreachable(); + } + + template + static const auto fn() { + return invoke; + } +}; + +/** + * specialization of the fully transcribed signature for void return values + * @tparam Translated - the arguments to the wasm function + */ +template +struct intrinsic_invoker_impl, std::tuple> { + using next_method_type = void_type (*)(Translated...); + + template + static void invoke(Translated... 
translated) { + try { + if constexpr(!is_injected) { + constexpr int cb_current_call_depth_remaining_segment_offset = OFFSET_OF_CONTROL_BLOCK_MEMBER(current_call_depth_remaining); + constexpr int depth_assertion_intrinsic_offset = OFFSET_OF_FIRST_INTRINSIC - (int)boost::hana::index_if(intrinsic_table, ::boost::hana::equal.to(BOOST_HANA_STRING("eosvmoc_internal.depth_assert"))).value()*8; + asm volatile("cmpl $1,%%gs:%c[callDepthRemainOffset]\n" + "jne 1f\n" + "callq *%%gs:%c[depthAssertionIntrinsicOffset]\n" + "1:\n" + : + : [callDepthRemainOffset] "i" (cb_current_call_depth_remaining_segment_offset), + [depthAssertionIntrinsicOffset] "i" (depth_assertion_intrinsic_offset) + : "cc"); + } + Method(translated...); + return; + } + catch(...) { + *reinterpret_cast(eos_vm_oc_get_exception_ptr()) = std::current_exception(); + } + siglongjmp(*eos_vm_oc_get_jmp_buf(), EOSVMOC_EXIT_EXCEPTION); + __builtin_unreachable(); + } + + template + static const auto fn() { + return invoke; + } +}; + +/** + * Sepcialization for transcribing a simple type in the native method signature + * @tparam Ret - the return type of the native method + * @tparam Input - the type of the native parameter to transcribe + * @tparam Inputs - the remaining native parameters to transcribe + * @tparam Translated - the list of transcribed wasm parameters + */ +template +struct intrinsic_invoker_impl, std::tuple> { + using translated_type = native_to_wasm_t; + using next_step = intrinsic_invoker_impl, std::tuple>; + using then_type = Ret (*)(Input, Inputs..., Translated...); + + template + static Ret translate_one(Inputs... rest, Translated... 
translated, translated_type last) { + auto native = convert_wasm_to_native(last); + return Then(native, rest..., translated...); + }; + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing a array_ptr type in the native method signature + * This type transcribes into 2 wasm parameters: a pointer and byte length and checks the validity of that memory + * range before dispatching to the native method + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + * @tparam Translated - the list of transcribed wasm parameters + */ +template +struct intrinsic_invoker_impl, uint32_t, Inputs...>, std::tuple> { + using next_step = intrinsic_invoker_impl, std::tuple>; + using then_type = Ret(*)(array_ptr, uint32_t, Inputs..., Translated...); + + template + static auto translate_one(Inputs... rest, Translated... translated, I32 ptr, I32 size) -> std::enable_if_t::value, Ret> { + static_assert(!std::is_pointer::value, "Currently don't support array of pointers"); + const auto length = size_t((U32)size); + T* base = array_ptr_impl((U32)ptr, length); + if ( reinterpret_cast(base) % alignof(T) != 0 ) { + std::vector& copy = reinterpret_cast>*>(eos_vm_oc_get_bounce_buffer_list())->emplace_back(length > 0 ? length*sizeof(T) : 1); + T* copy_ptr = (T*)©[0]; + memcpy( (void*)copy.data(), (void*)base, length * sizeof(T) ); + return Then(static_cast>(copy_ptr), length, rest..., translated...); + } + return Then(static_cast>(base), length, rest..., translated...); + }; + + template + static auto translate_one(Inputs... rest, Translated... 
translated, I32 ptr, I32 size) -> std::enable_if_t::value, Ret> { + static_assert(!std::is_pointer::value, "Currently don't support array of pointers"); + const auto length = size_t((U32)size); + T* base = array_ptr_impl((U32)ptr, length); + if ( reinterpret_cast(base) % alignof(T) != 0 ) { + std::vector& copy = reinterpret_cast>*>(eos_vm_oc_get_bounce_buffer_list())->emplace_back(length > 0 ? length*sizeof(T) : 1); + T* copy_ptr = (T*)©[0]; + memcpy( (void*)copy.data(), (void*)base, length * sizeof(T) ); + Ret ret = Then(static_cast>(copy_ptr), length, rest..., translated...); + memcpy( (void*)base, (void*)copy.data(), length * sizeof(T) ); + return ret; + } + return Then(static_cast>(base), length, rest..., translated...); + }; + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing a null_terminated_ptr type in the native method signature + * This type transcribes 1 wasm parameters: a char pointer which is validated to contain + * a null value before the end of the allocated memory. + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + * @tparam Translated - the list of transcribed wasm parameters + */ +template +struct intrinsic_invoker_impl, std::tuple> { + using next_step = intrinsic_invoker_impl, std::tuple>; + using then_type = Ret(*)(null_terminated_ptr, Inputs..., Translated...); + + template + static Ret translate_one(Inputs... rest, Translated... 
translated, I32 ptr) { + return Then(null_terminated_ptr_impl((U32)ptr), rest..., translated...); + }; + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing a pair of array_ptr types in the native method signature that share size + * This type transcribes into 3 wasm parameters: 2 pointers and byte length and checks the validity of those memory + * ranges before dispatching to the native method + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + * @tparam Translated - the list of transcribed wasm parameters + */ +template +struct intrinsic_invoker_impl, array_ptr, uint32_t, Inputs...>, std::tuple> { + using next_step = intrinsic_invoker_impl, std::tuple>; + using then_type = Ret(*)(array_ptr, array_ptr, uint32_t, Inputs..., Translated...); + + template + static Ret translate_one(Inputs... rest, Translated... translated, I32 ptr_t, I32 ptr_u, I32 size) { + static_assert(std::is_same, char>::value && std::is_same, char>::value, "Currently only support array of (const)chars"); + const auto length = size_t((U32)size); + return Then(array_ptr_impl((U32)ptr_t, length), array_ptr_impl((U32)ptr_u, length), length, rest..., translated...); + }; + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing memset parameters + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + * @tparam Translated - the list of transcribed wasm parameters + */ +template +struct intrinsic_invoker_impl, int, uint32_t>, std::tuple<>> { + using next_step = intrinsic_invoker_impl, std::tuple>; + using then_type = Ret(*)(array_ptr, int, uint32_t); + + template + static Ret translate_one(I32 ptr, I32 value, I32 size) { + const auto length = size_t((U32)size); + return Then(array_ptr_impl((U32)ptr, length), value, 
length); + }; + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing a pointer type in the native method signature + * This type transcribes into an int32 pointer checks the validity of that memory + * range before dispatching to the native method + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + * @tparam Translated - the list of transcribed wasm parameters + */ +template +struct intrinsic_invoker_impl, std::tuple> { + using next_step = intrinsic_invoker_impl, std::tuple>; + using then_type = Ret (*)(T *, Inputs..., Translated...); + + template + static auto translate_one(Inputs... rest, Translated... translated, I32 ptr) -> std::enable_if_t::value, Ret> { + T* base = array_ptr_impl((U32)ptr, 1); + if ( reinterpret_cast(base) % alignof(T) != 0 ) { + std::remove_const_t copy; + T* copy_ptr = © + memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); + return Then(copy_ptr, rest..., translated...); + } + return Then(base, rest..., translated...); + }; + + template + static auto translate_one(Inputs... rest, Translated... translated, I32 ptr) -> std::enable_if_t::value, Ret> { + T* base = array_ptr_impl((U32)ptr, 1); + if ( reinterpret_cast(base) % alignof(T) != 0 ) { + std::remove_const_t copy; + T* copy_ptr = © + memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); + Ret ret = Then(copy_ptr, rest..., translated...); + memcpy( (void*)base, (void*)copy_ptr, sizeof(T) ); + return ret; + } + return Then(base, rest..., translated...); + }; + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing a reference to a name which can be passed as a native value + * This type transcribes into a native type which is loaded by value into a + * variable on the stack and then passed by reference to the intrinsic. 
+ * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + * @tparam Translated - the list of transcribed wasm parameters + */ +template +struct intrinsic_invoker_impl, std::tuple> { + using next_step = intrinsic_invoker_impl, std::tuple >>; + using then_type = Ret (*)(const name&, Inputs..., Translated...); + + template + static Ret translate_one(Inputs... rest, Translated... translated, native_to_wasm_t wasm_value) { + auto value = name(wasm_value); + return Then(value, rest..., translated...); + } + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * Specialization for transcribing a reference type in the native method signature + * This type transcribes into an int32 pointer checks the validity of that memory + * range before dispatching to the native method + * + * @tparam Ret - the return type of the native method + * @tparam Inputs - the remaining native parameters to transcribe + * @tparam Translated - the list of transcribed wasm parameters + */ +template +struct intrinsic_invoker_impl, std::tuple> { + using next_step = intrinsic_invoker_impl, std::tuple>; + using then_type = Ret (*)(T &, Inputs..., Translated...); + + template + static auto translate_one(Inputs... rest, Translated... translated, I32 ptr) -> std::enable_if_t::value, Ret> { + EOS_ASSERT((U32)ptr != 0, wasm_exception, "references cannot be created for null pointers"); + T &base = *array_ptr_impl((uint32_t)ptr, 1); + + if ( reinterpret_cast(&base) % alignof(T) != 0 ) { + std::remove_const_t copy; + T* copy_ptr = © + memcpy( (void*)copy_ptr, (void*)&base, sizeof(T) ); + return Then(*copy_ptr, rest..., translated...); + } + return Then(base, rest..., translated...); + } + + template + static auto translate_one(Inputs... rest, Translated... 
translated, I32 ptr) -> std::enable_if_t::value, Ret> { + EOS_ASSERT((U32)ptr != 0, wasm_exception, "reference cannot be created for null pointers"); + T &base = *array_ptr_impl((uint32_t)ptr, 1); + + if ( reinterpret_cast(&base) % alignof(T) != 0 ) { + std::remove_const_t copy; + T* copy_ptr = © + memcpy( (void*)copy_ptr, (void*)&base, sizeof(T) ); + Ret ret = Then(*copy_ptr, rest..., translated...); + memcpy( (void*)&base, (void*)copy_ptr, sizeof(T) ); + return ret; + } + return Then(base, rest..., translated...); + } + + template + static const auto fn() { + return next_step::template fn>(); + } +}; + +/** + * forward declaration of a wrapper class to call methods of the class + */ +template +struct intrinsic_function_invoker { + using impl = intrinsic_invoker_impl, std::tuple<>>; + + template + static Ret wrapper(Params... params) { + constexpr int cb_ctx_ptr_offset = OFFSET_OF_CONTROL_BLOCK_MEMBER(ctx); + apply_context* ctx; + asm("mov %%gs:%c[applyContextOffset], %[cPtr]\n" + : [cPtr] "=r" (ctx) + : [applyContextOffset] "i" (cb_ctx_ptr_offset) + ); + return (class_from_wasm::value(*ctx).*Method)(params...); + } + + template + static const WasmSig *fn() { + auto fn = impl::template fn>(); + static_assert(std::is_same::value, + "Intrinsic function signature does not match the ABI"); + return fn; + } +}; + +template +struct intrinsic_function_invoker { + using impl = intrinsic_invoker_impl, std::tuple<>>; + + template + static void_type wrapper(Params... 
params) { + constexpr int cb_ctx_ptr_offset = OFFSET_OF_CONTROL_BLOCK_MEMBER(ctx); + apply_context* ctx; + asm("mov %%gs:%c[applyContextOffset], %[cPtr]\n" + : [cPtr] "=r" (ctx) + : [applyContextOffset] "i" (cb_ctx_ptr_offset) + ); + (class_from_wasm::value(*ctx).*Method)(params...); + return void_type(); + } + + template + static const WasmSig *fn() { + auto fn = impl::template fn>(); + static_assert(std::is_same::value, + "Intrinsic function signature does not match the ABI"); + return fn; + } +}; + +template +struct intrinsic_function_invoker_wrapper; + +template +struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); + using type = intrinsic_function_invoker; +}; + +template +struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); + using type = intrinsic_function_invoker; +}; + +template +struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); + using type = intrinsic_function_invoker; +}; + +template +struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); + using type = intrinsic_function_invoker; +}; + +#define _ADD_PAREN_1(...) 
((__VA_ARGS__)) _ADD_PAREN_2 +#define _ADD_PAREN_2(...) ((__VA_ARGS__)) _ADD_PAREN_1 +#define _ADD_PAREN_1_END +#define _ADD_PAREN_2_END +#define _WRAPPED_SEQ(SEQ) BOOST_PP_CAT(_ADD_PAREN_1 SEQ, _END) + +#define __INTRINSIC_NAME(LABEL, SUFFIX) LABEL##SUFFIX +#define _INTRINSIC_NAME(LABEL, SUFFIX) __INTRINSIC_NAME(LABEL,SUFFIX) + +#define _REGISTER_EOSVMOC_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ + static eosio::chain::eosvmoc::intrinsic _INTRINSIC_NAME(__intrinsic_fn, __COUNTER__) EOSVMOC_INTRINSIC_INIT_PRIORITY (\ + MOD "." NAME,\ + eosio::chain::webassembly::eosvmoc::wasm_function_type_provider::type(),\ + (void *)eosio::chain::webassembly::eosvmoc::intrinsic_function_invoker_wrapper::type::fn<&CLS::METHOD>(),\ + ::boost::hana::index_if(eosio::chain::eosvmoc::intrinsic_table, ::boost::hana::equal.to(BOOST_HANA_STRING(MOD "." NAME))).value()\ + );\ + + +} } } }// eosio::chain::webassembly::eosvmoc diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp new file mode 100644 index 00000000000..ce94cc5db90 --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp @@ -0,0 +1,117 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +#include + +namespace std { + template<> struct hash { + size_t operator()(const eosio::chain::eosvmoc::code_tuple& ct) const noexcept { + return ct.code_id._hash[0]; + } + }; +} + +namespace eosio { namespace chain { namespace eosvmoc { + +using namespace boost::multi_index; +using namespace boost::asio; + +namespace bip = boost::interprocess; +namespace bfs = boost::filesystem; + +using allocator_t = bip::rbtree_best_fit, alignof(std::max_align_t)>; + +struct config; + +class code_cache_base { + public: + code_cache_base(const bfs::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db); + 
~code_cache_base(); + + const int& fd() const { return _cache_fd; } + + void free_code(const digest_type& code_id, const uint8_t& vm_version); + + protected: + struct by_hash; + + typedef boost::multi_index_container< + code_descriptor, + indexed_by< + sequenced<>, + ordered_unique, + composite_key< code_descriptor, + member, + member + > + > + > + > code_cache_index; + code_cache_index _cache_index; + + const chainbase::database& _db; + + bfs::path _cache_file_path; + int _cache_fd; + + io_context _ctx; + local::datagram_protocol::socket _compile_monitor_write_socket{_ctx}; + local::datagram_protocol::socket _compile_monitor_read_socket{_ctx}; + + //these are really only useful to the async code cache, but keep them here so + //free_code can be shared + std::unordered_set _queued_compiles; + std::unordered_map _outstanding_compiles_and_poison; + + size_t _free_bytes_eviction_threshold; + void check_eviction_threshold(size_t free_bytes); + void run_eviction_round(); + + void set_on_disk_region_dirty(bool); + + template + void serialize_cache_index(fc::datastream& ds); +}; + +class code_cache_async : public code_cache_base { + public: + code_cache_async(const bfs::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db); + ~code_cache_async(); + + //If code is in cache: returns pointer & bumps to front of MRU list + //If code is not in cache, and not blacklisted, and not currently compiling: return nullptr and kick off compile + //otherwise: return nullptr + const code_descriptor* const get_descriptor_for_code(const digest_type& code_id, const uint8_t& vm_version); + + private: + std::thread _monitor_reply_thread; + boost::lockfree::spsc_queue _result_queue; + void wait_on_compile_monitor_message(); + std::tuple consume_compile_thread_queue(); + std::unordered_set _blacklist; + size_t _threads; +}; + +class code_cache_sync : public code_cache_base { + public: + using code_cache_base::code_cache_base; + ~code_cache_sync(); + + //Can still 
fail and return nullptr if, for example, there is an expected instantiation failure + const code_descriptor* const get_descriptor_for_code_sync(const digest_type& code_id, const uint8_t& vm_version); +}; + +}}} \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/compile_monitor.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/compile_monitor.hpp new file mode 100644 index 00000000000..7d4229e4616 --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/compile_monitor.hpp @@ -0,0 +1,12 @@ +#pragma once + +#include + +#include +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +wrapped_fd get_connection_to_compile_monitor(int cache_fd); + +}}} \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/compile_trampoline.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/compile_trampoline.hpp new file mode 100644 index 00000000000..c6be55ad3ea --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/compile_trampoline.hpp @@ -0,0 +1,7 @@ +#pragma once + +namespace eosio { namespace chain { namespace eosvmoc { + +void run_compile_trampoline(int fd); + +}}} \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/config.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/config.hpp new file mode 100644 index 00000000000..dbab7b0342b --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/config.hpp @@ -0,0 +1,18 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +struct config { + uint64_t cache_size = 1024u*1024u*1024u; + uint64_t threads = 1u; +}; + +}}} diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.h b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.h new file mode 100644 
index 00000000000..6c5d4e42676 --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.h @@ -0,0 +1,38 @@ +#pragma once + +#include +#include +#include + +#ifdef __cplusplus +#include +#include +namespace eosio { namespace chain {class apply_context;}} +#endif + +struct eos_vm_oc_control_block { + uint64_t magic; + uintptr_t execution_thread_code_start; + size_t execution_thread_code_length; + uintptr_t execution_thread_memory_start; + size_t execution_thread_memory_length; +#ifdef __cplusplus + eosio::chain::apply_context* ctx; + std::exception_ptr* eptr; +#else + void* ctx; + void* eptr; +#endif + unsigned current_call_depth_remaining; + int64_t current_linear_memory_pages; //-1 if no memory + char* full_linear_memory_start; + sigjmp_buf* jmp; +#ifdef __cplusplus + std::list>* bounce_buffers; +#else + void* bounce_buffers; +#endif + uintptr_t running_code_base; + int64_t first_invalid_memory_address; + unsigned is_running; +}; \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.hpp new file mode 100644 index 00000000000..6dfffa12804 --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/eos-vm-oc.hpp @@ -0,0 +1,56 @@ +#pragma once + +#include +#include + +#include + +#include +#include +#include + +#include +#include + +namespace eosio { namespace chain { + +class apply_context; + +namespace eosvmoc { + +using control_block = eos_vm_oc_control_block; + +struct no_offset{}; +struct code_offset{ size_t offset; }; //relative to code_begin +struct intrinsic_ordinal{ size_t ordinal; }; + +using eosvmoc_optional_offset_or_import_t = fc::static_variant; + +struct code_descriptor { + digest_type code_hash; + uint8_t vm_version; + uint8_t codegen_version; + size_t code_begin; + eosvmoc_optional_offset_or_import_t start; + unsigned apply_offset; + int starting_memory_pages; + size_t 
initdata_begin; + unsigned initdata_size; + unsigned initdata_prologue_size; +}; + +enum eosvmoc_exitcode : int { + EOSVMOC_EXIT_CLEAN_EXIT = 1, + EOSVMOC_EXIT_CHECKTIME_FAIL, + EOSVMOC_EXIT_SEGV, + EOSVMOC_EXIT_EXCEPTION +}; + +}}} + +FC_REFLECT(eosio::chain::eosvmoc::no_offset, ); +FC_REFLECT(eosio::chain::eosvmoc::code_offset, (offset)); +FC_REFLECT(eosio::chain::eosvmoc::intrinsic_ordinal, (ordinal)); +FC_REFLECT(eosio::chain::eosvmoc::code_descriptor, (code_hash)(vm_version)(codegen_version)(code_begin)(start)(apply_offset)(starting_memory_pages)(initdata_begin)(initdata_size)(initdata_prologue_size)); + +#define EOSVMOC_INTRINSIC_INIT_PRIORITY __attribute__((init_priority(198))) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/executor.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/executor.hpp new file mode 100644 index 00000000000..e5a092cd57e --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/executor.hpp @@ -0,0 +1,39 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace eosio { namespace chain { + +class apply_context; + +namespace eosvmoc { + +class code_cache_base; +class memory; +struct code_descriptor; + +class executor { + public: + executor(const code_cache_base& cc); + ~executor(); + + void execute(const code_descriptor& code, const memory& mem, apply_context& context); + + private: + uint8_t* code_mapping; + size_t code_mapping_size; + bool mapping_is_executable; + + std::exception_ptr executors_exception_ptr; + sigjmp_buf executors_sigjmp_buf; + std::list> executors_bounce_buffers; +}; + +}}} diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/gs_seg_helpers.h b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/gs_seg_helpers.h new file mode 100644 index 00000000000..d2baf938017 --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/gs_seg_helpers.h @@ 
-0,0 +1,32 @@ +#pragma once + +#include +#include +#include + +#include + +#ifdef __clang__ + #define GS_PTR __attribute__((address_space(256))) +#else + #define GS_PTR __seg_gs +#endif + +//This is really rather unfortunate, but on the upside it does allow a static assert to know if +//the values ever slide which would be a PIC breaking event we'd want to know about at compile +//time. +#define EOS_VM_OC_CONTROL_BLOCK_OFFSET (-18944) +#define EOS_VM_OC_MEMORY_STRIDE (UINT64_C(4329598976)) + +#ifdef __cplusplus +extern "C" { +#endif + +int32_t eos_vm_oc_grow_memory(int32_t grow, int32_t max); +sigjmp_buf* eos_vm_oc_get_jmp_buf(); +void* eos_vm_oc_get_exception_ptr(); +void* eos_vm_oc_get_bounce_buffer_list(); + +#ifdef __cplusplus +} +#endif \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/intrinsic.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/intrinsic.hpp new file mode 100644 index 00000000000..d61af22c919 --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/intrinsic.hpp @@ -0,0 +1,28 @@ +#pragma once + +#include + +#include +#include + +namespace IR { + struct FunctionType; +} + +namespace eosio { namespace chain { namespace eosvmoc { + +struct intrinsic { + intrinsic(const char* name, const IR::FunctionType* type, void* function_ptr, size_t ordinal); +}; + +struct intrinsic_entry { + const IR::FunctionType* const type; + const void* const function_ptr; + const size_t ordinal; +}; + +using intrinsic_map_t = std::map; + +const intrinsic_map_t& get_intrinsic_map(); + +}}} \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/intrinsic_mapping.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/intrinsic_mapping.hpp new file mode 100644 index 00000000000..022df920918 --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/intrinsic_mapping.hpp @@ -0,0 +1,254 @@ +#pragma once + +#define 
BOOST_HANA_CONFIG_ENABLE_STRING_UDL +#include +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +using namespace boost::hana::literals; + +//NEVER reorder or remove indexes; the PIC uses the indexes in this table as an offset in to a jump +// table. Adding on the bottom is fine and requires no other updates elsewhere +constexpr auto intrinsic_table = boost::hana::make_tuple( + "eosvmoc_internal.unreachable"_s, + "eosvmoc_internal.grow_memory"_s, + "eosvmoc_internal.div0_or_overflow"_s, + "eosvmoc_internal.indirect_call_mismatch"_s, + "eosvmoc_internal.indirect_call_oob"_s, + "eosvmoc_internal.depth_assert"_s, + "eosio_injection.call_depth_assert"_s, //these two are never used by EOS VM OC but all intrinsics + "eosio_injection.checktime"_s, //must be mapped + "env.__ashlti3"_s, + "env.__ashrti3"_s, + "env.__lshlti3"_s, + "env.__lshrti3"_s, + "env.__divti3"_s, + "env.__udivti3"_s, + "env.__modti3"_s, + "env.__umodti3"_s, + "env.__multi3"_s, + "env.__addtf3"_s, + "env.__subtf3"_s, + "env.__multf3"_s, + "env.__divtf3"_s, + "env.__eqtf2"_s, + "env.__netf2"_s, + "env.__getf2"_s, + "env.__gttf2"_s, + "env.__lttf2"_s, + "env.__letf2"_s, + "env.__cmptf2"_s, + "env.__unordtf2"_s, + "env.__negtf2"_s, + "env.__floatsitf"_s, + "env.__floatunsitf"_s, + "env.__floatditf"_s, + "env.__floatunditf"_s, + "env.__floattidf"_s, + "env.__floatuntidf"_s, + "env.__floatsidf"_s, + "env.__extendsftf2"_s, + "env.__extenddftf2"_s, + "env.__fixtfti"_s, + "env.__fixtfdi"_s, + "env.__fixtfsi"_s, + "env.__fixunstfti"_s, + "env.__fixunstfdi"_s, + "env.__fixunstfsi"_s, + "env.__fixsfti"_s, + "env.__fixdfti"_s, + "env.__fixunssfti"_s, + "env.__fixunsdfti"_s, + "env.__trunctfdf2"_s, + "env.__trunctfsf2"_s, + "env.is_feature_active"_s, + "env.activate_feature"_s, + "env.get_resource_limits"_s, + "env.set_resource_limits"_s, + "env.set_proposed_producers"_s, + "env.set_proposed_producers_ex"_s, + "env.get_blockchain_parameters_packed"_s, + "env.set_blockchain_parameters_packed"_s, + 
"env.is_privileged"_s, + "env.set_privileged"_s, + "env.preactivate_feature"_s, + "env.get_active_producers"_s, + "env.db_store_i64"_s, + "env.db_update_i64"_s, + "env.db_remove_i64"_s, + "env.db_get_i64"_s, + "env.db_next_i64"_s, + "env.db_previous_i64"_s, + "env.db_find_i64"_s, + "env.db_lowerbound_i64"_s, + "env.db_upperbound_i64"_s, + "env.db_end_i64"_s, + "env.db_idx64_store"_s, + "env.db_idx64_remove"_s, + "env.db_idx64_update"_s, + "env.db_idx64_find_primary"_s, + "env.db_idx64_find_secondary"_s, + "env.db_idx64_lowerbound"_s, + "env.db_idx64_upperbound"_s, + "env.db_idx64_end"_s, + "env.db_idx64_next"_s, + "env.db_idx64_previous"_s, + "env.db_idx128_store"_s, + "env.db_idx128_remove"_s, + "env.db_idx128_update"_s, + "env.db_idx128_find_primary"_s, + "env.db_idx128_find_secondary"_s, + "env.db_idx128_lowerbound"_s, + "env.db_idx128_upperbound"_s, + "env.db_idx128_end"_s, + "env.db_idx128_next"_s, + "env.db_idx128_previous"_s, + "env.db_idx_double_store"_s, + "env.db_idx_double_remove"_s, + "env.db_idx_double_update"_s, + "env.db_idx_double_find_primary"_s, + "env.db_idx_double_find_secondary"_s, + "env.db_idx_double_lowerbound"_s, + "env.db_idx_double_upperbound"_s, + "env.db_idx_double_end"_s, + "env.db_idx_double_next"_s, + "env.db_idx_double_previous"_s, + "env.db_idx_long_double_store"_s, + "env.db_idx_long_double_remove"_s, + "env.db_idx_long_double_update"_s, + "env.db_idx_long_double_find_primary"_s, + "env.db_idx_long_double_find_secondary"_s, + "env.db_idx_long_double_lowerbound"_s, + "env.db_idx_long_double_upperbound"_s, + "env.db_idx_long_double_end"_s, + "env.db_idx_long_double_next"_s, + "env.db_idx_long_double_previous"_s, + "env.db_idx256_store"_s, + "env.db_idx256_remove"_s, + "env.db_idx256_update"_s, + "env.db_idx256_find_primary"_s, + "env.db_idx256_find_secondary"_s, + "env.db_idx256_lowerbound"_s, + "env.db_idx256_upperbound"_s, + "env.db_idx256_end"_s, + "env.db_idx256_next"_s, + "env.db_idx256_previous"_s, + 
"env.assert_recover_key"_s, + "env.recover_key"_s, + "env.assert_sha256"_s, + "env.assert_sha1"_s, + "env.assert_sha512"_s, + "env.assert_ripemd160"_s, + "env.sha1"_s, + "env.sha256"_s, + "env.sha512"_s, + "env.ripemd160"_s, + "env.check_transaction_authorization"_s, + "env.check_permission_authorization"_s, + "env.get_permission_last_used"_s, + "env.get_account_creation_time"_s, + "env.current_time"_s, + "env.publication_time"_s, + "env.is_feature_activated"_s, + "env.get_sender"_s, + "env.abort"_s, + "env.eosio_assert"_s, + "env.eosio_assert_message"_s, + "env.eosio_assert_code"_s, + "env.eosio_exit"_s, + "env.read_action_data"_s, + "env.action_data_size"_s, + "env.current_receiver"_s, + "env.require_recipient"_s, + "env.require_auth"_s, + "env.require_auth2"_s, + "env.has_auth"_s, + "env.is_account"_s, + "env.prints"_s, + "env.prints_l"_s, + "env.printi"_s, + "env.printui"_s, + "env.printi128"_s, + "env.printui128"_s, + "env.printsf"_s, + "env.printdf"_s, + "env.printqf"_s, + "env.printn"_s, + "env.printhex"_s, + "env.read_transaction"_s, + "env.transaction_size"_s, + "env.expiration"_s, + "env.tapos_block_prefix"_s, + "env.tapos_block_num"_s, + "env.get_action"_s, + "env.send_inline"_s, + "env.send_context_free_inline"_s, + "env.send_deferred"_s, + "env.cancel_deferred"_s, + "env.get_context_free_data"_s, + "env.memcpy"_s, + "env.memmove"_s, + "env.memcmp"_s, + "env.memset"_s, + "eosio_injection._eosio_f32_add"_s, + "eosio_injection._eosio_f32_sub"_s, + "eosio_injection._eosio_f32_mul"_s, + "eosio_injection._eosio_f32_div"_s, + "eosio_injection._eosio_f32_min"_s, + "eosio_injection._eosio_f32_max"_s, + "eosio_injection._eosio_f32_copysign"_s, + "eosio_injection._eosio_f32_abs"_s, + "eosio_injection._eosio_f32_neg"_s, + "eosio_injection._eosio_f32_sqrt"_s, + "eosio_injection._eosio_f32_ceil"_s, + "eosio_injection._eosio_f32_floor"_s, + "eosio_injection._eosio_f32_trunc"_s, + "eosio_injection._eosio_f32_nearest"_s, + "eosio_injection._eosio_f32_eq"_s, + 
"eosio_injection._eosio_f32_ne"_s, + "eosio_injection._eosio_f32_lt"_s, + "eosio_injection._eosio_f32_le"_s, + "eosio_injection._eosio_f32_gt"_s, + "eosio_injection._eosio_f32_ge"_s, + "eosio_injection._eosio_f64_add"_s, + "eosio_injection._eosio_f64_sub"_s, + "eosio_injection._eosio_f64_mul"_s, + "eosio_injection._eosio_f64_div"_s, + "eosio_injection._eosio_f64_min"_s, + "eosio_injection._eosio_f64_max"_s, + "eosio_injection._eosio_f64_copysign"_s, + "eosio_injection._eosio_f64_abs"_s, + "eosio_injection._eosio_f64_neg"_s, + "eosio_injection._eosio_f64_sqrt"_s, + "eosio_injection._eosio_f64_ceil"_s, + "eosio_injection._eosio_f64_floor"_s, + "eosio_injection._eosio_f64_trunc"_s, + "eosio_injection._eosio_f64_nearest"_s, + "eosio_injection._eosio_f64_eq"_s, + "eosio_injection._eosio_f64_ne"_s, + "eosio_injection._eosio_f64_lt"_s, + "eosio_injection._eosio_f64_le"_s, + "eosio_injection._eosio_f64_gt"_s, + "eosio_injection._eosio_f64_ge"_s, + "eosio_injection._eosio_f32_promote"_s, + "eosio_injection._eosio_f64_demote"_s, + "eosio_injection._eosio_f32_trunc_i32s"_s, + "eosio_injection._eosio_f64_trunc_i32s"_s, + "eosio_injection._eosio_f32_trunc_i32u"_s, + "eosio_injection._eosio_f64_trunc_i32u"_s, + "eosio_injection._eosio_f32_trunc_i64s"_s, + "eosio_injection._eosio_f64_trunc_i64s"_s, + "eosio_injection._eosio_f32_trunc_i64u"_s, + "eosio_injection._eosio_f64_trunc_i64u"_s, + "eosio_injection._eosio_i32_to_f32"_s, + "eosio_injection._eosio_i64_to_f32"_s, + "eosio_injection._eosio_ui32_to_f32"_s, + "eosio_injection._eosio_ui64_to_f32"_s, + "eosio_injection._eosio_i32_to_f64"_s, + "eosio_injection._eosio_i64_to_f64"_s, + "eosio_injection._eosio_ui32_to_f64"_s, + "eosio_injection._eosio_ui64_to_f64"_s +); + +}}} \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/ipc_helpers.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/ipc_helpers.hpp new file mode 100644 index 00000000000..929bc19d66f --- /dev/null +++ 
b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/ipc_helpers.hpp @@ -0,0 +1,68 @@ +#pragma once + +#include + +#include + +#include + +#include +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +class wrapped_fd { + public: + wrapped_fd() : _inuse(false) {} + wrapped_fd(int fd) : _inuse(true), _fd(fd) {} + wrapped_fd(const wrapped_fd&) = delete; + wrapped_fd& operator=(const wrapped_fd&) = delete; + wrapped_fd(wrapped_fd&& other) : _inuse(other._inuse), _fd(other._fd) {other._inuse = false;} + wrapped_fd& operator=(wrapped_fd&& other) { + _inuse = other._inuse; + _fd = other._fd; + other._inuse = false; + return *this; + } + + operator int() const { + FC_ASSERT(_inuse, "trying to get the value of a not-in-use wrappedfd"); + return _fd; + } + + int release() { + _inuse = false; + return _fd; + } + + ~wrapped_fd() { + if(_inuse) + close(_fd); + } + + private: + bool _inuse = false;; + int _fd; +}; + +std::tuple> read_message_with_fds(boost::asio::local::datagram_protocol::socket& s); +std::tuple> read_message_with_fds(int fd); +bool write_message_with_fds(boost::asio::local::datagram_protocol::socket& s, const eosvmoc_message& message, const std::vector& fds = std::vector()); +bool write_message_with_fds(int fd_to_send_to, const eosvmoc_message& message, const std::vector& fds = std::vector()); + +template +wrapped_fd memfd_for_bytearray(const T& bytes) { + int fd = syscall(SYS_memfd_create, "eosvmoc_code", MFD_CLOEXEC); + FC_ASSERT(fd >= 0, "Failed to create memfd"); + FC_ASSERT(ftruncate(fd, bytes.size()) == 0, "failed to grow memfd"); + if(bytes.size()) { + uint8_t* b = (uint8_t*)mmap(nullptr, bytes.size(), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); + FC_ASSERT(b != MAP_FAILED, "failed to mmap memfd"); + memcpy(b, bytes.data(), bytes.size()); + munmap(b, bytes.size()); + } + return wrapped_fd(fd); +} + +std::vector vector_for_memfd(const wrapped_fd& memfd); +}}} \ No newline at end of file diff --git 
a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/ipc_protocol.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/ipc_protocol.hpp new file mode 100644 index 00000000000..b38c3bce64b --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/ipc_protocol.hpp @@ -0,0 +1,71 @@ +#pragma once + +#include +#include +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +struct initialize_message { + //Two sent fds: 1) communication socket for this instance 2) the cache file +}; + +struct initalize_response_message { + fc::optional error_message; //no error message? everything groovy +}; + +struct code_tuple { + eosio::chain::digest_type code_id; + uint8_t vm_version; + bool operator==(const code_tuple& o) const {return o.code_id == code_id && o.vm_version == vm_version;} +}; + +struct compile_wasm_message { + code_tuple code; + //Two sent fd: 1) communication socket for result, 2) the wasm to compile +}; + +struct evict_wasms_message { + std::vector codes; +}; + +struct code_compilation_result_message { + eosvmoc_optional_offset_or_import_t start; + unsigned apply_offset; + int starting_memory_pages; + unsigned initdata_prologue_size; + //Two sent fds: 1) wasm code, 2) initial memory snapshot +}; + + +struct compilation_result_unknownfailure {}; +struct compilation_result_toofull {}; + +using wasm_compilation_result = fc::static_variant; + +struct wasm_compilation_result_message { + code_tuple code; + wasm_compilation_result result; + size_t cache_free_bytes; +}; + +using eosvmoc_message = fc::static_variant; +}}} + +FC_REFLECT(eosio::chain::eosvmoc::initialize_message, ) +FC_REFLECT(eosio::chain::eosvmoc::initalize_response_message, (error_message)) +FC_REFLECT(eosio::chain::eosvmoc::code_tuple, (code_id)(vm_version)) +FC_REFLECT(eosio::chain::eosvmoc::compile_wasm_message, (code)) +FC_REFLECT(eosio::chain::eosvmoc::evict_wasms_message, (codes)) +FC_REFLECT(eosio::chain::eosvmoc::code_compilation_result_message, 
(start)(apply_offset)(starting_memory_pages)(initdata_prologue_size)) +FC_REFLECT(eosio::chain::eosvmoc::compilation_result_unknownfailure, ) +FC_REFLECT(eosio::chain::eosvmoc::compilation_result_toofull, ) +FC_REFLECT(eosio::chain::eosvmoc::wasm_compilation_result_message, (code)(result)(cache_free_bytes)) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/memory.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/memory.hpp new file mode 100644 index 00000000000..509e183bd80 --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/memory.hpp @@ -0,0 +1,65 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +class memory { + static constexpr uint64_t wasm_memory_size = eosio::chain::wasm_constraints::maximum_linear_memory; + static constexpr uint64_t intrinsic_count = boost::hana::length(intrinsic_table); + //warning: changing the following 3 params will invalidate existing PIC + static constexpr uint64_t mutable_global_size = 8u * eosio::chain::wasm_constraints::maximum_mutable_globals/4u; + static constexpr uint64_t table_size = 16u * eosio::chain::wasm_constraints::maximum_table_elements; + static constexpr size_t wcb_allowance = 512u; + static_assert(sizeof(control_block) <= wcb_allowance, "EOS VM OC memory doesn't set aside enough memory for control block"); + + //round up the prologue to multiple of 4K page + static constexpr uint64_t memory_prologue_size = ((memory::wcb_allowance + mutable_global_size + table_size + intrinsic_count*UINT64_C(8))+UINT64_C(4095))/UINT64_C(4096)*UINT64_C(4096); + //prologue + 33MB + 4GB fault buffer + 4096 addtional buffer for safety + static constexpr uint64_t total_memory_per_slice = memory_prologue_size + wasm_memory_size + UINT64_C(0x100000000) + UINT64_C(4096); + + static constexpr uint64_t number_slices = wasm_memory_size/(64u*1024u)+1u; + + public: + 
memory(); + ~memory(); + + uint8_t* const zero_page_memory_base() const { return zeropage_base; } + uint8_t* const full_page_memory_base() const { return fullpage_base; } + + control_block* const get_control_block() const { return reinterpret_cast(zeropage_base - cb_offset);} + + //these two are really only inteded for SEGV handling + uint8_t* const start_of_memory_slices() const { return mapbase; } + size_t size_of_memory_slice_mapping() const { return mapsize; } + + //to obtain memory protected for n wasm-pages, use the pointer computed from: + // zero_page_memory_base()+stride*n + static constexpr size_t stride = total_memory_per_slice; + + //offsets to various interesting things in the memory + static constexpr uintptr_t linear_memory = 0; + static constexpr uintptr_t cb_offset = wcb_allowance + mutable_global_size + table_size; + static constexpr uintptr_t first_intrinsic_offset = cb_offset + 8u; + + static_assert(-cb_offset == EOS_VM_OC_CONTROL_BLOCK_OFFSET, "EOS VM OC control block offset has slid out of place somehow"); + static_assert(stride == EOS_VM_OC_MEMORY_STRIDE, "EOS VM OC memory stride has slid out of place somehow"); + + private: + uint8_t* mapbase; + uint64_t mapsize; + + uint8_t* zeropage_base; + uint8_t* fullpage_base; +}; + +}}} + +#define OFFSET_OF_CONTROL_BLOCK_MEMBER(M) (-(int)eosio::chain::eosvmoc::memory::cb_offset + (int)offsetof(eosio::chain::eosvmoc::control_block, M)) +#define OFFSET_OF_FIRST_INTRINSIC ((int)-eosio::chain::eosvmoc::memory::first_intrinsic_offset) \ No newline at end of file diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp new file mode 100644 index 00000000000..5119a5c386f --- /dev/null +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm.hpp @@ -0,0 +1,150 @@ +#pragma once + +#if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) + +#include +#include +#include +#include +#include + +//eos-vm 
includes +#include + +// eosio specific specializations +namespace eosio { namespace vm { + + template<> + struct wasm_type_converter { + static auto from_wasm(uint64_t val) { + return eosio::chain::name{val}; + } + static auto to_wasm(eosio::chain::name val) { + return val.to_uint64_t(); + } + }; + + template + struct wasm_type_converter : linear_memory_access { + auto from_wasm(void* val) { + validate_ptr(val, 1); + return eosio::vm::aligned_ptr_wrapper{val}; + } + }; + + template<> + struct wasm_type_converter : linear_memory_access { + void* to_wasm(char* val) { + validate_ptr(val, 1); + return val; + } + }; + + template + struct wasm_type_converter : linear_memory_access { + auto from_wasm(uint32_t val) { + EOS_VM_ASSERT( val != 0, wasm_memory_exception, "references cannot be created for null pointers" ); + void* ptr = get_ptr(val); + validate_ptr(ptr, 1); + return eosio::vm::aligned_ref_wrapper{ptr}; + } + }; + + template + struct wasm_type_converter> : linear_memory_access { + auto from_wasm(void* ptr, uint32_t size) { + validate_ptr(ptr, size); + return aligned_array_wrapper(ptr, size); + } + }; + + template<> + struct wasm_type_converter> : linear_memory_access { + auto from_wasm(void* ptr, uint32_t size) { + validate_ptr(ptr, size); + return eosio::chain::array_ptr((char*)ptr); + } + // memcpy/memmove + auto from_wasm(void* ptr, eosio::chain::array_ptr /*src*/, uint32_t size) { + validate_ptr(ptr, size); + return eosio::chain::array_ptr((char*)ptr); + } + // memset + auto from_wasm(void* ptr, int /*val*/, uint32_t size) { + validate_ptr(ptr, size); + return eosio::chain::array_ptr((char*)ptr); + } + }; + + template<> + struct wasm_type_converter> : linear_memory_access { + auto from_wasm(void* ptr, uint32_t size) { + validate_ptr(ptr, size); + return eosio::chain::array_ptr((char*)ptr); + } + // memcmp + auto from_wasm(void* ptr, eosio::chain::array_ptr /*src*/, uint32_t size) { + validate_ptr(ptr, size); + return eosio::chain::array_ptr((char*)ptr); + } 
+ }; + + template + struct construct_derived { + static auto &value(Ctx& ctx) { return ctx.trx_context; } + }; + + template <> + struct construct_derived { + static auto &value(eosio::chain::apply_context& ctx) { return ctx; } + }; + + template<> + struct wasm_type_converter : linear_memory_access { + auto from_wasm(void* ptr) { + validate_c_str(ptr); + return eosio::chain::null_terminated_ptr{ static_cast(ptr) }; + } + }; + +}} // ns eosio::vm + +namespace eosio { namespace chain { namespace webassembly { namespace eos_vm_runtime { + +using namespace fc; +using namespace eosio::vm; +using namespace eosio::chain::webassembly::common; + +template +class eos_vm_runtime : public eosio::chain::wasm_runtime_interface { + public: + eos_vm_runtime(); + bool inject_module(IR::Module&) override; + std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector, + const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) override; + + void immediately_exit_currently_running_module() override; + + private: + // todo: managing this will get more complicated with sync calls; + // immediately_exit_currently_running_module() should probably + // move from wasm_runtime_interface to wasm_instantiated_module_interface. 
+ backend* _bkend = nullptr; // non owning pointer to allow for immediate exit + + template + friend class eos_vm_instantiated_module; +}; + +} } } }// eosio::chain::webassembly::wabt_runtime + +#define __EOS_VM_INTRINSIC_NAME(LBL, SUF) LBL##SUF +#define _EOS_VM_INTRINSIC_NAME(LBL, SUF) __INTRINSIC_NAME(LBL, SUF) + +#define _REGISTER_EOS_VM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) \ + eosio::vm::registered_function _EOS_VM_INTRINSIC_NAME(__eos_vm_intrinsic_fn, __COUNTER__)(std::string(MOD), std::string(NAME)); + +#else + +#define _REGISTER_EOS_VM_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) + +#endif diff --git a/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp b/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp index 2a9b8119b67..882ea2169da 100644 --- a/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp @@ -2,6 +2,10 @@ #include #include +namespace IR { + struct Module; +} + namespace eosio { namespace chain { class apply_context; @@ -15,7 +19,9 @@ class wasm_instantiated_module_interface { class wasm_runtime_interface { public: - virtual std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) = 0; + virtual bool inject_module(IR::Module& module) = 0; + virtual std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory, + const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) = 0; //immediately exit the currently running wasm_instantiated_module_interface. Yep, this assumes only one can possibly run at a time. 
virtual void immediately_exit_currently_running_module() = 0; @@ -23,4 +29,4 @@ class wasm_runtime_interface { virtual ~wasm_runtime_interface(); }; -}} \ No newline at end of file +}} diff --git a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp index 31456dc1dda..ac0339fdfc2 100644 --- a/libraries/chain/include/eosio/chain/webassembly/wabt.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/wabt.hpp @@ -35,7 +35,7 @@ struct intrinsic_registrator { struct intrinsic_func_info { FuncSignature sig; intrinsic_fn func; - }; + }; static auto& get_map(){ static map> _map; @@ -50,7 +50,9 @@ struct intrinsic_registrator { class wabt_runtime : public eosio::chain::wasm_runtime_interface { public: wabt_runtime(); - std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) override; + bool inject_module(IR::Module&) override; + std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory, + const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) override; void immediately_exit_currently_running_module() override; @@ -188,7 +190,7 @@ inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const dou inline auto convert_native_to_literal(const wabt_apply_instance_vars&, const name &val) { TypedValue tv(Type::I64); - tv.set_i64(val.value); + tv.set_i64(val.to_uint64_t()); return tv; } @@ -362,9 +364,9 @@ struct intrinsic_invoker_impl> { * @tparam Inputs - the remaining native parameters to transcribe */ template -struct intrinsic_invoker_impl, size_t, Inputs...>> { +struct intrinsic_invoker_impl, uint32_t, Inputs...>> { using next_step = intrinsic_invoker_impl>; - using then_type = Ret(*)(wabt_apply_instance_vars&, array_ptr, size_t, Inputs..., const TypedValues&, int); + using then_type = Ret(*)(wabt_apply_instance_vars&, array_ptr, uint32_t, Inputs..., const TypedValues&, int); 
template static auto translate_one(wabt_apply_instance_vars& vars, Inputs... rest, const TypedValues& args, int offset) -> std::enable_if_t::value, Ret> { @@ -395,13 +397,13 @@ struct intrinsic_invoker_impl, size_t, Inputs...>> std::vector > copy(length > 0 ? length : 1); T* copy_ptr = ©[0]; memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); - Ret ret = Then(vars, static_cast>(copy_ptr), length, rest..., args, (uint32_t)offset - 2); + Ret ret = Then(vars, static_cast>(copy_ptr), length, rest..., args, (uint32_t)offset - 2); memcpy( (void*)base, (void*)copy_ptr, length * sizeof(T) ); return ret; } return Then(vars, static_cast>(base), length, rest..., args, (uint32_t)offset - 2); }; - + template static const auto fn() { return next_step::template fn>(); @@ -442,9 +444,9 @@ struct intrinsic_invoker_impl> { * @tparam Inputs - the remaining native parameters to transcribe */ template -struct intrinsic_invoker_impl, array_ptr, size_t, Inputs...>> { +struct intrinsic_invoker_impl, array_ptr, uint32_t, Inputs...>> { using next_step = intrinsic_invoker_impl>; - using then_type = Ret(*)(wabt_apply_instance_vars&, array_ptr, array_ptr, size_t, Inputs..., const TypedValues&, int); + using then_type = Ret(*)(wabt_apply_instance_vars&, array_ptr, array_ptr, uint32_t, Inputs..., const TypedValues&, int); template static Ret translate_one(wabt_apply_instance_vars& vars, Inputs... 
rest, const TypedValues& args, int offset) { @@ -468,9 +470,9 @@ struct intrinsic_invoker_impl, array_ptr, size_t * @tparam Inputs - the remaining native parameters to transcribe */ template -struct intrinsic_invoker_impl, int, size_t>> { +struct intrinsic_invoker_impl, int, uint32_t>> { using next_step = intrinsic_invoker_impl>; - using then_type = Ret(*)(wabt_apply_instance_vars&, array_ptr, int, size_t, const TypedValues&, int); + using then_type = Ret(*)(wabt_apply_instance_vars&, array_ptr, int, uint32_t, const TypedValues&, int); template static Ret translate_one(wabt_apply_instance_vars& vars, const TypedValues& args, int offset) { @@ -525,7 +527,7 @@ struct intrinsic_invoker_impl> { memcpy( (void*)©, (void*)base, sizeof(T) ); Ret ret = Then(vars, ©, rest..., args, (uint32_t)offset - 1); memcpy( (void*)base, (void*)©, sizeof(T) ); - return ret; + return ret; } return Then(vars, base, rest..., args, (uint32_t)offset - 1); }; @@ -632,7 +634,7 @@ struct intrinsic_invoker_impl> { memcpy( (void*)©, (void*)base, sizeof(T) ); Ret ret = Then(vars, copy, rest..., args, (uint32_t)offset - 1); memcpy( (void*)base, (void*)©, sizeof(T) ); - return ret; + return ret; } return Then(vars, *base, rest..., args, (uint32_t)offset - 1); } @@ -686,23 +688,48 @@ struct intrinsic_function_invoker { template struct intrinsic_function_invoker_wrapper; +template +struct void_ret_wrapper { + using type = T; +}; + +template<> +struct void_ret_wrapper { + using type = char; +}; + +template +using void_ret_wrapper_t = typename void_ret_wrapper::type; + template struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); using type = intrinsic_function_invoker; }; template struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && 
alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); using type = intrinsic_function_invoker; }; template struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); using type = intrinsic_function_invoker; }; template struct intrinsic_function_invoker_wrapper { + static_assert( !(std::is_pointer_v && alignof(std::remove_pointer_t>) != 1) && + !(std::is_lvalue_reference_v && alignof(std::remove_reference_t>) != 1), + "intrinsics should only return a reference or pointer with single byte alignment"); using type = intrinsic_function_invoker; }; diff --git a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp index 5bce9db8b40..7dde4a443bd 100644 --- a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp @@ -20,7 +20,9 @@ class wavm_runtime : public eosio::chain::wasm_runtime_interface { public: wavm_runtime(); ~wavm_runtime(); - std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) override; + bool inject_module(IR::Module&) override; + std::unique_ptr instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory, + const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) override; void immediately_exit_currently_running_module() override; }; @@ -32,148 +34,6 @@ struct running_instance_context { }; extern running_instance_context the_running_instance_context; -/** - * class to represent an in-wasm-memory array - * it is a hint to the transcriber that the next parameter 
will - * be a size (in Ts) and that the pair are validated together - * This triggers the template specialization of intrinsic_invoker_impl - * @tparam T - */ -template -inline array_ptr array_ptr_impl (running_instance_context& ctx, U32 ptr, size_t length) -{ - MemoryInstance* mem = ctx.memory; - if (!mem) - Runtime::causeException(Exception::Cause::accessViolation); - - size_t mem_total = IR::numBytesPerPage * Runtime::getMemoryNumPages(mem); - if (ptr >= mem_total || length > (mem_total - ptr) / sizeof(T)) - Runtime::causeException(Exception::Cause::accessViolation); - - T* ret_ptr = (T*)(getMemoryBaseAddress(mem) + ptr); - - return array_ptr((T*)(getMemoryBaseAddress(mem) + ptr)); -} - -/** - * class to represent an in-wasm-memory char array that must be null terminated - */ -inline null_terminated_ptr null_terminated_ptr_impl(running_instance_context& ctx, U32 ptr) -{ - MemoryInstance* mem = ctx.memory; - if(!mem) - Runtime::causeException(Exception::Cause::accessViolation); - - char *value = (char*)(getMemoryBaseAddress(mem) + ptr); - const char* p = value; - const char* const top_of_memory = (char*)(getMemoryBaseAddress(mem) + IR::numBytesPerPage*Runtime::getMemoryNumPages(mem)); - while(p < top_of_memory) - if(*p++ == '\0') - return null_terminated_ptr(value); - - Runtime::causeException(Exception::Cause::accessViolation); -} - - -/** - * template that maps native types to WASM VM types - * @tparam T the native type - */ -template -struct native_to_wasm { - using type = void; -}; - -/** - * specialization for mapping pointers to int32's - */ -template -struct native_to_wasm { - using type = I32; -}; - -/** - * Mappings for native types - */ -template<> -struct native_to_wasm { - using type = F32; -}; -template<> -struct native_to_wasm { - using type = F64; -}; -template<> -struct native_to_wasm { - using type = I32; -}; -template<> -struct native_to_wasm { - using type = I32; -}; -template<> -struct native_to_wasm { - using type = I64; -}; -template<> 
-struct native_to_wasm { - using type = I64; -}; -template<> -struct native_to_wasm { - using type = I32; -}; -template<> -struct native_to_wasm { - using type = I64; -}; -template<> -struct native_to_wasm { - using type = I64; -}; -template<> -struct native_to_wasm { - using type = I32; -}; - -template<> -struct native_to_wasm { - using type = I32; -}; - -// convenience alias -template -using native_to_wasm_t = typename native_to_wasm::type; - -template -inline auto convert_native_to_wasm(const running_instance_context& ctx, T val) { - return native_to_wasm_t(val); -} - -inline auto convert_native_to_wasm(const running_instance_context& ctx, const name &val) { - return native_to_wasm_t(val.value); -} - -inline auto convert_native_to_wasm(const running_instance_context& ctx, const fc::time_point_sec& val) { - return native_to_wasm_t(val.sec_since_epoch()); -} - -inline auto convert_native_to_wasm(running_instance_context& ctx, char* ptr) { - MemoryInstance* mem = ctx.memory; - if(!mem) - Runtime::causeException(Exception::Cause::accessViolation); - char* base = (char*)getMemoryBaseAddress(mem); - char* top_of_memory = base + IR::numBytesPerPage*Runtime::getMemoryNumPages(mem); - if(ptr < base || ptr >= top_of_memory) - Runtime::causeException(Exception::Cause::accessViolation); - return (U32)(ptr - base); -} - -template -inline auto convert_wasm_to_native(native_to_wasm_t val) { - return T(val); -} - template struct wasm_to_value_type; @@ -282,428 +142,6 @@ struct wasm_function_type_provider { } }; -/** - * Forward declaration of the invoker type which transcribes arguments to/from a native method - * and injects the appropriate checks - * - * @tparam Ret - the return type of the native function - * @tparam NativeParameters - a std::tuple of the remaining native parameters to transcribe - * @tparam WasmParameters - a std::tuple of the transribed parameters - */ -template -struct intrinsic_invoker_impl; - -/** - * Specialization for the fully transcribed signature - 
* @tparam Ret - the return type of the native function - * @tparam Translated - the arguments to the wasm function - */ -template -struct intrinsic_invoker_impl, std::tuple> { - using next_method_type = Ret (*)(running_instance_context &, Translated...); - - template - static native_to_wasm_t invoke(Translated... translated) { - try { - return convert_native_to_wasm(the_running_instance_context, Method(the_running_instance_context, translated...)); - } - catch(...) { - Platform::immediately_exit(std::current_exception()); - } - } - - template - static const auto fn() { - return invoke; - } -}; - -/** - * specialization of the fully transcribed signature for void return values - * @tparam Translated - the arguments to the wasm function - */ -template -struct intrinsic_invoker_impl, std::tuple> { - using next_method_type = void_type (*)(running_instance_context &, Translated...); - - template - static void invoke(Translated... translated) { - try { - Method(the_running_instance_context, translated...); - } - catch(...) { - Platform::immediately_exit(std::current_exception()); - } - } - - template - static const auto fn() { - return invoke; - } -}; - -/** - * Sepcialization for transcribing a simple type in the native method signature - * @tparam Ret - the return type of the native method - * @tparam Input - the type of the native parameter to transcribe - * @tparam Inputs - the remaining native parameters to transcribe - * @tparam Translated - the list of transcribed wasm parameters - */ -template -struct intrinsic_invoker_impl, std::tuple> { - using translated_type = native_to_wasm_t; - using next_step = intrinsic_invoker_impl, std::tuple>; - using then_type = Ret (*)(running_instance_context &, Input, Inputs..., Translated...); - - template - static Ret translate_one(running_instance_context& ctx, Inputs... rest, Translated... 
translated, translated_type last) { - auto native = convert_wasm_to_native(last); - return Then(ctx, native, rest..., translated...); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a array_ptr type in the native method signature - * This type transcribes into 2 wasm parameters: a pointer and byte length and checks the validity of that memory - * range before dispatching to the native method - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - * @tparam Translated - the list of transcribed wasm parameters - */ -template -struct intrinsic_invoker_impl, size_t, Inputs...>, std::tuple> { - using next_step = intrinsic_invoker_impl, std::tuple>; - using then_type = Ret(*)(running_instance_context&, array_ptr, size_t, Inputs..., Translated...); - - template - static auto translate_one(running_instance_context& ctx, Inputs... rest, Translated... translated, I32 ptr, I32 size) -> std::enable_if_t::value, Ret> { - static_assert(!std::is_pointer::value, "Currently don't support array of pointers"); - const auto length = size_t(size); - T* base = array_ptr_impl(ctx, (U32)ptr, length); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - if(ctx.apply_ctx->control.contracts_console()) - wlog( "misaligned array of const values" ); - std::vector > copy(length > 0 ? length : 1); - T* copy_ptr = ©[0]; - memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); - return Then(ctx, static_cast>(copy_ptr), length, rest..., translated...); - } - return Then(ctx, static_cast>(base), length, rest..., translated...); - }; - - template - static auto translate_one(running_instance_context& ctx, Inputs... rest, Translated... 
translated, I32 ptr, I32 size) -> std::enable_if_t::value, Ret> { - static_assert(!std::is_pointer::value, "Currently don't support array of pointers"); - const auto length = size_t(size); - T* base = array_ptr_impl(ctx, (U32)ptr, length); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - if(ctx.apply_ctx->control.contracts_console()) - wlog( "misaligned array of values" ); - std::vector > copy(length > 0 ? length : 1); - T* copy_ptr = ©[0]; - memcpy( (void*)copy_ptr, (void*)base, length * sizeof(T) ); - Ret ret = Then(ctx, static_cast>(copy_ptr), length, rest..., translated...); - memcpy( (void*)base, (void*)copy_ptr, length * sizeof(T) ); - return ret; - } - return Then(ctx, static_cast>(base), length, rest..., translated...); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a null_terminated_ptr type in the native method signature - * This type transcribes 1 wasm parameters: a char pointer which is validated to contain - * a null value before the end of the allocated memory. - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - * @tparam Translated - the list of transcribed wasm parameters - */ -template -struct intrinsic_invoker_impl, std::tuple> { - using next_step = intrinsic_invoker_impl, std::tuple>; - using then_type = Ret(*)(running_instance_context&, null_terminated_ptr, Inputs..., Translated...); - - template - static Ret translate_one(running_instance_context& ctx, Inputs... rest, Translated... 
translated, I32 ptr) { - return Then(ctx, null_terminated_ptr_impl(ctx, (U32)ptr), rest..., translated...); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a pair of array_ptr types in the native method signature that share size - * This type transcribes into 3 wasm parameters: 2 pointers and byte length and checks the validity of those memory - * ranges before dispatching to the native method - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - * @tparam Translated - the list of transcribed wasm parameters - */ -template -struct intrinsic_invoker_impl, array_ptr, size_t, Inputs...>, std::tuple> { - using next_step = intrinsic_invoker_impl, std::tuple>; - using then_type = Ret(*)(running_instance_context&, array_ptr, array_ptr, size_t, Inputs..., Translated...); - - template - static Ret translate_one(running_instance_context& ctx, Inputs... rest, Translated... 
translated, I32 ptr_t, I32 ptr_u, I32 size) { - static_assert(std::is_same, char>::value && std::is_same, char>::value, "Currently only support array of (const)chars"); - const auto length = size_t(size); - return Then(ctx, array_ptr_impl(ctx, (U32)ptr_t, length), array_ptr_impl(ctx, (U32)ptr_u, length), length, rest..., translated...); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing memset parameters - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - * @tparam Translated - the list of transcribed wasm parameters - */ -template -struct intrinsic_invoker_impl, int, size_t>, std::tuple<>> { - using next_step = intrinsic_invoker_impl, std::tuple>; - using then_type = Ret(*)(running_instance_context&, array_ptr, int, size_t); - - template - static Ret translate_one(running_instance_context& ctx, I32 ptr, I32 value, I32 size) { - const auto length = size_t(size); - return Then(ctx, array_ptr_impl(ctx, (U32)ptr, length), value, length); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a pointer type in the native method signature - * This type transcribes into an int32 pointer checks the validity of that memory - * range before dispatching to the native method - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - * @tparam Translated - the list of transcribed wasm parameters - */ -template -struct intrinsic_invoker_impl, std::tuple> { - using next_step = intrinsic_invoker_impl, std::tuple>; - using then_type = Ret (*)(running_instance_context&, T *, Inputs..., Translated...); - - template - static auto translate_one(running_instance_context& ctx, Inputs... rest, Translated... 
translated, I32 ptr) -> std::enable_if_t::value, Ret> { - T* base = array_ptr_impl(ctx, (U32)ptr, 1); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - if(ctx.apply_ctx->control.contracts_console()) - wlog( "misaligned const pointer" ); - std::remove_const_t copy; - T* copy_ptr = © - memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); - return Then(ctx, copy_ptr, rest..., translated...); - } - return Then(ctx, base, rest..., translated...); - }; - - template - static auto translate_one(running_instance_context& ctx, Inputs... rest, Translated... translated, I32 ptr) -> std::enable_if_t::value, Ret> { - T* base = array_ptr_impl(ctx, (U32)ptr, 1); - if ( reinterpret_cast(base) % alignof(T) != 0 ) { - if(ctx.apply_ctx->control.contracts_console()) - wlog( "misaligned pointer" ); - std::remove_const_t copy; - T* copy_ptr = © - memcpy( (void*)copy_ptr, (void*)base, sizeof(T) ); - Ret ret = Then(ctx, copy_ptr, rest..., translated...); - memcpy( (void*)base, (void*)copy_ptr, sizeof(T) ); - return ret; - } - return Then(ctx, base, rest..., translated...); - }; - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a reference to a name which can be passed as a native value - * This type transcribes into a native type which is loaded by value into a - * variable on the stack and then passed by reference to the intrinsic. - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - * @tparam Translated - the list of transcribed wasm parameters - */ -template -struct intrinsic_invoker_impl, std::tuple> { - using next_step = intrinsic_invoker_impl, std::tuple >>; - using then_type = Ret (*)(running_instance_context&, const name&, Inputs..., Translated...); - - template - static Ret translate_one(running_instance_context& ctx, Inputs... rest, Translated... 
translated, native_to_wasm_t wasm_value) { - auto value = name(wasm_value); - return Then(ctx, value, rest..., translated...); - } - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * Specialization for transcribing a reference type in the native method signature - * This type transcribes into an int32 pointer checks the validity of that memory - * range before dispatching to the native method - * - * @tparam Ret - the return type of the native method - * @tparam Inputs - the remaining native parameters to transcribe - * @tparam Translated - the list of transcribed wasm parameters - */ -template -struct intrinsic_invoker_impl, std::tuple> { - using next_step = intrinsic_invoker_impl, std::tuple>; - using then_type = Ret (*)(running_instance_context &, T &, Inputs..., Translated...); - - template - static auto translate_one(running_instance_context& ctx, Inputs... rest, Translated... translated, I32 ptr) -> std::enable_if_t::value, Ret> { - // references cannot be created for null pointers - EOS_ASSERT((U32)ptr != 0, wasm_exception, "references cannot be created for null pointers"); - MemoryInstance* mem = ctx.memory; - if(!mem || (U32)ptr+sizeof(T) >= IR::numBytesPerPage*Runtime::getMemoryNumPages(mem)) - Runtime::causeException(Exception::Cause::accessViolation); - T &base = *(T*)(getMemoryBaseAddress(mem)+(U32)ptr); - if ( reinterpret_cast(&base) % alignof(T) != 0 ) { - if(ctx.apply_ctx->control.contracts_console()) - wlog( "misaligned const reference" ); - std::remove_const_t copy; - T* copy_ptr = © - memcpy( (void*)copy_ptr, (void*)&base, sizeof(T) ); - return Then(ctx, *copy_ptr, rest..., translated...); - } - return Then(ctx, base, rest..., translated...); - } - - template - static auto translate_one(running_instance_context& ctx, Inputs... rest, Translated... 
translated, I32 ptr) -> std::enable_if_t::value, Ret> { - // references cannot be created for null pointers - EOS_ASSERT((U32)ptr != 0, wasm_exception, "reference cannot be created for null pointers"); - MemoryInstance* mem = ctx.memory; - if(!mem || (U32)ptr+sizeof(T) >= IR::numBytesPerPage*Runtime::getMemoryNumPages(mem)) - Runtime::causeException(Exception::Cause::accessViolation); - T &base = *(T*)(getMemoryBaseAddress(mem)+(U32)ptr); - if ( reinterpret_cast(&base) % alignof(T) != 0 ) { - if(ctx.apply_ctx->control.contracts_console()) - wlog( "misaligned reference" ); - std::remove_const_t copy; - T* copy_ptr = © - memcpy( (void*)copy_ptr, (void*)&base, sizeof(T) ); - Ret ret = Then(ctx, *copy_ptr, rest..., translated...); - memcpy( (void*)&base, (void*)copy_ptr, sizeof(T) ); - return ret; - } - return Then(ctx, base, rest..., translated...); - } - - template - static const auto fn() { - return next_step::template fn>(); - } -}; - -/** - * forward declaration of a wrapper class to call methods of the class - */ -template -struct intrinsic_function_invoker { - using impl = intrinsic_invoker_impl, std::tuple<>>; - - template - static Ret wrapper(running_instance_context& ctx, Params... params) { - class_from_wasm::value(*ctx.apply_ctx).checktime(); - return (class_from_wasm::value(*ctx.apply_ctx).*Method)(params...); - } - - template - static const WasmSig *fn() { - auto fn = impl::template fn>(); - static_assert(std::is_same::value, - "Intrinsic function signature does not match the ABI"); - return fn; - } -}; - -template -struct intrinsic_function_invoker { - using impl = intrinsic_invoker_impl, std::tuple<>>; - - template - static void_type wrapper(running_instance_context& ctx, Params... 
params) { - class_from_wasm::value(*ctx.apply_ctx).checktime(); - (class_from_wasm::value(*ctx.apply_ctx).*Method)(params...); - return void_type(); - } - - template - static const WasmSig *fn() { - auto fn = impl::template fn>(); - static_assert(std::is_same::value, - "Intrinsic function signature does not match the ABI"); - return fn; - } -}; - -template -struct intrinsic_function_invoker_wrapper; - -template -struct intrinsic_function_invoker_wrapper { - using type = intrinsic_function_invoker; -}; - -template -struct intrinsic_function_invoker_wrapper { - using type = intrinsic_function_invoker; -}; - -template -struct intrinsic_function_invoker_wrapper { - using type = intrinsic_function_invoker; -}; - -template -struct intrinsic_function_invoker_wrapper { - using type = intrinsic_function_invoker; -}; - -#define _ADD_PAREN_1(...) ((__VA_ARGS__)) _ADD_PAREN_2 -#define _ADD_PAREN_2(...) ((__VA_ARGS__)) _ADD_PAREN_1 -#define _ADD_PAREN_1_END -#define _ADD_PAREN_2_END -#define _WRAPPED_SEQ(SEQ) BOOST_PP_CAT(_ADD_PAREN_1 SEQ, _END) - #define __INTRINSIC_NAME(LABEL, SUFFIX) LABEL##SUFFIX #define _INTRINSIC_NAME(LABEL, SUFFIX) __INTRINSIC_NAME(LABEL,SUFFIX) @@ -711,8 +149,7 @@ struct intrinsic_function_invoker_wrapper::type(),\ - (void *)eosio::chain::webassembly::wavm::intrinsic_function_invoker_wrapper::type::fn<&CLS::METHOD>()\ - );\ - + nullptr\ + ); } } } }// eosio::chain::webassembly::wavm diff --git a/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp b/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp index 96fbf2e2195..3997993d77c 100644 --- a/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp +++ b/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp @@ -1,8 +1,3 @@ - -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/libraries/chain/name.cpp b/libraries/chain/name.cpp index 7e0a7e4a621..79ffe1603d4 100644 --- a/libraries/chain/name.cpp +++ b/libraries/chain/name.cpp 
@@ -4,22 +4,22 @@ #include #include -namespace eosio { namespace chain { +namespace eosio::chain { - void name::set( const char* str ) { - const auto len = strnlen(str, 14); - EOS_ASSERT(len <= 13, name_type_exception, "Name is longer than 13 characters (${name}) ", ("name", string(str))); - value = string_to_name(str); - EOS_ASSERT(to_string() == string(str), name_type_exception, + void name::set( std::string_view str ) { + const auto len = str.size(); + EOS_ASSERT(len <= 13, name_type_exception, "Name is longer than 13 characters (${name}) ", ("name", std::string(str))); + value = string_to_uint64_t(str); + EOS_ASSERT(to_string() == str, name_type_exception, "Name not properly normalized (name: ${name}, normalized: ${normalized}) ", - ("name", string(str))("normalized", to_string())); + ("name", std::string(str))("normalized", to_string())); } // keep in sync with name::to_string() in contract definition for name - name::operator string()const { + std::string name::to_string()const { static const char* charmap = ".12345abcdefghijklmnopqrstuvwxyz"; - string str(13,'.'); + std::string str(13,'.'); uint64_t tmp = value; for( uint32_t i = 0; i <= 12; ++i ) { @@ -32,9 +32,9 @@ namespace eosio { namespace chain { return str; } -} } /// eosio::chain +} // eosio::chain namespace fc { - void to_variant(const eosio::chain::name& c, fc::variant& v) { v = std::string(c); } - void from_variant(const fc::variant& v, eosio::chain::name& check) { check = v.get_string(); } + void to_variant(const eosio::chain::name& c, fc::variant& v) { v = c.to_string(); } + void from_variant(const fc::variant& v, eosio::chain::name& check) { check.set( v.get_string() ); } } // fc diff --git a/libraries/chain/platform_timer_accuracy.cpp b/libraries/chain/platform_timer_accuracy.cpp new file mode 100755 index 00000000000..94a69c89440 --- /dev/null +++ b/libraries/chain/platform_timer_accuracy.cpp @@ -0,0 +1,69 @@ +#include +#include + +#include +#include + +#include +#include +#include +#include 
+#include +#include + +#include +#include + +namespace eosio { namespace chain { + +namespace bacc = boost::accumulators; + +void compute_and_print_timer_accuracy(platform_timer& timer) { + static std::mutex m; + static bool once_is_enough; + + std::lock_guard guard(m); + + if(once_is_enough) + return; + + bacc::accumulator_set, float> samples; + + //keep longest first in list. You're effectively going to take test_intervals[0]*sizeof(test_intervals[0]) + //time to do the the test + int test_intervals[] = {50000, 10000, 5000, 1000, 500, 100, 50, 10}; + + for(int& interval : test_intervals) { + unsigned int loops = test_intervals[0]/interval; + + for(unsigned int i = 0; i < loops; ++i) { + auto start = std::chrono::high_resolution_clock::now(); + timer.start(fc::time_point(fc::time_point::now().time_since_epoch() + fc::microseconds(interval))); + while(!timer.expired) {} + auto end = std::chrono::high_resolution_clock::now(); + int timer_slop = std::chrono::duration_cast(end-start).count() - interval; + + //since more samples are run for the shorter expirations, weigh the longer expirations accordingly. This + //helps to make a few results more fair. Two such examples: AWS c4&i5 xen instances being rather stable + //down to 100us but then struggling with 50us and 10us. MacOS having performance that seems to correlate + //with expiry length; that is, long expirations have high error, short expirations have low error. + //That said, for these platforms, a tighter tolerance may possibly be achieved by taking performance + //metrics in mulitple bins and appliying the slop based on which bin a deadline resides in. Not clear + //if that's worth the extra complexity at this point. 
+ samples(timer_slop, bacc::weight = interval/(float)test_intervals[0]); + } + } + + #define TIMER_STATS_FORMAT "min:${min}us max:${max}us mean:${mean}us stddev:${stddev}us" + #define TIMER_STATS \ + ("min", bacc::min(samples))("max", bacc::max(samples)) \ + ("mean", (int)bacc::mean(samples))("stddev", (int)sqrt(bacc::variance(samples))) + + ilog("Checktime timer accuracy: " TIMER_STATS_FORMAT, TIMER_STATS); + if(bacc::mean(samples) + sqrt(bacc::variance(samples))*2 > 250) + wlog("Checktime timer accuracy on this platform and hardware combination is poor; accuracy of subjective transaction deadline enforcement will suffer"); + + once_is_enough = true; +} + +}} diff --git a/libraries/chain/platform_timer_asio_fallback.cpp b/libraries/chain/platform_timer_asio_fallback.cpp new file mode 100644 index 00000000000..79bc975de50 --- /dev/null +++ b/libraries/chain/platform_timer_asio_fallback.cpp @@ -0,0 +1,92 @@ +#include +#include + +#include +#include //set_os_thread_name() + +#include + +#include +#include + +namespace eosio { namespace chain { + +//a thread is shared for all instances +static std::mutex timer_ref_mutex; +static unsigned refcount; +static std::thread checktime_thread; +static std::unique_ptr checktime_ios; + +struct platform_timer::impl { + std::unique_ptr timer; +}; + +platform_timer::platform_timer() { + static_assert(sizeof(impl) <= fwd_size); + + std::lock_guard guard(timer_ref_mutex); + + if(refcount++ == 0) { + std::promise p; + checktime_thread = std::thread([&p]() { + fc::set_os_thread_name("checktime"); + checktime_ios = std::make_unique(); + boost::asio::io_service::work work(*checktime_ios); + p.set_value(); + + checktime_ios->run(); + }); + p.get_future().get(); + } + + my->timer = std::make_unique(*checktime_ios); + + //compute_and_print_timer_accuracy(*this); +} + +platform_timer::~platform_timer() { + stop(); + if(std::lock_guard guard(timer_ref_mutex); --refcount == 0) { + checktime_ios->stop(); + checktime_thread.join(); + 
checktime_ios.reset(); + } +} + +void platform_timer::start(fc::time_point tp) { + if(tp == fc::time_point::maximum()) { + expired = 0; + return; + } + fc::microseconds x = tp.time_since_epoch() - fc::time_point::now().time_since_epoch(); + if(x.count() <= 0) + expired = 1; + else { +#if 0 + std::promise p; + checktime_ios->post([&p,this]() { + expired = 0; + p.set_value(); + }); + p.get_future().get(); +#endif + expired = 0; + my->timer->expires_after(std::chrono::microseconds((int)x.count())); + my->timer->async_wait([this](const boost::system::error_code& ec) { + if(ec) + return; + expired = 1; + call_expiration_callback(); + }); + } +} + +void platform_timer::stop() { + if(expired) + return; + + my->timer->cancel(); + expired = 1; +} + +}} diff --git a/libraries/chain/platform_timer_macos.cpp b/libraries/chain/platform_timer_macos.cpp new file mode 100755 index 00000000000..545a683bae0 --- /dev/null +++ b/libraries/chain/platform_timer_macos.cpp @@ -0,0 +1,110 @@ +#include +#include + +#include +#include +#include +#include //set_os_thread_name() + +#include +#include + +#include + +namespace eosio { namespace chain { + +//a kqueue & thread is shared for all platform_timer_macos instances +static std::mutex timer_ref_mutex; +static unsigned next_timerid; +static unsigned refcount; +static int kqueue_fd; +static std::thread kevent_thread; + +struct platform_timer::impl { + uint64_t timerid; + + constexpr static uint64_t quit_event_id = 1; +}; + +platform_timer::platform_timer() { + static_assert(sizeof(impl) <= fwd_size); + + std::lock_guard guard(timer_ref_mutex); + + if(refcount++ == 0) { + kqueue_fd = kqueue(); + + FC_ASSERT(kqueue_fd != -1, "failed to create kqueue"); + + //set up a EVFILT_USER which will be signaled to shut down the thread + struct kevent64_s quit_event; + EV_SET64(&quit_event, impl::quit_event_id, EVFILT_USER, EV_ADD|EV_ENABLE, NOTE_FFNOP, 0, 0, 0, 0); + FC_ASSERT(kevent64(kqueue_fd, &quit_event, 1, NULL, 0, KEVENT_FLAG_IMMEDIATE, NULL) == 
0, "failed to create quit event"); + + kevent_thread = std::thread([]() { + fc::set_os_thread_name("checktime"); + while(true) { + struct kevent64_s anEvent; + int c = kevent64(kqueue_fd, NULL, 0, &anEvent, 1, 0, NULL); + + if(c == 1 && anEvent.filter == EVFILT_TIMER) { + platform_timer* self = (platform_timer*)anEvent.udata; + self->expired = 1; + self->call_expiration_callback(); + } + else if(c == 1 && anEvent.filter == EVFILT_USER) + return; + else if(c == -1 && errno == EINTR) + continue; + else if(c == -1) + return; //?? not much we can do now + } + }); + } + + my->timerid = next_timerid++; + + compute_and_print_timer_accuracy(*this); +} + +platform_timer::~platform_timer() { + stop(); + if(std::lock_guard guard(timer_ref_mutex); --refcount == 0) { + struct kevent64_s signal_quit_event; + EV_SET64(&signal_quit_event, impl::quit_event_id, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0, 0, 0); + + if(kevent64(kqueue_fd, &signal_quit_event, 1, NULL, 0, KEVENT_FLAG_IMMEDIATE, NULL) != -1) + kevent_thread.join(); + close(kqueue_fd); + } +} + +void platform_timer::start(fc::time_point tp) { + if(tp == fc::time_point::maximum()) { + expired = 0; + return; + } + fc::microseconds x = tp.time_since_epoch() - fc::time_point::now().time_since_epoch(); + if(x.count() <= 0) + expired = 1; + else { + struct kevent64_s aTimerEvent; + EV_SET64(&aTimerEvent, my->timerid, EVFILT_TIMER, EV_ADD|EV_ENABLE|EV_ONESHOT, NOTE_USECONDS|NOTE_CRITICAL, (int)x.count(), (uint64_t)this, 0, 0); + + expired = 0; + if(kevent64(kqueue_fd, &aTimerEvent, 1, NULL, 0, KEVENT_FLAG_IMMEDIATE, NULL) != 0) + expired = 1; + } +} + +void platform_timer::stop() { + if(expired) + return; + + struct kevent64_s stop_timer_event; + EV_SET64(&stop_timer_event, my->timerid, EVFILT_TIMER, EV_DELETE, 0, 0, 0, 0, 0); + kevent64(kqueue_fd, &stop_timer_event, 1, NULL, 0, KEVENT_FLAG_IMMEDIATE, NULL); + expired = 1; +} + +}} diff --git a/libraries/chain/platform_timer_posix.cpp b/libraries/chain/platform_timer_posix.cpp new file 
mode 100644 index 00000000000..1575664ba59 --- /dev/null +++ b/libraries/chain/platform_timer_posix.cpp @@ -0,0 +1,80 @@ +#include +#include + +#include +#include +#include + +#include + +#include +#include + +namespace eosio { namespace chain { + +static_assert(std::atomic_bool::is_always_lock_free, "Only lock-free atomics AS-safe."); + +struct platform_timer::impl { + timer_t timerid; + + static void sig_handler(int, siginfo_t* si, void*) { + platform_timer* self = (platform_timer*)si->si_value.sival_ptr; + self->expired = 1; + self->call_expiration_callback(); + } +}; + +platform_timer::platform_timer() { + static_assert(sizeof(impl) <= fwd_size); + + static bool initialized; + static std::mutex initalized_mutex; + + if(std::lock_guard guard(initalized_mutex); !initialized) { + struct sigaction act; + sigemptyset(&act.sa_mask); + act.sa_sigaction = impl::sig_handler; + act.sa_flags = SA_SIGINFO; + FC_ASSERT(sigaction(SIGRTMIN, &act, NULL) == 0, "failed to aquire SIGRTMIN signal"); + initialized = true; + } + + struct sigevent se; + se.sigev_notify = SIGEV_SIGNAL; + se.sigev_signo = SIGRTMIN; + se.sigev_value.sival_ptr = (void*)this; + + FC_ASSERT(timer_create(CLOCK_REALTIME, &se, &my->timerid) == 0, "failed to create timer"); + + compute_and_print_timer_accuracy(*this); +} + +platform_timer::~platform_timer() { + timer_delete(my->timerid); +} + +void platform_timer::start(fc::time_point tp) { + if(tp == fc::time_point::maximum()) { + expired = 0; + return; + } + fc::microseconds x = tp.time_since_epoch() - fc::time_point::now().time_since_epoch(); + if(x.count() <= 0) + expired = 1; + else { + struct itimerspec enable = {{0, 0}, {0, (int)x.count()*1000}}; + expired = 0; + if(timer_settime(my->timerid, 0, &enable, NULL) != 0) + expired = 1; + } +} + +void platform_timer::stop() { + if(expired) + return; + struct itimerspec disable = {{0, 0}, {0, 0}}; + timer_settime(my->timerid, 0, &disable, NULL); + expired = 1; +} + +}} diff --git 
a/libraries/chain/platform_timer_posix_test.c b/libraries/chain/platform_timer_posix_test.c new file mode 100644 index 00000000000..c96ec92810a --- /dev/null +++ b/libraries/chain/platform_timer_posix_test.c @@ -0,0 +1,10 @@ +#include +#include +#include + +int main() { + struct itimerval enable = {{0, 0}, {0, 1000u}}; + if(setitimer(ITIMER_REAL, &enable, NULL)) + return 1; + return 0; +} \ No newline at end of file diff --git a/libraries/chain/producer_schedule.cpp b/libraries/chain/producer_schedule.cpp new file mode 100644 index 00000000000..23aa205273f --- /dev/null +++ b/libraries/chain/producer_schedule.cpp @@ -0,0 +1,23 @@ +#include + +namespace eosio { namespace chain { + +fc::variant producer_authority::get_abi_variant() const { + auto authority_variant = authority.visit([](const auto& a){ + fc::variant value; + fc::to_variant(a, value); + + fc::variant type = std::string(std::decay_t::abi_type_name()); + + return fc::variants { + std::move(type), + std::move(value) + }; + }); + + return fc::mutable_variant_object() + ("producer_name", producer_name) + ("authority", std::move(authority_variant)); +} + +} } /// eosio::chain \ No newline at end of file diff --git a/libraries/chain/protocol_feature_activation.cpp b/libraries/chain/protocol_feature_activation.cpp index b0b7a563073..ca6f689d744 100644 --- a/libraries/chain/protocol_feature_activation.cpp +++ b/libraries/chain/protocol_feature_activation.cpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #include #include diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index ca0457ce11d..d61a7fe25c4 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #include #include #include @@ -154,6 +149,40 @@ is not allowed to schedule a deferred transaction in which the RAM costs are pai unless that account 
authorized the action; but is allowed to execute database operations that increase RAM usage of an account other than the receiver as long as either the account authorized the action or the action's net effect on RAM usage for the account is to not increase it. +*/ + {} + } ) + ( builtin_protocol_feature_t::webauthn_key, builtin_protocol_feature_spec{ + "WEBAUTHN_KEY", + fc::variant("927fdf78c51e77a899f2db938249fb1f8bb38f4e43d9c1f75b190492080cbc34").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: WEBAUTHN_KEY + +Enables usage of WebAuthn keys and signatures. +*/ + {} + } ) + ( builtin_protocol_feature_t::wtmsig_block_signatures, builtin_protocol_feature_spec{ + "WTMSIG_BLOCK_SIGNATURES", + fc::variant("ab76031cad7a457f4fd5f5fca97a3f03b8a635278e0416f77dcc91eb99a48e10").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: WTMSIG_BLOCK_SIGNATURES + +Allows producers to specify a multisig of weighted keys as the authority for signing blocks. + +A valid block header: +is no longer allowed to have a non-empty `new_producers` field; +must announce new producer schedules using a block header extension with ID `1`. + +A valid signed block: +must continue to have exactly one signature in its `signatures` field; +and may have additional signatures in a block extension with ID `2`. + +Privileged Contracts: +may continue to use `set_proposed_producers` as they have; +may use a new `set_proposed_producers_ex` intrinsic to access extended features. 
*/ {} } ) diff --git a/libraries/chain/protocol_state_object.cpp b/libraries/chain/protocol_state_object.cpp index 7009ee57874..142df3b1319 100644 --- a/libraries/chain/protocol_state_object.cpp +++ b/libraries/chain/protocol_state_object.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include namespace eosio { namespace chain { diff --git a/libraries/chain/snapshot.cpp b/libraries/chain/snapshot.cpp index e226a4f4886..66966ef6be5 100644 --- a/libraries/chain/snapshot.cpp +++ b/libraries/chain/snapshot.cpp @@ -30,6 +30,7 @@ void variant_snapshot_writer::finalize() { variant_snapshot_reader::variant_snapshot_reader(const fc::variant& snapshot) :snapshot(snapshot) +,cur_section(nullptr) ,cur_row(0) { } @@ -114,6 +115,10 @@ void variant_snapshot_reader::clear_section() { cur_row = 0; } +void variant_snapshot_reader::return_to_header() { + clear_section(); +} + ostream_snapshot_writer::ostream_snapshot_writer(std::ostream& snapshot) :snapshot(snapshot) ,header_pos(snapshot.tellp()) @@ -336,6 +341,11 @@ void istream_snapshot_reader::clear_section() { cur_row = 0; } +void istream_snapshot_reader::return_to_header() { + snapshot.seekg( header_pos ); + clear_section(); +} + integrity_hash_snapshot_writer::integrity_hash_snapshot_writer(fc::sha256::encoder& enc) :enc(enc) { @@ -358,4 +368,4 @@ void integrity_hash_snapshot_writer::finalize() { // no-op for structural details } -}} \ No newline at end of file +}} diff --git a/libraries/chain/thread_utils.cpp b/libraries/chain/thread_utils.cpp index 1d8a2707c14..88f49e4228c 100644 --- a/libraries/chain/thread_utils.cpp +++ b/libraries/chain/thread_utils.cpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ - #include #include diff --git a/libraries/chain/trace.cpp b/libraries/chain/trace.cpp index 44a7ba94cfd..7ce555d8a11 100644 --- a/libraries/chain/trace.cpp +++ b/libraries/chain/trace.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ 
#include namespace eosio { namespace chain { diff --git a/libraries/chain/transaction.cpp b/libraries/chain/transaction.cpp index 4999a44cc6d..ee51d639304 100644 --- a/libraries/chain/transaction.cpp +++ b/libraries/chain/transaction.cpp @@ -1,18 +1,8 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include -#include #include -#include #include -#include -#include -#include -#include #include #include #include @@ -23,33 +13,6 @@ namespace eosio { namespace chain { -using namespace boost::multi_index; - -struct cached_pub_key { - transaction_id_type trx_id; - public_key_type pub_key; - signature_type sig; - fc::microseconds cpu_usage; - cached_pub_key(const cached_pub_key&) = delete; - cached_pub_key() = delete; - cached_pub_key& operator=(const cached_pub_key&) = delete; - cached_pub_key(cached_pub_key&&) = default; -}; -struct by_sig{}; - -typedef multi_index_container< - cached_pub_key, - indexed_by< - sequenced<>, - hashed_unique< - tag, - member - > - > -> recovery_cache_type; - void deferred_transaction_generation_context::reflector_init() { static_assert( fc::raw::has_feature_reflector_init_on_unpacked_reflected_types, "deferred_transaction_generation_context expects FC to support reflector_init" ); @@ -97,62 +60,27 @@ fc::microseconds transaction::get_signature_keys( const vector& const chain_id_type& chain_id, fc::time_point deadline, const vector& cfd, flat_set& recovered_pub_keys, bool allow_duplicate_keys)const { try { - using boost::adaptors::transformed; - - constexpr size_t recovery_cache_size = 10000; - static recovery_cache_type recovery_cache; - static std::mutex cache_mtx; - auto start = fc::time_point::now(); recovered_pub_keys.clear(); const digest_type digest = sig_digest(chain_id, cfd); - std::unique_lock lock(cache_mtx, std::defer_lock); - fc::microseconds sig_cpu_usage; - const auto digest_time = fc::time_point::now() - start; for(const signature_type& sig : signatures) { - auto sig_start = fc::time_point::now(); - 
EOS_ASSERT( sig_start < deadline, tx_cpu_usage_exceeded, "transaction signature verification executed for too long", - ("now", sig_start)("deadline", deadline)("start", start) ); - public_key_type recov; - const auto& tid = id(); - lock.lock(); - recovery_cache_type::index::type::iterator it = recovery_cache.get().find( sig ); - if( it == recovery_cache.get().end() || it->trx_id != tid ) { - lock.unlock(); - recov = public_key_type( sig, digest ); - fc::microseconds cpu_usage = fc::time_point::now() - sig_start; - lock.lock(); - recovery_cache.emplace_back( cached_pub_key{tid, recov, sig, cpu_usage} ); //could fail on dup signatures; not a problem - sig_cpu_usage += cpu_usage; - } else { - recov = it->pub_key; - sig_cpu_usage += it->cpu_usage; - } - lock.unlock(); - bool successful_insertion = false; - std::tie(std::ignore, successful_insertion) = recovered_pub_keys.insert(recov); + auto now = fc::time_point::now(); + EOS_ASSERT( now < deadline, tx_cpu_usage_exceeded, "transaction signature verification executed for too long ${time}us", + ("time", now - start)("now", now)("deadline", deadline)("start", start) ); + auto[ itr, successful_insertion ] = recovered_pub_keys.emplace( sig, digest ); EOS_ASSERT( allow_duplicate_keys || successful_insertion, tx_duplicate_sig, "transaction includes more than one signature signed using the same key associated with public key: ${key}", - ("key", recov) ); + ("key", *itr ) ); } - lock.lock(); - while ( recovery_cache.size() > recovery_cache_size ) - recovery_cache.erase( recovery_cache.begin()); - lock.unlock(); - - return sig_cpu_usage + digest_time; + return fc::time_point::now() - start; } FC_CAPTURE_AND_RETHROW() } -vector transaction::validate_and_extract_extensions()const { - using transaction_extensions_t = transaction_extension_types::transaction_extensions_t; +flat_multimap transaction::validate_and_extract_extensions()const { using decompose_t = transaction_extension_types::decompose_t; - static_assert( 
std::is_same::value, - "transaction_extensions is not setup as expected" ); - - vector results; + flat_multimap results; uint16_t id_type_lower_bound = 0; @@ -164,9 +92,12 @@ vector transaction::validate_and_extract_extensions()con "Transaction extensions are not in the correct order (ascending id types required)" ); - results.emplace_back(); + auto iter = results.emplace(std::piecewise_construct, + std::forward_as_tuple(id), + std::forward_as_tuple() + ); - auto match = decompose_t::extract( id, e.second, results.back() ); + auto match = decompose_t::extract( id, e.second, iter->second ); EOS_ASSERT( match, invalid_transaction_extension, "Transaction extension with id type ${id} is not supported", ("id", id) @@ -329,9 +260,9 @@ bytes packed_transaction::get_raw_transaction() const { try { switch(compression) { - case none: + case compression_type::none: return packed_trx; - case zlib: + case compression_type::zlib: return zlib_decompress(packed_trx); default: EOS_THROW(unknown_transaction_compression, "Unknown transaction compression algorithm"); @@ -367,6 +298,7 @@ packed_transaction::packed_transaction( transaction&& t, vector& ,compression(_compression) ,packed_context_free_data(std::move(packed_cfd)) ,unpacked_trx(std::move(t), signatures, {}) +,trx_id(unpacked_trx.id()) { local_pack_transaction(); if( !packed_context_free_data.empty() ) { @@ -388,15 +320,16 @@ void packed_transaction::local_unpack_transaction(vector&& context_free_d { try { switch( compression ) { - case none: + case compression_type::none: unpacked_trx = signed_transaction( unpack_transaction( packed_trx ), signatures, std::move(context_free_data) ); break; - case zlib: + case compression_type::zlib: unpacked_trx = signed_transaction( zlib_decompress_transaction( packed_trx ), signatures, std::move(context_free_data) ); break; default: EOS_THROW( unknown_transaction_compression, "Unknown transaction compression algorithm" ); } + trx_id = unpacked_trx.id(); } FC_CAPTURE_AND_RETHROW( 
(compression) ) } @@ -405,10 +338,10 @@ void packed_transaction::local_unpack_context_free_data() try { EOS_ASSERT(unpacked_trx.context_free_data.empty(), tx_decompression_error, "packed_transaction.context_free_data not empty"); switch( compression ) { - case none: + case compression_type::none: unpacked_trx.context_free_data = unpack_context_free_data( packed_context_free_data ); break; - case zlib: + case compression_type::zlib: unpacked_trx.context_free_data = zlib_decompress_context_free_data( packed_context_free_data ); break; default: @@ -421,10 +354,10 @@ void packed_transaction::local_pack_transaction() { try { switch(compression) { - case none: + case compression_type::none: packed_trx = pack_transaction(unpacked_trx); break; - case zlib: + case compression_type::zlib: packed_trx = zlib_compress_transaction(unpacked_trx); break; default: @@ -437,10 +370,10 @@ void packed_transaction::local_pack_context_free_data() { try { switch(compression) { - case none: + case compression_type::none: packed_context_free_data = pack_context_free_data(unpacked_trx.context_free_data); break; - case zlib: + case compression_type::zlib: packed_context_free_data = zlib_compress_context_free_data(unpacked_trx.context_free_data); break; default: diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 407cfb74c08..49a67dff882 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -21,134 +21,32 @@ namespace eosio { namespace chain { -namespace bacc = boost::accumulators; - - struct deadline_timer_verify { - deadline_timer_verify() { - //keep longest first in list. 
You're effectively going to take test_intervals[0]*sizeof(test_intervals[0]) - //time to do the the "calibration" - int test_intervals[] = {50000, 10000, 5000, 1000, 500, 100, 50, 10}; - - struct sigaction act; - sigemptyset(&act.sa_mask); - act.sa_handler = timer_hit; - act.sa_flags = 0; - if(sigaction(SIGALRM, &act, NULL)) - return; - - sigset_t alrm; - sigemptyset(&alrm); - sigaddset(&alrm, SIGALRM); - int dummy; - - for(int& interval : test_intervals) { - unsigned int loops = test_intervals[0]/interval; - - for(unsigned int i = 0; i < loops; ++i) { - struct itimerval enable = {{0, 0}, {0, interval}}; - hit = 0; - auto start = std::chrono::high_resolution_clock::now(); - if(setitimer(ITIMER_REAL, &enable, NULL)) - return; - while(!hit) {} - auto end = std::chrono::high_resolution_clock::now(); - int timer_slop = std::chrono::duration_cast(end-start).count() - interval; - - //since more samples are run for the shorter expirations, weigh the longer expirations accordingly. This - //helps to make a few results more fair. Two such examples: AWS c4&i5 xen instances being rather stable - //down to 100us but then struggling with 50us and 10us. MacOS having performance that seems to correlate - //with expiry length; that is, long expirations have high error, short expirations have low error. - //That said, for these platforms, a tighter tolerance may possibly be achieved by taking performance - //metrics in mulitple bins and appliying the slop based on which bin a deadline resides in. Not clear - //if that's worth the extra complexity at this point. 
- samples(timer_slop, bacc::weight = interval/(float)test_intervals[0]); - } - } - timer_overhead = bacc::mean(samples) + sqrt(bacc::variance(samples))*2; //target 95% of expirations before deadline - use_deadline_timer = timer_overhead < 1000; - - act.sa_handler = SIG_DFL; - sigaction(SIGALRM, &act, NULL); - } - - static void timer_hit(int) { - hit = 1; - } - static volatile sig_atomic_t hit; - - bacc::accumulator_set, float> samples; - bool use_deadline_timer = false; - int timer_overhead; - }; - volatile sig_atomic_t deadline_timer_verify::hit; - static deadline_timer_verify deadline_timer_verification; - - deadline_timer::deadline_timer() { - if(initialized) - return; - initialized = true; - - #define TIMER_STATS_FORMAT "min:${min}us max:${max}us mean:${mean}us stddev:${stddev}us" - #define TIMER_STATS \ - ("min", bacc::min(deadline_timer_verification.samples))("max", bacc::max(deadline_timer_verification.samples)) \ - ("mean", (int)bacc::mean(deadline_timer_verification.samples))("stddev", (int)sqrt(bacc::variance(deadline_timer_verification.samples))) \ - ("t", deadline_timer_verification.timer_overhead) - - if(deadline_timer_verification.use_deadline_timer) { - struct sigaction act; - act.sa_handler = timer_expired; - sigemptyset(&act.sa_mask); - act.sa_flags = 0; - if(sigaction(SIGALRM, &act, NULL) == 0) { - ilog("Using ${t}us deadline timer for checktime: " TIMER_STATS_FORMAT, TIMER_STATS); - return; - } - } - - wlog("Using polled checktime; deadline timer too inaccurate: " TIMER_STATS_FORMAT, TIMER_STATS); - deadline_timer_verification.use_deadline_timer = false; //set in case sigaction() fails above + transaction_checktime_timer::transaction_checktime_timer(platform_timer& timer) + : expired(timer.expired), _timer(timer) { + expired = 0; } - void deadline_timer::start(fc::time_point tp) { - if(tp == fc::time_point::maximum()) { - expired = 0; - return; - } - if(!deadline_timer_verification.use_deadline_timer) { - expired = 1; - return; - } - microseconds 
x = tp.time_since_epoch() - fc::time_point::now().time_since_epoch(); - if(x.count() <= deadline_timer_verification.timer_overhead) - expired = 1; - else { - struct itimerval enable = {{0, 0}, {0, (int)x.count()-deadline_timer_verification.timer_overhead}}; - expired = 0; - if(setitimer(ITIMER_REAL, &enable, NULL)) - expired = 1; - } + void transaction_checktime_timer::start(fc::time_point tp) { + _timer.start(tp); } - void deadline_timer::stop() { - if(expired) - return; - struct itimerval disable = {{0, 0}, {0, 0}}; - setitimer(ITIMER_REAL, &disable, NULL); + void transaction_checktime_timer::stop() { + _timer.stop(); } - deadline_timer::~deadline_timer() { - stop(); + void transaction_checktime_timer::set_expiration_callback(void(*func)(void*), void* user) { + _timer.set_expiration_callback(func, user); } - void deadline_timer::timer_expired(int) { - expired = 1; + transaction_checktime_timer::~transaction_checktime_timer() { + stop(); + _timer.set_expiration_callback(nullptr, nullptr); } - volatile sig_atomic_t deadline_timer::expired = 0; - bool deadline_timer::initialized = false; transaction_context::transaction_context( controller& c, const signed_transaction& t, const transaction_id_type& trx_id, + transaction_checktime_timer&& tmr, fc::time_point s ) :control(c) ,trx(t) @@ -156,6 +54,7 @@ namespace bacc = boost::accumulators; ,undo_session() ,trace(std::make_shared()) ,start(s) + ,transaction_timer(std::move(tmr)) ,net_usage(trace->net_usage) ,pseudo_start(s) { @@ -281,9 +180,9 @@ namespace bacc = boost::accumulators; checktime(); // Fail early if deadline has already been exceeded if(control.skip_trx_checks()) - _deadline_timer.expired = 0; + transaction_timer.start(fc::time_point::maximum()); else - _deadline_timer.start(_deadline); + transaction_timer.start(_deadline); is_initialized = true; } @@ -466,35 +365,34 @@ namespace bacc = boost::accumulators; } void transaction_context::checktime()const { - if(BOOST_LIKELY(_deadline_timer.expired == false)) + 
if(BOOST_LIKELY(transaction_timer.expired == false)) return; + auto now = fc::time_point::now(); - if( BOOST_UNLIKELY( now > _deadline ) ) { - // edump((now-start)(now-pseudo_start)); - if( explicit_billed_cpu_time || deadline_exception_code == deadline_exception::code_value ) { - EOS_THROW( deadline_exception, "deadline exceeded", ("now", now)("deadline", _deadline)("start", start) ); - } else if( deadline_exception_code == block_cpu_usage_exceeded::code_value ) { - EOS_THROW( block_cpu_usage_exceeded, - "not enough time left in block to complete executing transaction", - ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); - } else if( deadline_exception_code == tx_cpu_usage_exceeded::code_value ) { - if (cpu_limit_due_to_greylist) { - EOS_THROW( greylist_cpu_usage_exceeded, - "greylisted transaction was executing for too long", - ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); - } else { - EOS_THROW( tx_cpu_usage_exceeded, - "transaction was executing for too long", - ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); - } - } else if( deadline_exception_code == leeway_deadline_exception::code_value ) { - EOS_THROW( leeway_deadline_exception, - "the transaction was unable to complete by deadline, " - "but it is possible it could have succeeded if it were allowed to run to completion", - ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); + if( explicit_billed_cpu_time || deadline_exception_code == deadline_exception::code_value ) { + EOS_THROW( deadline_exception, "deadline exceeded ${billing_timer}us", + ("billing_timer", now - pseudo_start)("now", now)("deadline", _deadline)("start", start) ); + } else if( deadline_exception_code == block_cpu_usage_exceeded::code_value ) { + EOS_THROW( block_cpu_usage_exceeded, + "not enough time left in block to complete executing transaction ${billing_timer}us", + 
("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); + } else if( deadline_exception_code == tx_cpu_usage_exceeded::code_value ) { + if (cpu_limit_due_to_greylist) { + EOS_THROW( greylist_cpu_usage_exceeded, + "greylisted transaction was executing for too long ${billing_timer}us", + ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); + } else { + EOS_THROW( tx_cpu_usage_exceeded, + "transaction was executing for too long ${billing_timer}us", + ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); } - EOS_ASSERT( false, transaction_exception, "unexpected deadline exception code" ); + } else if( deadline_exception_code == leeway_deadline_exception::code_value ) { + EOS_THROW( leeway_deadline_exception, + "the transaction was unable to complete by deadline, " + "but it is possible it could have succeeded if it were allowed to run to completion ${billing_timer}", + ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); } + EOS_ASSERT( false, transaction_exception, "unexpected deadline exception code ${code}", ("code", deadline_exception_code) ); } void transaction_context::pause_billing_timer() { @@ -504,7 +402,7 @@ namespace bacc = boost::accumulators; billed_time = now - pseudo_start; deadline_exception_code = deadline_exception::code_value; // Other timeout exceptions cannot be thrown while billable timer is paused. 
pseudo_start = fc::time_point(); - _deadline_timer.stop(); + transaction_timer.stop(); } void transaction_context::resume_billing_timer() { @@ -519,7 +417,7 @@ namespace bacc = boost::accumulators; _deadline = deadline; deadline_exception_code = deadline_exception::code_value; } - _deadline_timer.start(_deadline); + transaction_timer.start(_deadline); } void transaction_context::validate_cpu_usage_to_bill( int64_t billed_us, bool check_minimum )const { diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index ddcbd2d934e..00a9e2c329a 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -4,51 +4,22 @@ namespace eosio { namespace chain { -recovery_keys_type transaction_metadata::recover_keys( const chain_id_type& chain_id ) { - // Unlikely for more than one chain_id to be used in one nodeos instance - if( signing_keys_future.valid() ) { - const std::tuple>& sig_keys = signing_keys_future.get(); - if( std::get<0>( sig_keys ) == chain_id ) { - return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) ); - } - } - - // shared_keys_future not created or different chain_id - std::promise p; - flat_set recovered_pub_keys; - const signed_transaction& trn = packed_trx->get_signed_transaction(); - fc::microseconds cpu_usage = trn.get_signature_keys( chain_id, fc::time_point::maximum(), recovered_pub_keys ); - p.set_value( std::make_tuple( chain_id, cpu_usage, std::move( recovered_pub_keys ) ) ); - signing_keys_future = p.get_future().share(); - - const std::tuple>& sig_keys = signing_keys_future.get(); - return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) ); -} - -signing_keys_future_type transaction_metadata::start_recover_keys( const transaction_metadata_ptr& mtrx, - boost::asio::io_context& thread_pool, - const chain_id_type& chain_id, - fc::microseconds time_limit ) +recover_keys_future 
transaction_metadata::start_recover_keys( packed_transaction_ptr trx, + boost::asio::io_context& thread_pool, + const chain_id_type& chain_id, + fc::microseconds time_limit, + uint32_t max_variable_sig_size ) { - if( mtrx->signing_keys_future.valid() && std::get<0>( mtrx->signing_keys_future.get() ) == chain_id ) // already created - return mtrx->signing_keys_future; - - std::weak_ptr mtrx_wp = mtrx; - mtrx->signing_keys_future = async_thread_pool( thread_pool, [time_limit, chain_id, mtrx_wp]() { - fc::time_point deadline = time_limit == fc::microseconds::maximum() ? - fc::time_point::maximum() : fc::time_point::now() + time_limit; - auto mtrx = mtrx_wp.lock(); - fc::microseconds cpu_usage; - flat_set recovered_pub_keys; - if( mtrx ) { - const signed_transaction& trn = mtrx->packed_trx->get_signed_transaction(); - cpu_usage = trn.get_signature_keys( chain_id, deadline, recovered_pub_keys ); + return async_thread_pool( thread_pool, [trx{std::move(trx)}, chain_id, time_limit, max_variable_sig_size]() mutable { + fc::time_point deadline = time_limit == fc::microseconds::maximum() ? 
+ fc::time_point::maximum() : fc::time_point::now() + time_limit; + check_variable_sig_size( trx, max_variable_sig_size ); + const signed_transaction& trn = trx->get_signed_transaction(); + flat_set recovered_pub_keys; + fc::microseconds cpu_usage = trn.get_signature_keys( chain_id, deadline, recovered_pub_keys ); + return std::make_shared( private_type(), std::move( trx ), cpu_usage, std::move( recovered_pub_keys ) ); } - return std::make_tuple( chain_id, cpu_usage, std::move( recovered_pub_keys )); - } ); - - return mtrx->signing_keys_future; + ); } - } } // eosio::chain diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 6ad6a9cb131..c29bb77838c 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -25,11 +25,15 @@ #include #include +#if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) +#include +#endif + namespace eosio { namespace chain { - using namespace webassembly; using namespace webassembly::common; - wasm_interface::wasm_interface(vm_type vm, const chainbase::database& d) : my( new wasm_interface_impl(vm, d) ) {} + wasm_interface::wasm_interface(vm_type vm, bool eosvmoc_tierup, const chainbase::database& d, const boost::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config) + : my( new wasm_interface_impl(vm, eosvmoc_tierup, d, data_dir, eosvmoc_config) ) {} wasm_interface::~wasm_interface() {} @@ -70,6 +74,26 @@ namespace eosio { namespace chain { } void wasm_interface::apply( const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context ) { +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if(my->eosvmoc) { + const chain::eosvmoc::code_descriptor* cd = nullptr; + try { + cd = my->eosvmoc->cc.get_descriptor_for_code(code_hash, vm_version); + } + catch(...) 
{ + //swallow errors here, if EOS VM OC has gone in to the weeds we shouldn't bail: continue to try and run baseline + //In the future, consider moving bits of EOS VM that can fire exceptions and such out of this call path + static bool once_is_enough; + if(!once_is_enough) + elog("EOS VM OC has encountered an unexpected failure"); + once_is_enough = true; + } + if(cd) { + my->eosvmoc->exec.execute(*cd, my->eosvmoc->mem, context); + return; + } + } +#endif my->get_instantiated_module(code_hash, vm_type, vm_version, context.trx_context)->apply(context); } @@ -110,7 +134,7 @@ class context_free_api : public context_aware_api { EOS_ASSERT( context.is_context_free(), unaccessible_api, "this API may only be called from context_free apply" ); } - int get_context_free_data( uint32_t index, array_ptr buffer, size_t buffer_size )const { + int get_context_free_data( uint32_t index, array_ptr buffer, uint32_t buffer_size )const { return context.get_context_free_data( index, buffer, buffer_size ); } }; @@ -175,30 +199,86 @@ class privileged_api : public context_aware_api { context.control.get_resource_limits_manager().get_account_limits( account, ram_bytes, net_weight, cpu_weight); } - int64_t set_proposed_producers( array_ptr packed_producer_schedule, size_t datalen) { - datastream ds( packed_producer_schedule, datalen ); - vector producers; - fc::raw::unpack(ds, producers); + int64_t set_proposed_producers_common( vector && producers, bool validate_keys ) { + EOS_ASSERT(producers.size() <= config::max_producers, wasm_execution_error, "Producer schedule exceeds the maximum producer count for this chain"); EOS_ASSERT( producers.size() > 0 - || !context.control.is_builtin_activated( - builtin_protocol_feature_t::disallow_empty_producer_schedule - ), + || !context.control.is_builtin_activated( builtin_protocol_feature_t::disallow_empty_producer_schedule ), wasm_execution_error, "Producer schedule cannot be empty" ); - EOS_ASSERT(producers.size() <= config::max_producers, 
wasm_execution_error, "Producer schedule exceeds the maximum producer count for this chain"); + + const auto num_supported_key_types = context.db.get().num_supported_key_types; + // check that producers are unique std::set unique_producers; for (const auto& p: producers) { EOS_ASSERT( context.is_account(p.producer_name), wasm_execution_error, "producer schedule includes a nonexisting account" ); - EOS_ASSERT( p.block_signing_key.valid(), wasm_execution_error, "producer schedule includes an invalid key" ); + + p.authority.visit([&p, num_supported_key_types, validate_keys](const auto& a) { + uint32_t sum_weights = 0; + std::set unique_keys; + for (const auto& kw: a.keys ) { + EOS_ASSERT( kw.key.which() < num_supported_key_types, unactivated_key_type, + "Unactivated key type used in proposed producer schedule"); + + if( validate_keys ) { + EOS_ASSERT( kw.key.valid(), wasm_execution_error, "producer schedule includes an invalid key" ); + } + + if (std::numeric_limits::max() - sum_weights <= kw.weight) { + sum_weights = std::numeric_limits::max(); + } else { + sum_weights += kw.weight; + } + + unique_keys.insert(kw.key); + } + + EOS_ASSERT( a.keys.size() == unique_keys.size(), wasm_execution_error, "producer schedule includes a duplicated key for ${account}", ("account", p.producer_name)); + EOS_ASSERT( a.threshold > 0, wasm_execution_error, "producer schedule includes an authority with a threshold of 0 for ${account}", ("account", p.producer_name)); + EOS_ASSERT( sum_weights >= a.threshold, wasm_execution_error, "producer schedule includes an unsatisfiable authority for ${account}", ("account", p.producer_name)); + }); + + unique_producers.insert(p.producer_name); } EOS_ASSERT( producers.size() == unique_producers.size(), wasm_execution_error, "duplicate producer name in producer schedule" ); + return context.control.set_proposed_producers( std::move(producers) ); } - uint32_t get_blockchain_parameters_packed( array_ptr packed_blockchain_parameters, size_t buffer_size) 
{ + int64_t set_proposed_producers( array_ptr packed_producer_schedule, uint32_t datalen ) { + datastream ds( packed_producer_schedule, datalen ); + vector producers; + + vector old_version; + fc::raw::unpack(ds, old_version); + + /* + * Up-convert the producers + */ + for ( const auto& p: old_version ) { + producers.emplace_back(producer_authority{ p.producer_name, block_signing_authority_v0{ 1, {{p.block_signing_key, 1}} } } ); + } + + return set_proposed_producers_common(std::move(producers), true); + } + + int64_t set_proposed_producers_ex( uint64_t packed_producer_format, array_ptr packed_producer_schedule, uint32_t datalen ) { + if (packed_producer_format == 0) { + return set_proposed_producers(packed_producer_schedule, datalen); + } else if (packed_producer_format == 1) { + datastream ds( packed_producer_schedule, datalen ); + vector producers; + + fc::raw::unpack(ds, producers); + return set_proposed_producers_common(std::move(producers), false); + } else { + EOS_THROW(wasm_execution_error, "Producer schedule is in an unknown format!"); + } + } + + uint32_t get_blockchain_parameters_packed( array_ptr packed_blockchain_parameters, uint32_t buffer_size) { auto& gpo = context.control.get_global_properties(); auto s = fc::raw::pack_size( gpo.configuration ); @@ -212,7 +292,7 @@ class privileged_api : public context_aware_api { return 0; } - void set_blockchain_parameters_packed( array_ptr packed_blockchain_parameters, size_t datalen) { + void set_blockchain_parameters_packed( array_ptr packed_blockchain_parameters, uint32_t datalen) { datastream ds( packed_blockchain_parameters, datalen ); chain::chain_config cfg; fc::raw::unpack(ds, cfg); @@ -246,19 +326,19 @@ class softfloat_api : public context_aware_api { #pragma GCC diagnostic ignored "-Wstrict-aliasing" // float binops float _eosio_f32_add( float a, float b ) { - float32_t ret = f32_add( to_softfloat32(a), to_softfloat32(b) ); + float32_t ret = ::f32_add( to_softfloat32(a), to_softfloat32(b) ); return 
*reinterpret_cast(&ret); } float _eosio_f32_sub( float a, float b ) { - float32_t ret = f32_sub( to_softfloat32(a), to_softfloat32(b) ); + float32_t ret = ::f32_sub( to_softfloat32(a), to_softfloat32(b) ); return *reinterpret_cast(&ret); } float _eosio_f32_div( float a, float b ) { - float32_t ret = f32_div( to_softfloat32(a), to_softfloat32(b) ); + float32_t ret = ::f32_div( to_softfloat32(a), to_softfloat32(b) ); return *reinterpret_cast(&ret); } float _eosio_f32_mul( float a, float b ) { - float32_t ret = f32_mul( to_softfloat32(a), to_softfloat32(b) ); + float32_t ret = ::f32_mul( to_softfloat32(a), to_softfloat32(b) ); return *reinterpret_cast(&ret); } #pragma GCC diagnostic pop @@ -274,7 +354,7 @@ class softfloat_api : public context_aware_api { if ( f32_sign_bit(a) != f32_sign_bit(b) ) { return f32_sign_bit(a) ? af : bf; } - return f32_lt(a,b) ? af : bf; + return ::f32_lt(a,b) ? af : bf; } float _eosio_f32_max( float af, float bf ) { float32_t a = to_softfloat32(af); @@ -288,12 +368,11 @@ class softfloat_api : public context_aware_api { if ( f32_sign_bit(a) != f32_sign_bit(b) ) { return f32_sign_bit(a) ? bf : af; } - return f32_lt( a, b ) ? bf : af; + return ::f32_lt( a, b ) ? 
bf : af; } float _eosio_f32_copysign( float af, float bf ) { float32_t a = to_softfloat32(af); float32_t b = to_softfloat32(bf); - uint32_t sign_of_a = a.v >> 31; uint32_t sign_of_b = b.v >> 31; a.v &= ~(1 << 31); // clear the sign bit a.v = a.v | (sign_of_b << 31); // add the sign of b @@ -313,7 +392,7 @@ class softfloat_api : public context_aware_api { return from_softfloat32(a); } float _eosio_f32_sqrt( float a ) { - float32_t ret = f32_sqrt( to_softfloat32(a) ); + float32_t ret = ::f32_sqrt( to_softfloat32(a) ); return from_softfloat32(ret); } // ceil, floor, trunc and nearest are lifted from libc @@ -382,19 +461,19 @@ class softfloat_api : public context_aware_api { if (e >= 0x7f+23) return af; if (s) - y = f32_add( f32_sub( a, float32_t{inv_float_eps} ), float32_t{inv_float_eps} ); + y = ::f32_add( ::f32_sub( a, float32_t{inv_float_eps} ), float32_t{inv_float_eps} ); else - y = f32_sub( f32_add( a, float32_t{inv_float_eps} ), float32_t{inv_float_eps} ); - if (f32_eq( y, {0} ) ) + y = ::f32_sub( ::f32_add( a, float32_t{inv_float_eps} ), float32_t{inv_float_eps} ); + if (::f32_eq( y, {0} ) ) return s ? 
-0.0f : 0.0f; return from_softfloat32(y); } // float relops - bool _eosio_f32_eq( float a, float b ) { return f32_eq( to_softfloat32(a), to_softfloat32(b) ); } - bool _eosio_f32_ne( float a, float b ) { return !f32_eq( to_softfloat32(a), to_softfloat32(b) ); } - bool _eosio_f32_lt( float a, float b ) { return f32_lt( to_softfloat32(a), to_softfloat32(b) ); } - bool _eosio_f32_le( float a, float b ) { return f32_le( to_softfloat32(a), to_softfloat32(b) ); } + bool _eosio_f32_eq( float a, float b ) { return ::f32_eq( to_softfloat32(a), to_softfloat32(b) ); } + bool _eosio_f32_ne( float a, float b ) { return !::f32_eq( to_softfloat32(a), to_softfloat32(b) ); } + bool _eosio_f32_lt( float a, float b ) { return ::f32_lt( to_softfloat32(a), to_softfloat32(b) ); } + bool _eosio_f32_le( float a, float b ) { return ::f32_le( to_softfloat32(a), to_softfloat32(b) ); } bool _eosio_f32_gt( float af, float bf ) { float32_t a = to_softfloat32(af); float32_t b = to_softfloat32(bf); @@ -402,7 +481,7 @@ class softfloat_api : public context_aware_api { return false; if (is_nan(b)) return false; - return !f32_le( a, b ); + return !::f32_le( a, b ); } bool _eosio_f32_ge( float af, float bf ) { float32_t a = to_softfloat32(af); @@ -411,24 +490,24 @@ class softfloat_api : public context_aware_api { return false; if (is_nan(b)) return false; - return !f32_lt( a, b ); + return !::f32_lt( a, b ); } // double binops double _eosio_f64_add( double a, double b ) { - float64_t ret = f64_add( to_softfloat64(a), to_softfloat64(b) ); + float64_t ret = ::f64_add( to_softfloat64(a), to_softfloat64(b) ); return from_softfloat64(ret); } double _eosio_f64_sub( double a, double b ) { - float64_t ret = f64_sub( to_softfloat64(a), to_softfloat64(b) ); + float64_t ret = ::f64_sub( to_softfloat64(a), to_softfloat64(b) ); return from_softfloat64(ret); } double _eosio_f64_div( double a, double b ) { - float64_t ret = f64_div( to_softfloat64(a), to_softfloat64(b) ); + float64_t ret = ::f64_div( 
to_softfloat64(a), to_softfloat64(b) ); return from_softfloat64(ret); } double _eosio_f64_mul( double a, double b ) { - float64_t ret = f64_mul( to_softfloat64(a), to_softfloat64(b) ); + float64_t ret = ::f64_mul( to_softfloat64(a), to_softfloat64(b) ); return from_softfloat64(ret); } double _eosio_f64_min( double af, double bf ) { @@ -440,7 +519,7 @@ class softfloat_api : public context_aware_api { return bf; if (f64_sign_bit(a) != f64_sign_bit(b)) return f64_sign_bit(a) ? af : bf; - return f64_lt( a, b ) ? af : bf; + return ::f64_lt( a, b ) ? af : bf; } double _eosio_f64_max( double af, double bf ) { float64_t a = to_softfloat64(af); @@ -451,12 +530,11 @@ class softfloat_api : public context_aware_api { return bf; if (f64_sign_bit(a) != f64_sign_bit(b)) return f64_sign_bit(a) ? bf : af; - return f64_lt( a, b ) ? bf : af; + return ::f64_lt( a, b ) ? bf : af; } double _eosio_f64_copysign( double af, double bf ) { float64_t a = to_softfloat64(af); float64_t b = to_softfloat64(bf); - uint64_t sign_of_a = a.v >> 63; uint64_t sign_of_b = b.v >> 63; a.v &= ~(uint64_t(1) << 63); // clear the sign bit a.v = a.v | (sign_of_b << 63); // add the sign of b @@ -477,7 +555,7 @@ class softfloat_api : public context_aware_api { return from_softfloat64(a); } double _eosio_f64_sqrt( double a ) { - float64_t ret = f64_sqrt( to_softfloat64(a) ); + float64_t ret = ::f64_sqrt( to_softfloat64(a) ); return from_softfloat64(ret); } // ceil, floor, trunc and nearest are lifted from libc @@ -486,22 +564,22 @@ class softfloat_api : public context_aware_api { float64_t ret; int e = a.v >> 52 & 0x7ff; float64_t y; - if (e >= 0x3ff+52 || f64_eq( a, { 0 } )) + if (e >= 0x3ff+52 || ::f64_eq( a, { 0 } )) return af; /* y = int(x) - x, where int(x) is an integer neighbor of x */ if (a.v >> 63) - y = f64_sub( f64_add( f64_sub( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); + y = ::f64_sub( ::f64_add( ::f64_sub( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); else 
- y = f64_sub( f64_sub( f64_add( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); + y = ::f64_sub( ::f64_sub( ::f64_add( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); /* special case because of non-nearest rounding modes */ if (e <= 0x3ff-1) { return a.v >> 63 ? -0.0 : 1.0; //float64_t{0x8000000000000000} : float64_t{0xBE99999A3F800000}; //either -0.0 or 1 } - if (f64_lt( y, to_softfloat64(0) )) { - ret = f64_add( f64_add( a, y ), to_softfloat64(1) ); // 0xBE99999A3F800000 } ); // plus 1 + if (::f64_lt( y, to_softfloat64(0) )) { + ret = ::f64_add( ::f64_add( a, y ), to_softfloat64(1) ); // 0xBE99999A3F800000 } ); // plus 1 return from_softfloat64(ret); } - ret = f64_add( a, y ); + ret = ::f64_add( a, y ); return from_softfloat64(ret); } double _eosio_f64_floor( double af ) { @@ -517,17 +595,17 @@ class softfloat_api : public context_aware_api { return af; } if (a.v >> 63) - y = f64_sub( f64_add( f64_sub( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); + y = ::f64_sub( ::f64_add( ::f64_sub( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); else - y = f64_sub( f64_sub( f64_add( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); + y = ::f64_sub( ::f64_sub( ::f64_add( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ), a ); if (e <= 0x3FF-1) { return a.v>>63 ? 
-1.0 : 0.0; //float64_t{0xBFF0000000000000} : float64_t{0}; // -1 or 0 } - if ( !f64_le( y, float64_t{0} ) ) { - ret = f64_sub( f64_add(a,y), to_softfloat64(1.0)); + if ( !::f64_le( y, float64_t{0} ) ) { + ret = ::f64_sub( ::f64_add(a,y), to_softfloat64(1.0)); return from_softfloat64(ret); } - ret = f64_add( a, y ); + ret = ::f64_add( a, y ); return from_softfloat64(ret); } double _eosio_f64_trunc( double af ) { @@ -553,19 +631,19 @@ class softfloat_api : public context_aware_api { if ( e >= 0x3FF+52 ) return af; if ( s ) - y = f64_add( f64_sub( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ); + y = ::f64_add( ::f64_sub( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ); else - y = f64_sub( f64_add( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ); - if ( f64_eq( y, float64_t{0} ) ) + y = ::f64_sub( ::f64_add( a, float64_t{inv_double_eps} ), float64_t{inv_double_eps} ); + if ( ::f64_eq( y, float64_t{0} ) ) return s ? -0.0 : 0.0; return from_softfloat64(y); } // double relops - bool _eosio_f64_eq( double a, double b ) { return f64_eq( to_softfloat64(a), to_softfloat64(b) ); } - bool _eosio_f64_ne( double a, double b ) { return !f64_eq( to_softfloat64(a), to_softfloat64(b) ); } - bool _eosio_f64_lt( double a, double b ) { return f64_lt( to_softfloat64(a), to_softfloat64(b) ); } - bool _eosio_f64_le( double a, double b ) { return f64_le( to_softfloat64(a), to_softfloat64(b) ); } + bool _eosio_f64_eq( double a, double b ) { return ::f64_eq( to_softfloat64(a), to_softfloat64(b) ); } + bool _eosio_f64_ne( double a, double b ) { return !::f64_eq( to_softfloat64(a), to_softfloat64(b) ); } + bool _eosio_f64_lt( double a, double b ) { return ::f64_lt( to_softfloat64(a), to_softfloat64(b) ); } + bool _eosio_f64_le( double a, double b ) { return ::f64_le( to_softfloat64(a), to_softfloat64(b) ); } bool _eosio_f64_gt( double af, double bf ) { float64_t a = to_softfloat64(af); float64_t b = to_softfloat64(bf); @@ -573,7 +651,7 @@ class 
softfloat_api : public context_aware_api { return false; if (is_nan(b)) return false; - return !f64_le( a, b ); + return !::f64_le( a, b ); } bool _eosio_f64_ge( double af, double bf ) { float64_t a = to_softfloat64(af); @@ -582,7 +660,7 @@ class softfloat_api : public context_aware_api { return false; if (is_nan(b)) return false; - return !f64_lt( a, b ); + return !::f64_lt( a, b ); } // float and double conversions @@ -701,14 +779,14 @@ class producer_api : public context_aware_api { public: using context_aware_api::context_aware_api; - int get_active_producers(array_ptr producers, size_t buffer_size) { + int get_active_producers(array_ptr producers, uint32_t buffer_size) { auto active_producers = context.get_active_producers(); size_t len = active_producers.size(); auto s = len * sizeof(chain::account_name); if( buffer_size == 0 ) return s; - auto copy_size = std::min( buffer_size, s ); + auto copy_size = std::min( static_cast(buffer_size), s ); memcpy( producers, active_producers.data(), copy_size ); return copy_size; @@ -724,8 +802,8 @@ class crypto_api : public context_aware_api { * no possible side effects other than "passing". 
*/ void assert_recover_key( const fc::sha256& digest, - array_ptr sig, size_t siglen, - array_ptr pub, size_t publen ) { + array_ptr sig, uint32_t siglen, + array_ptr pub, uint32_t publen ) { fc::crypto::signature s; fc::crypto::public_key p; datastream ds( sig, siglen ); @@ -734,23 +812,56 @@ class crypto_api : public context_aware_api { fc::raw::unpack(ds, s); fc::raw::unpack(pubds, p); + EOS_ASSERT(s.which() < context.db.get().num_supported_key_types, unactivated_signature_type, + "Unactivated signature type used during assert_recover_key"); + EOS_ASSERT(p.which() < context.db.get().num_supported_key_types, unactivated_key_type, + "Unactivated key type used when creating assert_recover_key"); + + if(context.control.is_producing_block()) + EOS_ASSERT(s.variable_size() <= context.control.configured_subjective_signature_length_limit(), + sig_variable_size_limit_exception, "signature variable length component size greater than subjective maximum"); + auto check = fc::crypto::public_key( s, digest, false ); EOS_ASSERT( check == p, crypto_api_exception, "Error expected key different than recovered key" ); } int recover_key( const fc::sha256& digest, - array_ptr sig, size_t siglen, - array_ptr pub, size_t publen ) { + array_ptr sig, uint32_t siglen, + array_ptr pub, uint32_t publen ) { fc::crypto::signature s; datastream ds( sig, siglen ); - datastream pubds( pub, publen ); - fc::raw::unpack(ds, s); - fc::raw::pack( pubds, fc::crypto::public_key( s, digest, false ) ); - return pubds.tellp(); + + EOS_ASSERT(s.which() < context.db.get().num_supported_key_types, unactivated_signature_type, + "Unactivated signature type used during recover_key"); + + if(context.control.is_producing_block()) + EOS_ASSERT(s.variable_size() <= context.control.configured_subjective_signature_length_limit(), + sig_variable_size_limit_exception, "signature variable length component size greater than subjective maximum"); + + + auto recovered = fc::crypto::public_key(s, digest, false); + + // the 
key types newer than the first 2 may be varible in length + if (s.which() >= config::genesis_num_supported_key_types ) { + EOS_ASSERT(publen >= 33, wasm_execution_error, + "destination buffer must at least be able to hold an ECC public key"); + auto packed_pubkey = fc::raw::pack(recovered); + auto copy_size = std::min(publen, packed_pubkey.size()); + memcpy(pub, packed_pubkey.data(), copy_size); + return packed_pubkey.size(); + } else { + // legacy behavior, key types 0 and 1 always pack to 33 bytes. + // this will do one less copy for those keys while maintaining the rules of + // [0..33) dest sizes: assert (asserts in fc::raw::pack) + // [33..inf) dest sizes: return packed size (always 33) + datastream out_ds( pub, publen ); + fc::raw::pack(out_ds, recovered); + return out_ds.tellp(); + } } - template auto encode(char* data, size_t datalen) { + template auto encode(char* data, uint32_t datalen) { Encoder e; const size_t bs = eosio::chain::config::hashing_checktime_block_size; while ( datalen > bs ) { @@ -763,39 +874,39 @@ class crypto_api : public context_aware_api { return e.result(); } - void assert_sha256(array_ptr data, size_t datalen, const fc::sha256& hash_val) { + void assert_sha256(array_ptr data, uint32_t datalen, const fc::sha256& hash_val) { auto result = encode( data, datalen ); EOS_ASSERT( result == hash_val, crypto_api_exception, "hash mismatch" ); } - void assert_sha1(array_ptr data, size_t datalen, const fc::sha1& hash_val) { + void assert_sha1(array_ptr data, uint32_t datalen, const fc::sha1& hash_val) { auto result = encode( data, datalen ); EOS_ASSERT( result == hash_val, crypto_api_exception, "hash mismatch" ); } - void assert_sha512(array_ptr data, size_t datalen, const fc::sha512& hash_val) { + void assert_sha512(array_ptr data, uint32_t datalen, const fc::sha512& hash_val) { auto result = encode( data, datalen ); EOS_ASSERT( result == hash_val, crypto_api_exception, "hash mismatch" ); } - void assert_ripemd160(array_ptr data, size_t 
datalen, const fc::ripemd160& hash_val) { + void assert_ripemd160(array_ptr data, uint32_t datalen, const fc::ripemd160& hash_val) { auto result = encode( data, datalen ); EOS_ASSERT( result == hash_val, crypto_api_exception, "hash mismatch" ); } - void sha1(array_ptr data, size_t datalen, fc::sha1& hash_val) { + void sha1(array_ptr data, uint32_t datalen, fc::sha1& hash_val) { hash_val = encode( data, datalen ); } - void sha256(array_ptr data, size_t datalen, fc::sha256& hash_val) { + void sha256(array_ptr data, uint32_t datalen, fc::sha256& hash_val) { hash_val = encode( data, datalen ); } - void sha512(array_ptr data, size_t datalen, fc::sha512& hash_val) { + void sha512(array_ptr data, uint32_t datalen, fc::sha512& hash_val) { hash_val = encode( data, datalen ); } - void ripemd160(array_ptr data, size_t datalen, fc::ripemd160& hash_val) { + void ripemd160(array_ptr data, uint32_t datalen, fc::ripemd160& hash_val) { hash_val = encode( data, datalen ); } }; @@ -804,9 +915,9 @@ class permission_api : public context_aware_api { public: using context_aware_api::context_aware_api; - bool check_transaction_authorization( array_ptr trx_data, size_t trx_size, - array_ptr pubkeys_data, size_t pubkeys_size, - array_ptr perms_data, size_t perms_size + bool check_transaction_authorization( array_ptr trx_data, uint32_t trx_size, + array_ptr pubkeys_data, uint32_t pubkeys_size, + array_ptr perms_data, uint32_t perms_size ) { transaction trx = fc::raw::unpack( trx_data, trx_size ); @@ -834,8 +945,8 @@ class permission_api : public context_aware_api { } bool check_permission_authorization( account_name account, permission_name permission, - array_ptr pubkeys_data, size_t pubkeys_size, - array_ptr perms_data, size_t perms_size, + array_ptr pubkeys_data, uint32_t pubkeys_size, + array_ptr perms_data, uint32_t perms_size, uint64_t delay_us ) { @@ -878,14 +989,14 @@ class permission_api : public context_aware_api { } private: - void unpack_provided_keys( flat_set& keys, const char* 
pubkeys_data, size_t pubkeys_size ) { + void unpack_provided_keys( flat_set& keys, const char* pubkeys_data, uint32_t pubkeys_size ) { keys.clear(); if( pubkeys_size == 0 ) return; keys = fc::raw::unpack>( pubkeys_data, pubkeys_size ); } - void unpack_provided_permissions( flat_set& permissions, const char* perms_data, size_t perms_size ) { + void unpack_provided_permissions( flat_set& permissions, const char* perms_data, uint32_t perms_size ) { permissions.clear(); if( perms_size == 0 ) return; @@ -898,16 +1009,16 @@ class authorization_api : public context_aware_api { public: using context_aware_api::context_aware_api; - void require_authorization( const account_name& account ) { + void require_auth( account_name account ) { context.require_authorization( account ); } - bool has_authorization( const account_name& account )const { + bool has_auth( account_name account )const { return context.has_authorization( account ); } - void require_authorization(const account_name& account, - const permission_name& permission) { + void require_auth2( account_name account, + permission_name permission) { context.require_authorization( account, permission ); } @@ -915,7 +1026,7 @@ class authorization_api : public context_aware_api { context.require_recipient( recipient ); } - bool is_account( const account_name& account )const { + bool is_account( account_name account )const { return context.is_account( account ); } @@ -965,7 +1076,7 @@ class context_free_system_api : public context_aware_api { } } - void eosio_assert_message( bool condition, array_ptr msg, size_t msg_len ) { + void eosio_assert_message( bool condition, array_ptr msg, uint32_t msg_len ) { if( BOOST_UNLIKELY( !condition ) ) { const size_t sz = msg_len > max_assert_message ? 
max_assert_message : msg_len; std::string message( msg, sz ); @@ -1006,12 +1117,12 @@ class action_api : public context_aware_api { action_api( apply_context& ctx ) :context_aware_api(ctx,true){} - int read_action_data(array_ptr memory, size_t buffer_size) { + int read_action_data(array_ptr memory, uint32_t buffer_size) { auto s = context.get_action().data.size(); if( buffer_size == 0 ) return s; - auto copy_size = std::min( buffer_size, s ); - memcpy( memory, context.get_action().data.data(), copy_size ); + auto copy_size = std::min( static_cast(buffer_size), s ); + memcpy( (char*)memory.value, context.get_action().data.data(), copy_size ); return copy_size; } @@ -1038,7 +1149,7 @@ class console_api : public context_aware_api { } } - void prints_l(array_ptr str, size_t str_len ) { + void prints_l(array_ptr str, uint32_t str_len ) { if ( !ignore ) { context.console_append(string(str, str_len)); } @@ -1145,13 +1256,13 @@ class console_api : public context_aware_api { } } - void printn(const name& value) { + void printn(name value) { if ( !ignore ) { context.console_append(value.to_string()); } } - void printhex(array_ptr data, size_t data_len ) { + void printhex(array_ptr data, uint32_t data_len ) { if ( !ignore ) { context.console_append(fc::to_hex(data, data_len)); } @@ -1163,10 +1274,10 @@ class console_api : public context_aware_api { #define DB_API_METHOD_WRAPPERS_SIMPLE_SECONDARY(IDX, TYPE)\ int db_##IDX##_store( uint64_t scope, uint64_t table, uint64_t payer, uint64_t id, const TYPE& secondary ) {\ - return context.IDX.store( scope, table, payer, id, secondary );\ + return context.IDX.store( scope, table, account_name(payer), id, secondary );\ }\ void db_##IDX##_update( int iterator, uint64_t payer, const TYPE& secondary ) {\ - return context.IDX.update( iterator, payer, secondary );\ + return context.IDX.update( iterator, account_name(payer), secondary );\ }\ void db_##IDX##_remove( int iterator ) {\ return context.IDX.remove( iterator );\ @@ -1194,45 
+1305,45 @@ class console_api : public context_aware_api { } #define DB_API_METHOD_WRAPPERS_ARRAY_SECONDARY(IDX, ARR_SIZE, ARR_ELEMENT_TYPE)\ - int db_##IDX##_store( uint64_t scope, uint64_t table, uint64_t payer, uint64_t id, array_ptr data, size_t data_len) {\ + int db_##IDX##_store( uint64_t scope, uint64_t table, uint64_t payer, uint64_t id, array_ptr data, uint32_t data_len) {\ EOS_ASSERT( data_len == ARR_SIZE,\ db_api_exception,\ "invalid size of secondary key array for " #IDX ": given ${given} bytes but expected ${expected} bytes",\ ("given",data_len)("expected",ARR_SIZE) );\ - return context.IDX.store(scope, table, payer, id, data.value);\ + return context.IDX.store(scope, table, account_name(payer), id, data.value);\ }\ - void db_##IDX##_update( int iterator, uint64_t payer, array_ptr data, size_t data_len ) {\ + void db_##IDX##_update( int iterator, uint64_t payer, array_ptr data, uint32_t data_len ) {\ EOS_ASSERT( data_len == ARR_SIZE,\ db_api_exception,\ "invalid size of secondary key array for " #IDX ": given ${given} bytes but expected ${expected} bytes",\ ("given",data_len)("expected",ARR_SIZE) );\ - return context.IDX.update(iterator, payer, data.value);\ + return context.IDX.update(iterator, account_name(payer), data.value);\ }\ void db_##IDX##_remove( int iterator ) {\ return context.IDX.remove(iterator);\ }\ - int db_##IDX##_find_secondary( uint64_t code, uint64_t scope, uint64_t table, array_ptr data, size_t data_len, uint64_t& primary ) {\ + int db_##IDX##_find_secondary( uint64_t code, uint64_t scope, uint64_t table, array_ptr data, uint32_t data_len, uint64_t& primary ) {\ EOS_ASSERT( data_len == ARR_SIZE,\ db_api_exception,\ "invalid size of secondary key array for " #IDX ": given ${given} bytes but expected ${expected} bytes",\ ("given",data_len)("expected",ARR_SIZE) );\ return context.IDX.find_secondary(code, scope, table, data, primary);\ }\ - int db_##IDX##_find_primary( uint64_t code, uint64_t scope, uint64_t table, array_ptr data, 
size_t data_len, uint64_t primary ) {\ + int db_##IDX##_find_primary( uint64_t code, uint64_t scope, uint64_t table, array_ptr data, uint32_t data_len, uint64_t primary ) {\ EOS_ASSERT( data_len == ARR_SIZE,\ db_api_exception,\ "invalid size of secondary key array for " #IDX ": given ${given} bytes but expected ${expected} bytes",\ ("given",data_len)("expected",ARR_SIZE) );\ return context.IDX.find_primary(code, scope, table, data.value, primary);\ }\ - int db_##IDX##_lowerbound( uint64_t code, uint64_t scope, uint64_t table, array_ptr data, size_t data_len, uint64_t& primary ) {\ + int db_##IDX##_lowerbound( uint64_t code, uint64_t scope, uint64_t table, array_ptr data, uint32_t data_len, uint64_t& primary ) {\ EOS_ASSERT( data_len == ARR_SIZE,\ db_api_exception,\ "invalid size of secondary key array for " #IDX ": given ${given} bytes but expected ${expected} bytes",\ ("given",data_len)("expected",ARR_SIZE) );\ return context.IDX.lowerbound_secondary(code, scope, table, data.value, primary);\ }\ - int db_##IDX##_upperbound( uint64_t code, uint64_t scope, uint64_t table, array_ptr data, size_t data_len, uint64_t& primary ) {\ + int db_##IDX##_upperbound( uint64_t code, uint64_t scope, uint64_t table, array_ptr data, uint32_t data_len, uint64_t& primary ) {\ EOS_ASSERT( data_len == ARR_SIZE,\ db_api_exception,\ "invalid size of secondary key array for " #IDX ": given ${given} bytes but expected ${expected} bytes",\ @@ -1252,11 +1363,11 @@ class console_api : public context_aware_api { #define DB_API_METHOD_WRAPPERS_FLOAT_SECONDARY(IDX, TYPE)\ int db_##IDX##_store( uint64_t scope, uint64_t table, uint64_t payer, uint64_t id, const TYPE& secondary ) {\ EOS_ASSERT( !softfloat_api::is_nan( secondary ), transaction_exception, "NaN is not an allowed value for a secondary key" );\ - return context.IDX.store( scope, table, payer, id, secondary );\ + return context.IDX.store( scope, table, account_name(payer), id, secondary );\ }\ void db_##IDX##_update( int iterator, 
uint64_t payer, const TYPE& secondary ) {\ EOS_ASSERT( !softfloat_api::is_nan( secondary ), transaction_exception, "NaN is not an allowed value for a secondary key" );\ - return context.IDX.update( iterator, payer, secondary );\ + return context.IDX.update( iterator, account_name(payer), secondary );\ }\ void db_##IDX##_remove( int iterator ) {\ return context.IDX.remove( iterator );\ @@ -1290,16 +1401,16 @@ class database_api : public context_aware_api { public: using context_aware_api::context_aware_api; - int db_store_i64( uint64_t scope, uint64_t table, uint64_t payer, uint64_t id, array_ptr buffer, size_t buffer_size ) { - return context.db_store_i64( scope, table, payer, id, buffer, buffer_size ); + int db_store_i64( uint64_t scope, uint64_t table, uint64_t payer, uint64_t id, array_ptr buffer, uint32_t buffer_size ) { + return context.db_store_i64( name(scope), name(table), account_name(payer), id, buffer, buffer_size ); } - void db_update_i64( int itr, uint64_t payer, array_ptr buffer, size_t buffer_size ) { - context.db_update_i64( itr, payer, buffer, buffer_size ); + void db_update_i64( int itr, uint64_t payer, array_ptr buffer, uint32_t buffer_size ) { + context.db_update_i64( itr, account_name(payer), buffer, buffer_size ); } void db_remove_i64( int itr ) { context.db_remove_i64( itr ); } - int db_get_i64( int itr, array_ptr buffer, size_t buffer_size ) { + int db_get_i64( int itr, array_ptr buffer, uint32_t buffer_size ) { return context.db_get_i64( itr, buffer, buffer_size ); } int db_next_i64( int itr, uint64_t& primary ) { @@ -1309,16 +1420,16 @@ class database_api : public context_aware_api { return context.db_previous_i64(itr, primary); } int db_find_i64( uint64_t code, uint64_t scope, uint64_t table, uint64_t id ) { - return context.db_find_i64( code, scope, table, id ); + return context.db_find_i64( name(code), name(scope), name(table), id ); } int db_lowerbound_i64( uint64_t code, uint64_t scope, uint64_t table, uint64_t id ) { - return 
context.db_lowerbound_i64( code, scope, table, id ); + return context.db_lowerbound_i64( name(code), name(scope), name(table), id ); } int db_upperbound_i64( uint64_t code, uint64_t scope, uint64_t table, uint64_t id ) { - return context.db_upperbound_i64( code, scope, table, id ); + return context.db_upperbound_i64( name(code), name(scope), name(table), id ); } int db_end_i64( uint64_t code, uint64_t scope, uint64_t table ) { - return context.db_end_i64( code, scope, table ); + return context.db_end_i64( name(code), name(scope), name(table) ); } DB_API_METHOD_WRAPPERS_SIMPLE_SECONDARY(idx64, uint64_t) @@ -1333,17 +1444,17 @@ class memory_api : public context_aware_api { memory_api( apply_context& ctx ) :context_aware_api(ctx,true){} - char* memcpy( array_ptr dest, array_ptr src, size_t length) { + char* memcpy( array_ptr dest, array_ptr src, uint32_t length) { EOS_ASSERT((size_t)(std::abs((ptrdiff_t)dest.value - (ptrdiff_t)src.value)) >= length, overlapping_memory_error, "memcpy can only accept non-aliasing pointers"); return (char *)::memcpy(dest, src, length); } - char* memmove( array_ptr dest, array_ptr src, size_t length) { + char* memmove( array_ptr dest, array_ptr src, uint32_t length) { return (char *)::memmove(dest, src, length); } - int memcmp( array_ptr dest, array_ptr src, size_t length) { + int memcmp( array_ptr dest, array_ptr src, uint32_t length) { int ret = ::memcmp(dest, src, length); if(ret < 0) return -1; @@ -1352,7 +1463,7 @@ class memory_api : public context_aware_api { return 0; } - char* memset( array_ptr dest, int value, size_t length ) { + char* memset( array_ptr dest, int value, uint32_t length ) { return (char *)::memset( dest, value, length ); } }; @@ -1361,7 +1472,7 @@ class transaction_api : public context_aware_api { public: using context_aware_api::context_aware_api; - void send_inline( array_ptr data, size_t data_len ) { + void send_inline( array_ptr data, uint32_t data_len ) { //TODO: Why is this limit even needed? 
And why is it not consistently checked on actions in input or deferred transactions EOS_ASSERT( data_len < context.control.get_global_properties().configuration.max_inline_action_size, inline_action_too_big, "inline action too big" ); @@ -1371,7 +1482,7 @@ class transaction_api : public context_aware_api { context.execute_inline(std::move(act)); } - void send_context_free_inline( array_ptr data, size_t data_len ) { + void send_context_free_inline( array_ptr data, uint32_t data_len ) { //TODO: Why is this limit even needed? And why is it not consistently checked on actions in input or deferred transactions EOS_ASSERT( data_len < context.control.get_global_properties().configuration.max_inline_action_size, inline_action_too_big, "inline action too big" ); @@ -1381,7 +1492,7 @@ class transaction_api : public context_aware_api { context.execute_context_free_inline(std::move(act)); } - void send_deferred( const uint128_t& sender_id, account_name payer, array_ptr data, size_t data_len, uint32_t replace_existing) { + void send_deferred( const uint128_t& sender_id, account_name payer, array_ptr data, uint32_t data_len, uint32_t replace_existing) { transaction trx; fc::raw::unpack(data, data_len, trx); context.schedule_deferred_transaction(sender_id, payer, std::move(trx), replace_existing); @@ -1399,13 +1510,13 @@ class context_free_transaction_api : public context_aware_api { context_free_transaction_api( apply_context& ctx ) :context_aware_api(ctx,true){} - int read_transaction( array_ptr data, size_t buffer_size ) { + int read_transaction( array_ptr data, uint32_t buffer_size ) { bytes trx = context.get_packed_transaction(); auto s = trx.size(); if( buffer_size == 0) return s; - auto copy_size = std::min( buffer_size, s ); + auto copy_size = std::min( static_cast(buffer_size), s ); memcpy( data, trx.data(), copy_size ); return copy_size; @@ -1426,7 +1537,7 @@ class context_free_transaction_api : public context_aware_api { return context.trx_context.trx.ref_block_prefix; 
} - int get_action( uint32_t type, uint32_t index, array_ptr buffer, size_t buffer_size )const { + int get_action( uint32_t type, uint32_t index, array_ptr buffer, uint32_t buffer_size )const { return context.get_action( type, index, buffer, buffer_size ); } }; @@ -1698,53 +1809,53 @@ class call_depth_api : public context_aware_api { }; REGISTER_INJECTED_INTRINSICS(call_depth_api, - (call_depth_assert, void() ) + (call_depth_assert, void() ) ); REGISTER_INTRINSICS(compiler_builtins, - (__ashlti3, void(int, int64_t, int64_t, int) ) - (__ashrti3, void(int, int64_t, int64_t, int) ) - (__lshlti3, void(int, int64_t, int64_t, int) ) - (__lshrti3, void(int, int64_t, int64_t, int) ) - (__divti3, void(int, int64_t, int64_t, int64_t, int64_t) ) - (__udivti3, void(int, int64_t, int64_t, int64_t, int64_t) ) - (__modti3, void(int, int64_t, int64_t, int64_t, int64_t) ) - (__umodti3, void(int, int64_t, int64_t, int64_t, int64_t) ) - (__multi3, void(int, int64_t, int64_t, int64_t, int64_t) ) - (__addtf3, void(int, int64_t, int64_t, int64_t, int64_t) ) - (__subtf3, void(int, int64_t, int64_t, int64_t, int64_t) ) - (__multf3, void(int, int64_t, int64_t, int64_t, int64_t) ) - (__divtf3, void(int, int64_t, int64_t, int64_t, int64_t) ) - (__eqtf2, int(int64_t, int64_t, int64_t, int64_t) ) - (__netf2, int(int64_t, int64_t, int64_t, int64_t) ) - (__getf2, int(int64_t, int64_t, int64_t, int64_t) ) - (__gttf2, int(int64_t, int64_t, int64_t, int64_t) ) - (__lttf2, int(int64_t, int64_t, int64_t, int64_t) ) - (__letf2, int(int64_t, int64_t, int64_t, int64_t) ) - (__cmptf2, int(int64_t, int64_t, int64_t, int64_t) ) - (__unordtf2, int(int64_t, int64_t, int64_t, int64_t) ) - (__negtf2, void (int, int64_t, int64_t) ) - (__floatsitf, void (int, int) ) - (__floatunsitf, void (int, int) ) - (__floatditf, void (int, int64_t) ) - (__floatunditf, void (int, int64_t) ) - (__floattidf, double (int64_t, int64_t) ) - (__floatuntidf, double (int64_t, int64_t) ) - (__floatsidf, double(int) ) - 
(__extendsftf2, void(int, float) ) - (__extenddftf2, void(int, double) ) - (__fixtfti, void(int, int64_t, int64_t) ) - (__fixtfdi, int64_t(int64_t, int64_t) ) - (__fixtfsi, int(int64_t, int64_t) ) - (__fixunstfti, void(int, int64_t, int64_t) ) - (__fixunstfdi, int64_t(int64_t, int64_t) ) - (__fixunstfsi, int(int64_t, int64_t) ) - (__fixsfti, void(int, float) ) - (__fixdfti, void(int, double) ) - (__fixunssfti, void(int, float) ) - (__fixunsdfti, void(int, double) ) - (__trunctfdf2, double(int64_t, int64_t) ) - (__trunctfsf2, float(int64_t, int64_t) ) + (__ashlti3, void(int, int64_t, int64_t, int) ) + (__ashrti3, void(int, int64_t, int64_t, int) ) + (__lshlti3, void(int, int64_t, int64_t, int) ) + (__lshrti3, void(int, int64_t, int64_t, int) ) + (__divti3, void(int, int64_t, int64_t, int64_t, int64_t) ) + (__udivti3, void(int, int64_t, int64_t, int64_t, int64_t) ) + (__modti3, void(int, int64_t, int64_t, int64_t, int64_t) ) + (__umodti3, void(int, int64_t, int64_t, int64_t, int64_t) ) + (__multi3, void(int, int64_t, int64_t, int64_t, int64_t) ) + (__addtf3, void(int, int64_t, int64_t, int64_t, int64_t) ) + (__subtf3, void(int, int64_t, int64_t, int64_t, int64_t) ) + (__multf3, void(int, int64_t, int64_t, int64_t, int64_t) ) + (__divtf3, void(int, int64_t, int64_t, int64_t, int64_t) ) + (__eqtf2, int(int64_t, int64_t, int64_t, int64_t) ) + (__netf2, int(int64_t, int64_t, int64_t, int64_t) ) + (__getf2, int(int64_t, int64_t, int64_t, int64_t) ) + (__gttf2, int(int64_t, int64_t, int64_t, int64_t) ) + (__lttf2, int(int64_t, int64_t, int64_t, int64_t) ) + (__letf2, int(int64_t, int64_t, int64_t, int64_t) ) + (__cmptf2, int(int64_t, int64_t, int64_t, int64_t) ) + (__unordtf2, int(int64_t, int64_t, int64_t, int64_t) ) + (__negtf2, void (int, int64_t, int64_t) ) + (__floatsitf, void (int, int) ) + (__floatunsitf, void (int, int) ) + (__floatditf, void (int, int64_t) ) + (__floatunditf, void (int, int64_t) ) + (__floattidf, double (int64_t, int64_t) ) + (__floatuntidf, 
double (int64_t, int64_t) ) + (__floatsidf, double(int) ) + (__extendsftf2, void(int, float) ) + (__extenddftf2, void(int, double) ) + (__fixtfti, void(int, int64_t, int64_t) ) + (__fixtfdi, int64_t(int64_t, int64_t) ) + (__fixtfsi, int(int64_t, int64_t) ) + (__fixunstfti, void(int, int64_t, int64_t) ) + (__fixunstfdi, int64_t(int64_t, int64_t) ) + (__fixunstfsi, int(int64_t, int64_t) ) + (__fixsfti, void(int, float) ) + (__fixdfti, void(int, double) ) + (__fixunssfti, void(int, float) ) + (__fixunsdfti, void(int, double) ) + (__trunctfdf2, double(int64_t, int64_t) ) + (__trunctfsf2, float(int64_t, int64_t) ) ); REGISTER_INTRINSICS(privileged_api, @@ -1753,6 +1864,7 @@ REGISTER_INTRINSICS(privileged_api, (get_resource_limits, void(int64_t,int,int,int) ) (set_resource_limits, void(int64_t,int64_t,int64_t,int64_t) ) (set_proposed_producers, int64_t(int,int) ) + (set_proposed_producers_ex, int64_t(int64_t, int, int) ) (get_blockchain_parameters_packed, int(int, int) ) (set_blockchain_parameters_packed, void(int,int) ) (is_privileged, int(int64_t) ) @@ -1761,7 +1873,7 @@ REGISTER_INTRINSICS(privileged_api, ); REGISTER_INJECTED_INTRINSICS(transaction_context, - (checktime, void()) + (checktime, void() ) ); REGISTER_INTRINSICS(producer_api, @@ -1769,40 +1881,40 @@ REGISTER_INTRINSICS(producer_api, ); #define DB_SECONDARY_INDEX_METHODS_SIMPLE(IDX) \ - (db_##IDX##_store, int(int64_t,int64_t,int64_t,int64_t,int))\ - (db_##IDX##_remove, void(int))\ - (db_##IDX##_update, void(int,int64_t,int))\ - (db_##IDX##_find_primary, int(int64_t,int64_t,int64_t,int,int64_t))\ - (db_##IDX##_find_secondary, int(int64_t,int64_t,int64_t,int,int))\ - (db_##IDX##_lowerbound, int(int64_t,int64_t,int64_t,int,int))\ - (db_##IDX##_upperbound, int(int64_t,int64_t,int64_t,int,int))\ - (db_##IDX##_end, int(int64_t,int64_t,int64_t))\ - (db_##IDX##_next, int(int, int))\ - (db_##IDX##_previous, int(int, int)) + (db_##IDX##_store, int(int64_t,int64_t,int64_t,int64_t,int) )\ + (db_##IDX##_remove, 
void(int) )\ + (db_##IDX##_update, void(int,int64_t,int) )\ + (db_##IDX##_find_primary, int(int64_t,int64_t,int64_t,int,int64_t) )\ + (db_##IDX##_find_secondary, int(int64_t,int64_t,int64_t,int,int) )\ + (db_##IDX##_lowerbound, int(int64_t,int64_t,int64_t,int,int) )\ + (db_##IDX##_upperbound, int(int64_t,int64_t,int64_t,int,int) )\ + (db_##IDX##_end, int(int64_t,int64_t,int64_t) )\ + (db_##IDX##_next, int(int, int) )\ + (db_##IDX##_previous, int(int, int) ) #define DB_SECONDARY_INDEX_METHODS_ARRAY(IDX) \ - (db_##IDX##_store, int(int64_t,int64_t,int64_t,int64_t,int,int))\ - (db_##IDX##_remove, void(int))\ - (db_##IDX##_update, void(int,int64_t,int,int))\ - (db_##IDX##_find_primary, int(int64_t,int64_t,int64_t,int,int,int64_t))\ - (db_##IDX##_find_secondary, int(int64_t,int64_t,int64_t,int,int,int))\ - (db_##IDX##_lowerbound, int(int64_t,int64_t,int64_t,int,int,int))\ - (db_##IDX##_upperbound, int(int64_t,int64_t,int64_t,int,int,int))\ - (db_##IDX##_end, int(int64_t,int64_t,int64_t))\ - (db_##IDX##_next, int(int, int))\ - (db_##IDX##_previous, int(int, int)) + (db_##IDX##_store, int(int64_t,int64_t,int64_t,int64_t,int,int) )\ + (db_##IDX##_remove, void(int) )\ + (db_##IDX##_update, void(int,int64_t,int,int) )\ + (db_##IDX##_find_primary, int(int64_t,int64_t,int64_t,int,int,int64_t) )\ + (db_##IDX##_find_secondary, int(int64_t,int64_t,int64_t,int,int,int) )\ + (db_##IDX##_lowerbound, int(int64_t,int64_t,int64_t,int,int,int) )\ + (db_##IDX##_upperbound, int(int64_t,int64_t,int64_t,int,int,int) )\ + (db_##IDX##_end, int(int64_t,int64_t,int64_t) )\ + (db_##IDX##_next, int(int, int) )\ + (db_##IDX##_previous, int(int, int) ) REGISTER_INTRINSICS( database_api, - (db_store_i64, int(int64_t,int64_t,int64_t,int64_t,int,int)) - (db_update_i64, void(int,int64_t,int,int)) - (db_remove_i64, void(int)) - (db_get_i64, int(int, int, int)) - (db_next_i64, int(int, int)) - (db_previous_i64, int(int, int)) - (db_find_i64, int(int64_t,int64_t,int64_t,int64_t)) - (db_lowerbound_i64, 
int(int64_t,int64_t,int64_t,int64_t)) - (db_upperbound_i64, int(int64_t,int64_t,int64_t,int64_t)) - (db_end_i64, int(int64_t,int64_t,int64_t)) + (db_store_i64, int(int64_t,int64_t,int64_t,int64_t,int,int) ) + (db_update_i64, void(int,int64_t,int,int) ) + (db_remove_i64, void(int) ) + (db_get_i64, int(int, int, int) ) + (db_next_i64, int(int, int) ) + (db_previous_i64, int(int, int) ) + (db_find_i64, int(int64_t,int64_t,int64_t,int64_t) ) + (db_lowerbound_i64, int(int64_t,int64_t,int64_t,int64_t) ) + (db_upperbound_i64, int(int64_t,int64_t,int64_t,int64_t) ) + (db_end_i64, int(int64_t,int64_t,int64_t) ) DB_SECONDARY_INDEX_METHODS_SIMPLE(idx64) DB_SECONDARY_INDEX_METHODS_SIMPLE(idx128) @@ -1828,8 +1940,8 @@ REGISTER_INTRINSICS(crypto_api, REGISTER_INTRINSICS(permission_api, (check_transaction_authorization, int(int, int, int, int, int, int) ) (check_permission_authorization, int(int64_t, int64_t, int, int, int, int, int64_t) ) - (get_permission_last_used, int64_t(int64_t, int64_t) ) - (get_account_creation_time, int64_t(int64_t) ) + (get_permission_last_used, int64_t(int64_t, int64_t) ) + (get_account_creation_time, int64_t(int64_t) ) ); @@ -1851,15 +1963,15 @@ REGISTER_INTRINSICS(context_free_system_api, REGISTER_INTRINSICS(action_api, (read_action_data, int(int, int) ) (action_data_size, int() ) - (current_receiver, int64_t() ) + (current_receiver, int64_t() ) ); REGISTER_INTRINSICS(authorization_api, - (require_recipient, void(int64_t) ) - (require_authorization, void(int64_t), "require_auth", void(authorization_api::*)(const account_name&) ) - (require_authorization, void(int64_t, int64_t), "require_auth2", void(authorization_api::*)(const account_name&, const permission_name& permission) ) - (has_authorization, int(int64_t), "has_auth", bool(authorization_api::*)(const account_name&)const ) - (is_account, int(int64_t) ) + (require_recipient, void(int64_t) ) + (require_auth, void(int64_t) ) + (require_auth2, void(int64_t, int64_t) ) + (has_auth, int(int64_t) ) + 
(is_account, int(int64_t) ) ); REGISTER_INTRINSICS(console_api, @@ -1877,19 +1989,19 @@ REGISTER_INTRINSICS(console_api, ); REGISTER_INTRINSICS(context_free_transaction_api, - (read_transaction, int(int, int) ) - (transaction_size, int() ) - (expiration, int() ) - (tapos_block_prefix, int() ) - (tapos_block_num, int() ) - (get_action, int (int, int, int, int) ) + (read_transaction, int(int, int) ) + (transaction_size, int() ) + (expiration, int() ) + (tapos_block_prefix, int() ) + (tapos_block_num, int() ) + (get_action, int(int, int, int, int) ) ); REGISTER_INTRINSICS(transaction_api, - (send_inline, void(int, int) ) - (send_context_free_inline, void(int, int) ) + (send_inline, void(int, int) ) + (send_context_free_inline, void(int, int) ) (send_deferred, void(int, int64_t, int, int, int32_t) ) - (cancel_deferred, int(int) ) + (cancel_deferred, int(int) ) ); REGISTER_INTRINSICS(context_free_api, @@ -1967,10 +2079,14 @@ REGISTER_INJECTED_INTRINSICS(softfloat_api, std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime) { std::string s; in >> s; - if (s == "wavm") - runtime = eosio::chain::wasm_interface::vm_type::wavm; - else if (s == "wabt") + if (s == "wabt") runtime = eosio::chain::wasm_interface::vm_type::wabt; + else if (s == "eos-vm") + runtime = eosio::chain::wasm_interface::vm_type::eos_vm; + else if (s == "eos-vm-jit") + runtime = eosio::chain::wasm_interface::vm_type::eos_vm_jit; + else if (s == "eos-vm-oc") + runtime = eosio::chain::wasm_interface::vm_type::eos_vm_oc; else in.setstate(std::ios_base::failbit); return in; diff --git a/libraries/chain/wast_to_wasm.cpp b/libraries/chain/wast_to_wasm.cpp index 09add7a0b6e..0527d57f604 100644 --- a/libraries/chain/wast_to_wasm.cpp +++ b/libraries/chain/wast_to_wasm.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/libraries/chain/webassembly/eos-vm-oc.cpp b/libraries/chain/webassembly/eos-vm-oc.cpp new file mode 100644 
index 00000000000..3db4399f133 --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc.cpp @@ -0,0 +1,54 @@ +#include +#include +#include +#include +#include + +#include +#include + +namespace eosio { namespace chain { namespace webassembly { namespace eosvmoc { + +class eosvmoc_instantiated_module : public wasm_instantiated_module_interface { + public: + eosvmoc_instantiated_module(const digest_type& code_hash, const uint8_t& vm_version, eosvmoc_runtime& wr) : + _code_hash(code_hash), + _vm_version(vm_version), + _eosvmoc_runtime(wr) + { + + } + + ~eosvmoc_instantiated_module() { + _eosvmoc_runtime.cc.free_code(_code_hash, _vm_version); + } + + void apply(apply_context& context) override { + const code_descriptor* const cd = _eosvmoc_runtime.cc.get_descriptor_for_code_sync(_code_hash, _vm_version); + EOS_ASSERT(cd, wasm_execution_error, "EOS VM OC instantiation failed"); + + _eosvmoc_runtime.exec.execute(*cd, _eosvmoc_runtime.mem, context); + } + + const digest_type _code_hash; + const uint8_t _vm_version; + eosvmoc_runtime& _eosvmoc_runtime; +}; + +eosvmoc_runtime::eosvmoc_runtime(const boost::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) + : cc(data_dir, eosvmoc_config, db), exec(cc) { +} + +eosvmoc_runtime::~eosvmoc_runtime() { +} + +std::unique_ptr eosvmoc_runtime::instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory, + const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) { + + return std::make_unique(code_hash, vm_type, *this); +} + +//never called. 
EOS VM OC overrides eosio_exit to its own implementation +void eosvmoc_runtime::immediately_exit_currently_running_module() {} + +}}}} diff --git a/libraries/chain/webassembly/eos-vm-oc/About WAVM b/libraries/chain/webassembly/eos-vm-oc/About WAVM new file mode 100644 index 00000000000..a948eb0736d --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/About WAVM @@ -0,0 +1,26 @@ +The EOS VM Optimized Compiler was created in part based on WAVM +https://github.com/WebAssembly/wasm-jit-prototype +subject the following: +Copyright (c) 2019, Andrew Scheidecker +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are +permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright notice, this list of +conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this list of +conditions and the following disclaimer in the documentation and/or other materials +provided with the distribution. +* Neither the name of WAVM nor the names of its contributors may be used to endorse or +promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/libraries/wasm-jit/Source/Runtime/LLVMEmitIR.cpp b/libraries/chain/webassembly/eos-vm-oc/LLVMEmitIR.cpp similarity index 52% rename from libraries/wasm-jit/Source/Runtime/LLVMEmitIR.cpp rename to libraries/chain/webassembly/eos-vm-oc/LLVMEmitIR.cpp index 2c002c497ea..d52a2b94ab8 100644 --- a/libraries/wasm-jit/Source/Runtime/LLVMEmitIR.cpp +++ b/libraries/chain/webassembly/eos-vm-oc/LLVMEmitIR.cpp @@ -1,39 +1,146 @@ +/* +The EOS VM Optimized Compiler was created in part based on WAVM +https://github.com/WebAssembly/wasm-jit-prototype +subject the following: + +Copyright (c) 2016-2019, Andrew Scheidecker +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +* Neither the name of WAVM nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include "LLVMJIT.h" #include "llvm/ADT/SmallVector.h" -#include "Inline/Timing.h" #include "IR/Operators.h" #include "IR/OperatorPrinter.h" -#include "Logging/Logging.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/Analysis/Passes.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/Verifier.h" +#include "llvm/IR/ValueHandle.h" +#include "llvm/IR/DebugLoc.h" +#include "llvm/Object/ObjectFile.h" +#include "llvm/Object/SymbolSize.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/TargetSelect.h" +#include "llvm/Support/Host.h" +#include "llvm/Support/DynamicLibrary.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/IR/DIBuilder.h" +#include "llvm/Transforms/InstCombine/InstCombine.h" +#include "llvm/Transforms/Utils.h" + +#include +#include + #define ENABLE_LOGGING 0 #define ENABLE_FUNCTION_ENTER_EXIT_HOOKS 0 using namespace IR; +namespace eosio { namespace chain { namespace eosvmoc { namespace LLVMJIT { + static std::string getExternalFunctionName(Uptr functionDefIndex) + { + return "wasmFunc" + std::to_string(functionDefIndex); + } + + bool getFunctionIndexFromExternalName(const char* externalName,Uptr& outFunctionDefIndex) + { + const char wasmFuncPrefix[] = "wasmFunc"; + const Uptr numPrefixChars = sizeof(wasmFuncPrefix) - 1; + 
if(!strncmp(externalName,wasmFuncPrefix,numPrefixChars)) + { + char* numberEnd = nullptr; + U64 functionDefIndex64 = std::strtoull(externalName + numPrefixChars,&numberEnd,10); + if(functionDefIndex64 > UINTPTR_MAX) { return false; } + outFunctionDefIndex = Uptr(functionDefIndex64); + return true; + } + else { return false; } + } + + llvm::Value* CreateInBoundsGEPWAR(llvm::IRBuilder<>& irBuilder, llvm::Value* Ptr, llvm::Value* v1, llvm::Value* v2 = nullptr); + + llvm::LLVMContext context; + llvm::Type* llvmResultTypes[(Uptr)ResultType::num]; + + llvm::Type* llvmI8Type; + llvm::Type* llvmI16Type; + llvm::Type* llvmI32Type; + llvm::Type* llvmI64Type; + llvm::Type* llvmF32Type; + llvm::Type* llvmF64Type; + llvm::Type* llvmVoidType; + llvm::Type* llvmBoolType; + llvm::Type* llvmI8PtrType; + + llvm::Constant* typedZeroConstants[(Uptr)ValueType::num]; + + // Converts a WebAssembly type to a LLVM type. + inline llvm::Type* asLLVMType(ValueType type) { return llvmResultTypes[(Uptr)asResultType(type)]; } + inline llvm::Type* asLLVMType(ResultType type) { return llvmResultTypes[(Uptr)type]; } + + // Converts a WebAssembly function type to a LLVM type. + inline llvm::FunctionType* asLLVMType(const FunctionType* functionType) + { + auto llvmArgTypes = (llvm::Type**)alloca(sizeof(llvm::Type*) * functionType->parameters.size()); + for(Uptr argIndex = 0;argIndex < functionType->parameters.size();++argIndex) + { + llvmArgTypes[argIndex] = asLLVMType(functionType->parameters[argIndex]); + } + auto llvmResultType = asLLVMType(functionType->ret); + return llvm::FunctionType::get(llvmResultType,llvm::ArrayRef(llvmArgTypes,functionType->parameters.size()),false); + } + + // Overloaded functions that compile a literal value to a LLVM constant of the right type. 
+ inline llvm::ConstantInt* emitLiteral(U32 value) { return (llvm::ConstantInt*)llvm::ConstantInt::get(llvmI32Type,llvm::APInt(32,(U64)value,false)); } + inline llvm::ConstantInt* emitLiteral(I32 value) { return (llvm::ConstantInt*)llvm::ConstantInt::get(llvmI32Type,llvm::APInt(32,(I64)value,false)); } + inline llvm::ConstantInt* emitLiteral(U64 value) { return (llvm::ConstantInt*)llvm::ConstantInt::get(llvmI64Type,llvm::APInt(64,value,false)); } + inline llvm::ConstantInt* emitLiteral(I64 value) { return (llvm::ConstantInt*)llvm::ConstantInt::get(llvmI64Type,llvm::APInt(64,value,false)); } + inline llvm::Constant* emitLiteral(F32 value) { return llvm::ConstantFP::get(context,llvm::APFloat(value)); } + inline llvm::Constant* emitLiteral(F64 value) { return llvm::ConstantFP::get(context,llvm::APFloat(value)); } + inline llvm::Constant* emitLiteral(bool value) { return llvm::ConstantInt::get(llvmBoolType,llvm::APInt(1,value ? 1 : 0,false)); } + inline llvm::Constant* emitLiteralPointer(const void* pointer,llvm::Type* type) + { + auto pointerInt = llvm::APInt(sizeof(Uptr) == 8 ? 64 : 32,reinterpret_cast(pointer)); + return llvm::Constant::getIntegerValue(type,pointerInt); + } + // The LLVM IR for a module. 
struct EmitModuleContext { const Module& module; - ModuleInstance* moduleInstance; llvm::Module* llvmModule; std::vector functionDefs; - std::vector importedFunctionPointers; - std::vector globalPointers; + std::vector importedFunctionOffsets; + std::vector globals; llvm::Constant* defaultTablePointer; llvm::Constant* defaultTableMaxElementIndex; llvm::Constant* defaultMemoryBase; - llvm::Constant* defaultMemoryEndOffset; + llvm::Constant* depthCounter; + bool tableOnlyHasDefinedFuncs = true; llvm::MDNode* likelyFalseBranchWeights; llvm::MDNode* likelyTrueBranchWeights; - EmitModuleContext(const Module& inModule,ModuleInstance* inModuleInstance) + EmitModuleContext(const Module& inModule) : module(inModule) - , moduleInstance(inModuleInstance) , llvmModule(new llvm::Module("",context)) { auto zeroAsMetadata = llvm::ConstantAsMetadata::get(emitLiteral(I32(0))); @@ -54,7 +161,6 @@ namespace LLVMJIT const Module& module; const FunctionDef& functionDef; const FunctionType* functionType; - FunctionInstance* functionInstance; llvm::Function* llvmFunction; llvm::IRBuilder<> irBuilder; @@ -96,12 +202,11 @@ namespace LLVMJIT std::vector branchTargetStack; std::vector stack; - EmitFunctionContext(EmitModuleContext& inEmitModuleContext,const Module& inModule,const FunctionDef& inFunctionDef,FunctionInstance* inFunctionInstance,llvm::Function* inLLVMFunction) + EmitFunctionContext(EmitModuleContext& inEmitModuleContext,const Module& inModule,const FunctionDef& inFunctionDef,llvm::Function* inLLVMFunction) : moduleContext(inEmitModuleContext) , module(inModule) , functionDef(inFunctionDef) , functionType(inModule.types[inFunctionDef.type.index]) - , functionInstance(inFunctionInstance) , llvmFunction(inLLVMFunction) , irBuilder(context) {} @@ -182,7 +287,7 @@ namespace LLVMJIT } if(stack.size() == stackBase) { stackString += "|"; } - Log::printf(Log::Category::debug,"%-50s %-50s %-50s\n",controlStackString.c_str(),operatorDescription.c_str(),stackString.c_str()); + 
//Log::printf(Log::Category::debug,"%-50s %-50s %-50s\n",controlStackString.c_str(),operatorDescription.c_str(),stackString.c_str()); } } @@ -199,58 +304,23 @@ namespace LLVMJIT // Bounds checks and converts a memory operation I32 address operand to a LLVM pointer. llvm::Value* coerceByteIndexToPointer(llvm::Value* byteIndex,U32 offset,llvm::Type* memoryType) { - if(HAS_64BIT_ADDRESS_SPACE) - { - // On a 64 bit runtime, if the address is 32-bits, zext it to 64-bits. - // This is crucial for security, as LLVM will otherwise implicitly sign extend it to 64-bits in the GEP below, - // interpreting it as a signed offset and allowing access to memory outside the sandboxed memory range. - // There are no 'far addresses' in a 32 bit runtime. - if(sizeof(Uptr) != 4) { byteIndex = irBuilder.CreateZExt(byteIndex,llvmI64Type); } - - // Add the offset to the byte index. - if(offset) - { - byteIndex = irBuilder.CreateAdd(byteIndex,irBuilder.CreateZExt(emitLiteral(offset),llvmI64Type)); - } - // If HAS_64BIT_ADDRESS_SPACE, the memory has enough virtual address space allocated to - // ensure that any 32-bit byte index + 32-bit offset will fall within the virtual address sandbox, - // so no explicit bounds check is necessary. - } - else - { - // Add the offset to the byte index using a LLVM intrinsic that returns a carry bit if the add overflowed. - llvm::Value* overflowed = emitLiteral(false); - if(offset) - { - auto offsetByteIndexWithOverflow = irBuilder.CreateCall( - getLLVMIntrinsic({llvmI32Type},llvm::Intrinsic::uadd_with_overflow), - {byteIndex,emitLiteral(U32(offset))} - ); - byteIndex = irBuilder.CreateExtractValue(offsetByteIndexWithOverflow,{0}); - overflowed = irBuilder.CreateExtractValue(offsetByteIndexWithOverflow,{1}); - } + // On a 64 bit runtime, if the address is 32-bits, zext it to 64-bits. 
+ // This is crucial for security, as LLVM will otherwise implicitly sign extend it to 64-bits in the GEP below, + // interpreting it as a signed offset and allowing access to memory outside the sandboxed memory range. + // There are no 'far addresses' in a 32 bit runtime. + byteIndex = irBuilder.CreateZExt(byteIndex,llvmI64Type); - // Check that the offset didn't overflow, and that the final byte index is within the virtual address space - // allocated for the memory. - emitConditionalTrapIntrinsic( - irBuilder.CreateOr( - overflowed, - irBuilder.CreateICmpUGT( - byteIndex, - irBuilder.CreateSub( - moduleContext.defaultMemoryEndOffset, - emitLiteral(Uptr(memoryType->getPrimitiveSizeInBits() / 8) - 1) - ) - ) - ), - "wavmIntrinsics.accessViolationTrap",FunctionType::get(),{}); + // Add the offset to the byte index. + if(offset) + { + byteIndex = irBuilder.CreateAdd(byteIndex,irBuilder.CreateZExt(emitLiteral(offset),llvmI64Type)); } // Cast the pointer to the appropriate type. - auto bytePointer = irBuilder.CreateInBoundsGEP(moduleContext.defaultMemoryBase,byteIndex); - - return irBuilder.CreatePointerCast(bytePointer,memoryType->getPointerTo()); + auto bytePointer = CreateInBoundsGEPWAR(irBuilder, moduleContext.defaultMemoryBase, byteIndex); + + return irBuilder.CreatePointerCast(bytePointer,memoryType->getPointerTo(256)); } // Traps a divide-by-zero @@ -258,7 +328,7 @@ namespace LLVMJIT { emitConditionalTrapIntrinsic( irBuilder.CreateICmpEQ(divisor,typedZeroConstants[(Uptr)type]), - "wavmIntrinsics.divideByZeroOrIntegerOverflowTrap",FunctionType::get(),{}); + "eosvmoc_internal.div0_or_overflow",FunctionType::get(),{}); } // Traps on (x / 0) or (INT_MIN / -1). 
@@ -272,7 +342,7 @@ namespace LLVMJIT ), irBuilder.CreateICmpEQ(right,typedZeroConstants[(Uptr)type]) ), - "wavmIntrinsics.divideByZeroOrIntegerOverflowTrap",FunctionType::get(),{}); + "eosvmoc_internal.div0_or_overflow",FunctionType::get(),{}); } llvm::Value* getLLVMIntrinsic(const std::initializer_list& argTypes,llvm::Intrinsic::ID id) @@ -283,12 +353,10 @@ namespace LLVMJIT // Emits a call to a WAVM intrinsic function. llvm::Value* emitRuntimeIntrinsic(const char* intrinsicName,const FunctionType* intrinsicType,const std::initializer_list& args) { - ObjectInstance* intrinsicObject = Intrinsics::find(intrinsicName,intrinsicType); - WAVM_ASSERT_THROW(intrinsicObject); - FunctionInstance* intrinsicFunction = asFunction(intrinsicObject); - WAVM_ASSERT_THROW(intrinsicFunction->type == intrinsicType); - auto intrinsicFunctionPointer = emitLiteralPointer(intrinsicFunction->nativeFunction,asLLVMType(intrinsicType)->getPointerTo()); - return irBuilder.CreateCall(intrinsicFunctionPointer,llvm::ArrayRef(args.begin(),args.end())); + const eosio::chain::eosvmoc::intrinsic_entry& ie = eosio::chain::eosvmoc::get_intrinsic_map().at(intrinsicName); + llvm::Value* ic = irBuilder.CreateLoad( emitLiteralPointer((void*)(OFFSET_OF_FIRST_INTRINSIC-ie.ordinal*8), llvmI64Type->getPointerTo(256)) ); + llvm::Value* itp = irBuilder.CreateIntToPtr(ic, asLLVMType(ie.type)->getPointerTo()); + return irBuilder.CreateCall(itp,llvm::ArrayRef(args.begin(),args.end())); } // A helper function to emit a conditional call to a non-returning intrinsic function. @@ -590,7 +658,7 @@ namespace LLVMJIT void unreachable(NoImm) { // Call an intrinsic that causes a trap, and insert the LLVM unreachable terminator. 
- emitRuntimeIntrinsic("wavmIntrinsics.unreachableTrap",FunctionType::get(),{}); + emitRuntimeIntrinsic("eosvmoc_internal.unreachable",FunctionType::get(),{}); irBuilder.CreateUnreachable(); enterUnreachable(); @@ -619,16 +687,17 @@ namespace LLVMJIT // Map the callee function index to either an imported function pointer or a function in this module. llvm::Value* callee; const FunctionType* calleeType; - if(imm.functionIndex < moduleContext.importedFunctionPointers.size()) + bool isExit = false; + if(imm.functionIndex < moduleContext.importedFunctionOffsets.size()) { - WAVM_ASSERT_THROW(imm.functionIndex < moduleContext.moduleInstance->functions.size()); - callee = moduleContext.importedFunctionPointers[imm.functionIndex]; - calleeType = moduleContext.moduleInstance->functions[imm.functionIndex]->type; + calleeType = module.types[module.functions.imports[imm.functionIndex].type.index]; + llvm::Value* ic = irBuilder.CreateLoad( emitLiteralPointer((void*)(OFFSET_OF_FIRST_INTRINSIC-moduleContext.importedFunctionOffsets[imm.functionIndex]*8), llvmI64Type->getPointerTo(256)) ); + callee = irBuilder.CreateIntToPtr(ic, asLLVMType(calleeType)->getPointerTo()); + isExit = module.functions.imports[imm.functionIndex].moduleName == "env" && module.functions.imports[imm.functionIndex].exportName == "eosio_exit"; } else { - const Uptr calleeIndex = imm.functionIndex - moduleContext.importedFunctionPointers.size(); - WAVM_ASSERT_THROW(calleeIndex < moduleContext.functionDefs.size()); + const Uptr calleeIndex = imm.functionIndex - moduleContext.importedFunctionOffsets.size(); callee = moduleContext.functionDefs[calleeIndex]; calleeType = module.types[module.functions.defs[calleeIndex].type.index]; } @@ -639,6 +708,10 @@ namespace LLVMJIT // Call the function. auto result = irBuilder.CreateCall(callee,llvm::ArrayRef(llvmArgs,calleeType->parameters.size())); + if(isExit) { + irBuilder.CreateUnreachable(); + enterUnreachable(); + } // Push the result on the operand stack. 
if(calleeType->ret != ResultType::none) { push(result); } @@ -648,7 +721,7 @@ namespace LLVMJIT WAVM_ASSERT_THROW(imm.type.index < module.types.size()); auto calleeType = module.types[imm.type.index]; - auto functionPointerType = asLLVMType(calleeType)->getPointerTo()->getPointerTo(); + auto functionPointerType = asLLVMType(calleeType)->getPointerTo(); // Compile the function index. auto tableElementIndex = pop(); @@ -658,35 +731,75 @@ namespace LLVMJIT popMultiple(llvmArgs,calleeType->parameters.size()); // Zero extend the function index to the pointer size. - auto functionIndexZExt = irBuilder.CreateZExt(tableElementIndex,sizeof(Uptr) == 4 ? llvmI32Type : llvmI64Type); + auto functionIndexZExt = irBuilder.CreateZExt(tableElementIndex, llvmI64Type); // If the function index is larger than the function table size, trap. emitConditionalTrapIntrinsic( irBuilder.CreateICmpUGE(functionIndexZExt,moduleContext.defaultTableMaxElementIndex), - "wavmIntrinsics.indirectCallIndexOutOfBounds",FunctionType::get(),{}); + "eosvmoc_internal.indirect_call_oob",FunctionType::get(),{}); // Load the type for this table entry. - auto functionTypePointerPointer = irBuilder.CreateInBoundsGEP(moduleContext.defaultTablePointer,{functionIndexZExt,emitLiteral((U32)0)}); + auto functionTypePointerPointer = CreateInBoundsGEPWAR(irBuilder, moduleContext.defaultTablePointer, functionIndexZExt, emitLiteral((U32)0)); auto functionTypePointer = irBuilder.CreateLoad(functionTypePointerPointer); auto llvmCalleeType = emitLiteralPointer(calleeType,llvmI8PtrType); // If the function type doesn't match, trap. 
emitConditionalTrapIntrinsic( irBuilder.CreateICmpNE(llvmCalleeType,functionTypePointer), - "wavmIntrinsics.indirectCallSignatureMismatch", - FunctionType::get(ResultType::none,{ValueType::i32,ValueType::i64,ValueType::i64}), - { tableElementIndex, - irBuilder.CreatePtrToInt(llvmCalleeType,llvmI64Type), - emitLiteral(reinterpret_cast(moduleContext.moduleInstance->defaultTable)) } + "eosvmoc_internal.indirect_call_mismatch", + FunctionType::get(),{} ); - // Call the function loaded from the table. - auto functionPointerPointer = irBuilder.CreateInBoundsGEP(moduleContext.defaultTablePointer,{functionIndexZExt,emitLiteral((U32)1)}); - auto functionPointer = irBuilder.CreateLoad(irBuilder.CreatePointerCast(functionPointerPointer,functionPointerType)); - auto result = irBuilder.CreateCall(functionPointer,llvm::ArrayRef(llvmArgs,calleeType->parameters.size())); + //If the WASM only contains table elements to function definitions internal to the wasm, we can take a + // simple and approach + if(moduleContext.tableOnlyHasDefinedFuncs) { + auto functionPointerPointer = CreateInBoundsGEPWAR(irBuilder, moduleContext.defaultTablePointer, functionIndexZExt, emitLiteral((U32)1)); + auto functionInfo = irBuilder.CreateLoad(functionPointerPointer); //offset of code + llvm::Value* running_code_start = irBuilder.CreateLoad(emitLiteralPointer((void*)OFFSET_OF_CONTROL_BLOCK_MEMBER(running_code_base), llvmI64Type->getPointerTo(256))); + llvm::Value* offset_from_start = irBuilder.CreateAdd(running_code_start, functionInfo); + llvm::Value* ptr_cast = irBuilder.CreateIntToPtr(offset_from_start, functionPointerType); + auto result = irBuilder.CreateCall(ptr_cast,llvm::ArrayRef(llvmArgs,calleeType->parameters.size())); + + // Push the result on the operand stack. 
+ if(calleeType->ret != ResultType::none) { push(result); } + } + else { + auto functionPointerPointer = CreateInBoundsGEPWAR(irBuilder, moduleContext.defaultTablePointer, functionIndexZExt, emitLiteral((U32)1)); + auto functionInfo = irBuilder.CreateLoad(functionPointerPointer); //offset of code + + auto is_intrnsic = irBuilder.CreateICmpSLT(functionInfo, typedZeroConstants[(Uptr)ValueType::i64]); - // Push the result on the operand stack. - if(calleeType->ret != ResultType::none) { push(result); } + llvm::BasicBlock* is_intrinsic_block = llvm::BasicBlock::Create(context, "isintrinsic", llvmFunction); + llvm::BasicBlock* is_code_offset_block = llvm::BasicBlock::Create(context, "isoffset"); + llvm::BasicBlock* continuation_block = llvm::BasicBlock::Create(context, "cont"); + + irBuilder.CreateCondBr(is_intrnsic, is_intrinsic_block, is_code_offset_block, moduleContext.likelyFalseBranchWeights); + + irBuilder.SetInsertPoint(is_intrinsic_block); + llvm::Value* intrinsic_start = emitLiteral((I64)OFFSET_OF_FIRST_INTRINSIC); + llvm::Value* intrinsic_offset = irBuilder.CreateAdd(intrinsic_start, functionInfo); + llvm::Value* intrinsic_ptr = irBuilder.CreateLoad(irBuilder.CreateIntToPtr(intrinsic_offset, llvmI64Type->getPointerTo(256))); + irBuilder.CreateBr(continuation_block); + + llvmFunction->getBasicBlockList().push_back(is_code_offset_block); + irBuilder.SetInsertPoint(is_code_offset_block); + llvm::Value* running_code_start = irBuilder.CreateLoad(emitLiteralPointer((void*)OFFSET_OF_CONTROL_BLOCK_MEMBER(running_code_base), llvmI64Type->getPointerTo(256))); + llvm::Value* offset_from_start = irBuilder.CreateAdd(running_code_start, functionInfo); + irBuilder.CreateBr(continuation_block); + + llvmFunction->getBasicBlockList().push_back(continuation_block); + irBuilder.SetInsertPoint(continuation_block); + + llvm::PHINode* PN = irBuilder.CreatePHI(llvmI64Type, 2, "indirecttypephi"); + PN->addIncoming(intrinsic_ptr, is_intrinsic_block); + 
PN->addIncoming(offset_from_start, is_code_offset_block); + + llvm::Value* ptr_cast = irBuilder.CreateIntToPtr(PN, functionPointerType); + auto result = irBuilder.CreateCall(ptr_cast,llvm::ArrayRef(llvmArgs,calleeType->parameters.size())); + + // Push the result on the operand stack. + if(calleeType->ret != ResultType::none) { push(result); } + } } // @@ -713,14 +826,17 @@ namespace LLVMJIT void get_global(GetOrSetVariableImm imm) { - WAVM_ASSERT_THROW(imm.variableIndex < moduleContext.globalPointers.size()); - push(irBuilder.CreateLoad(moduleContext.globalPointers[imm.variableIndex])); + WAVM_ASSERT_THROW(imm.variableIndex < moduleContext.globals.size()); + if(moduleContext.globals[imm.variableIndex]->getType()->isPointerTy()) + push(irBuilder.CreateLoad(moduleContext.globals[imm.variableIndex])); + else + push(moduleContext.globals[imm.variableIndex]); } void set_global(GetOrSetVariableImm imm) { - WAVM_ASSERT_THROW(imm.variableIndex < moduleContext.globalPointers.size()); - auto value = irBuilder.CreateBitCast(pop(),moduleContext.globalPointers[imm.variableIndex]->getType()->getPointerElementType()); - irBuilder.CreateStore(value,moduleContext.globalPointers[imm.variableIndex]); + WAVM_ASSERT_THROW(imm.variableIndex < moduleContext.globals.size()); + auto value = irBuilder.CreateBitCast(pop(),moduleContext.globals[imm.variableIndex]->getType()->getPointerElementType()); + irBuilder.CreateStore(value,moduleContext.globals[imm.variableIndex]); } // @@ -731,21 +847,20 @@ namespace LLVMJIT void grow_memory(MemoryImm) { auto deltaNumPages = pop(); - auto defaultMemoryObjectAsI64 = emitLiteral(reinterpret_cast(moduleContext.moduleInstance->defaultMemory)); + auto maxMemoryPages = emitLiteral((U32)moduleContext.module.memories.defs[0].type.size.max); auto previousNumPages = emitRuntimeIntrinsic( - "wavmIntrinsics.growMemory", - FunctionType::get(ResultType::i32,{ValueType::i32,ValueType::i64}), - {deltaNumPages,defaultMemoryObjectAsI64}); + 
"eosvmoc_internal.grow_memory", + FunctionType::get(ResultType::i32,{ValueType::i32,ValueType::i32}), + {deltaNumPages,maxMemoryPages}); push(previousNumPages); } void current_memory(MemoryImm) { - auto defaultMemoryObjectAsI64 = emitLiteral(reinterpret_cast(moduleContext.moduleInstance->defaultMemory)); - auto currentNumPages = emitRuntimeIntrinsic( - "wavmIntrinsics.currentMemory", - FunctionType::get(ResultType::i32,{ValueType::i64}), - {defaultMemoryObjectAsI64}); - push(currentNumPages); + auto offset = emitLiteral((I32)OFFSET_OF_CONTROL_BLOCK_MEMBER(current_linear_memory_pages)); + auto bytePointer = CreateInBoundsGEPWAR(irBuilder, moduleContext.defaultMemoryBase, offset); + auto ptrTo = irBuilder.CreatePointerCast(bytePointer,llvmI32Type->getPointerTo(256)); + auto load = irBuilder.CreateLoad(ptrTo); + push(load); } // @@ -766,7 +881,7 @@ namespace LLVMJIT auto byteIndex = pop(); \ auto pointer = coerceByteIndexToPointer(byteIndex,imm.offset,llvmMemoryType); \ auto load = irBuilder.CreateLoad(pointer); \ - load->setAlignment(1<setAlignment(1); \ load->setVolatile(true); \ push(conversionOp(load,asLLVMType(ValueType::valueTypeId))); \ } @@ -779,7 +894,7 @@ namespace LLVMJIT auto memoryValue = conversionOp(value,llvmMemoryType); \ auto store = irBuilder.CreateStore(memoryValue,pointer); \ store->setVolatile(true); \ - store->setAlignment(1<setAlignment(1); \ } llvm::Value* identityConversion(llvm::Value* value,llvm::Type* type) { return value; } @@ -906,17 +1021,51 @@ namespace LLVMJIT EMIT_INT_BINARY_OP(shl,irBuilder.CreateShl(left,emitShiftCountMask(type,right))) EMIT_INT_BINARY_OP(shr_s,irBuilder.CreateAShr(left,emitShiftCountMask(type,right))) EMIT_INT_BINARY_OP(shr_u,irBuilder.CreateLShr(left,emitShiftCountMask(type,right))) - - EMIT_INT_BINARY_OP(eq,coerceBoolToI32(irBuilder.CreateICmpEQ(left,right))) - EMIT_INT_BINARY_OP(ne,coerceBoolToI32(irBuilder.CreateICmpNE(left,right))) - 
EMIT_INT_BINARY_OP(lt_s,coerceBoolToI32(irBuilder.CreateICmpSLT(left,right))) - EMIT_INT_BINARY_OP(lt_u,coerceBoolToI32(irBuilder.CreateICmpULT(left,right))) - EMIT_INT_BINARY_OP(le_s,coerceBoolToI32(irBuilder.CreateICmpSLE(left,right))) - EMIT_INT_BINARY_OP(le_u,coerceBoolToI32(irBuilder.CreateICmpULE(left,right))) - EMIT_INT_BINARY_OP(gt_s,coerceBoolToI32(irBuilder.CreateICmpSGT(left,right))) - EMIT_INT_BINARY_OP(gt_u,coerceBoolToI32(irBuilder.CreateICmpUGT(left,right))) - EMIT_INT_BINARY_OP(ge_s,coerceBoolToI32(irBuilder.CreateICmpSGE(left,right))) - EMIT_INT_BINARY_OP(ge_u,coerceBoolToI32(irBuilder.CreateICmpUGE(left,right))) + + static llvm::Value* getNonConstantZero(llvm::IRBuilder<>& irBuilder, llvm::Constant* zero) { + llvm::Value* zeroAlloca = irBuilder.CreateAlloca(zero->getType(), nullptr, "nonConstantZero"); + irBuilder.CreateStore(zero, zeroAlloca); + return irBuilder.CreateLoad(zeroAlloca); + } + + #define EMIT_INT_COMPARE_OP(name, llvmSourceType, llvmDestType, valueType, emitCode) \ + void name(NoImm) { \ + auto right = irBuilder.CreateOr( \ + irBuilder.CreateBitCast(pop(), llvmSourceType), \ + irBuilder.CreateBitCast( \ + getNonConstantZero(irBuilder, typedZeroConstants[Uptr(valueType)]), \ + llvmSourceType)); \ + auto left = irBuilder.CreateBitCast(pop(), llvmSourceType); \ + push(coerceBoolToI32(emitCode)); \ + } + + #define EMIT_INT_COMPARE(name, emitCode) \ + EMIT_INT_COMPARE_OP(i32_##name, llvmI32Type, llvmI32Type, ValueType::i32, emitCode) \ + EMIT_INT_COMPARE_OP(i64_##name, llvmI64Type, llvmI32Type, ValueType::i64, emitCode) + +#if LLVM_VERSION_MAJOR < 9 + EMIT_INT_COMPARE(eq, irBuilder.CreateICmpEQ(left, right)) + EMIT_INT_COMPARE(ne, irBuilder.CreateICmpNE(left, right)) + EMIT_INT_COMPARE(lt_s, irBuilder.CreateICmpSLT(left, right)) + EMIT_INT_COMPARE(lt_u, irBuilder.CreateICmpULT(left, right)) + EMIT_INT_COMPARE(le_s, irBuilder.CreateICmpSLE(left, right)) + EMIT_INT_COMPARE(le_u, irBuilder.CreateICmpULE(left, right)) + 
EMIT_INT_COMPARE(gt_s, irBuilder.CreateICmpSGT(left, right)) + EMIT_INT_COMPARE(gt_u, irBuilder.CreateICmpUGT(left, right)) + EMIT_INT_COMPARE(ge_s, irBuilder.CreateICmpSGE(left, right)) + EMIT_INT_COMPARE(ge_u, irBuilder.CreateICmpUGE(left, right)) +#else + EMIT_INT_BINARY_OP(eq, coerceBoolToI32(irBuilder.CreateICmpEQ(left, right))) + EMIT_INT_BINARY_OP(ne, coerceBoolToI32(irBuilder.CreateICmpNE(left, right))) + EMIT_INT_BINARY_OP(lt_s, coerceBoolToI32(irBuilder.CreateICmpSLT(left, right))) + EMIT_INT_BINARY_OP(lt_u, coerceBoolToI32(irBuilder.CreateICmpULT(left, right))) + EMIT_INT_BINARY_OP(le_s, coerceBoolToI32(irBuilder.CreateICmpSLE(left, right))) + EMIT_INT_BINARY_OP(le_u, coerceBoolToI32(irBuilder.CreateICmpULE(left, right))) + EMIT_INT_BINARY_OP(gt_s, coerceBoolToI32(irBuilder.CreateICmpSGT(left, right))) + EMIT_INT_BINARY_OP(gt_u, coerceBoolToI32(irBuilder.CreateICmpUGT(left, right))) + EMIT_INT_BINARY_OP(ge_s, coerceBoolToI32(irBuilder.CreateICmpSGE(left, right))) + EMIT_INT_BINARY_OP(ge_u, coerceBoolToI32(irBuilder.CreateICmpUGE(left, right))) +#endif EMIT_INT_UNARY_OP(clz,irBuilder.CreateCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::ctlz),llvm::ArrayRef({operand,emitLiteral(false)}))) EMIT_INT_UNARY_OP(ctz,irBuilder.CreateCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::cttz),llvm::ArrayRef({operand,emitLiteral(false)}))) @@ -961,471 +1110,16 @@ namespace LLVMJIT EMIT_UNARY_OP(i64,reinterpret_f64,irBuilder.CreateBitCast(operand,llvmI64Type)) // These operations don't match LLVM's semantics exactly, so just call out to C++ implementations. 
- EMIT_FP_BINARY_OP(min,emitRuntimeIntrinsic("wavmIntrinsics.floatMin",FunctionType::get(asResultType(type),{type,type}),{left,right})) - EMIT_FP_BINARY_OP(max,emitRuntimeIntrinsic("wavmIntrinsics.floatMax",FunctionType::get(asResultType(type),{type,type}),{left,right})) - EMIT_FP_UNARY_OP(ceil,emitRuntimeIntrinsic("wavmIntrinsics.floatCeil",FunctionType::get(asResultType(type),{type}),{operand})) - EMIT_FP_UNARY_OP(floor,emitRuntimeIntrinsic("wavmIntrinsics.floatFloor",FunctionType::get(asResultType(type),{type}),{operand})) - EMIT_FP_UNARY_OP(trunc,emitRuntimeIntrinsic("wavmIntrinsics.floatTrunc",FunctionType::get(asResultType(type),{type}),{operand})) - EMIT_FP_UNARY_OP(nearest,emitRuntimeIntrinsic("wavmIntrinsics.floatNearest",FunctionType::get(asResultType(type),{type}),{operand})) - EMIT_INT_UNARY_OP(trunc_s_f32,emitRuntimeIntrinsic("wavmIntrinsics.floatToSignedInt",FunctionType::get(asResultType(type),{ValueType::f32}),{operand})) - EMIT_INT_UNARY_OP(trunc_s_f64,emitRuntimeIntrinsic("wavmIntrinsics.floatToSignedInt",FunctionType::get(asResultType(type),{ValueType::f64}),{operand})) - EMIT_INT_UNARY_OP(trunc_u_f32,emitRuntimeIntrinsic("wavmIntrinsics.floatToUnsignedInt",FunctionType::get(asResultType(type),{ValueType::f32}),{operand})) - EMIT_INT_UNARY_OP(trunc_u_f64,emitRuntimeIntrinsic("wavmIntrinsics.floatToUnsignedInt",FunctionType::get(asResultType(type),{ValueType::f64}),{operand})) - - #if ENABLE_SIMD_PROTOTYPE - llvm::Value* emitAnyTrue(llvm::Value* boolVector) - { - const Uptr numLanes = boolVector->getType()->getVectorNumElements(); - llvm::Value* result = nullptr; - for(Uptr laneIndex = 0;laneIndex < numLanes;++laneIndex) - { - llvm::Value* scalar = irBuilder.CreateExtractElement(boolVector,laneIndex); - result = result ? 
irBuilder.CreateOr(result,scalar) : scalar; - } - return result; - } - llvm::Value* emitAllTrue(llvm::Value* boolVector) - { - const Uptr numLanes = boolVector->getType()->getVectorNumElements(); - llvm::Value* result = nullptr; - for(Uptr laneIndex = 0;laneIndex < numLanes;++laneIndex) - { - llvm::Value* scalar = irBuilder.CreateExtractElement(boolVector,laneIndex); - result = result ? irBuilder.CreateAnd(result,scalar) : scalar; - } - return result; - } - - llvm::Value* unimplemented() - { - Errors::unreachable(); - } - - #define EMIT_SIMD_SPLAT(vectorType,coerceScalar,numLanes) \ - void vectorType##_splat(NoImm) \ - { \ - auto scalar = pop(); \ - push(irBuilder.CreateVectorSplat(numLanes,coerceScalar)); \ - } - EMIT_SIMD_SPLAT(i8x16,irBuilder.CreateTrunc(scalar,llvmI8Type),16) - EMIT_SIMD_SPLAT(i16x8,irBuilder.CreateTrunc(scalar,llvmI16Type),8) - EMIT_SIMD_SPLAT(i32x4,scalar,4) - EMIT_SIMD_SPLAT(i64x2,scalar,2) - EMIT_SIMD_SPLAT(f32x4,scalar,4) - EMIT_SIMD_SPLAT(f64x2,scalar,2) - - EMIT_STORE_OP(v128,store,value->getType(),4,identityConversion) - EMIT_LOAD_OP(v128,load,llvmI64x2Type,4,identityConversion) - - #define EMIT_SIMD_BINARY_OP(name,llvmType,emitCode) \ - void name(NoImm) \ - { \ - auto right = irBuilder.CreateBitCast(pop(),llvmType); SUPPRESS_UNUSED(right); \ - auto left = irBuilder.CreateBitCast(pop(),llvmType); SUPPRESS_UNUSED(left); \ - push(emitCode); \ - } - #define EMIT_SIMD_UNARY_OP(name,llvmType,emitCode) \ - void name(NoImm) \ - { \ - auto operand = irBuilder.CreateBitCast(pop(),llvmType); SUPPRESS_UNUSED(operand); \ - push(emitCode); \ - } - #define EMIT_SIMD_INT_BINARY_OP(name,emitCode) \ - EMIT_SIMD_BINARY_OP(i8x16##_##name,llvmI8x16Type,emitCode) \ - EMIT_SIMD_BINARY_OP(i16x8##_##name,llvmI16x8Type,emitCode) \ - EMIT_SIMD_BINARY_OP(i32x4##_##name,llvmI32x4Type,emitCode) \ - EMIT_SIMD_BINARY_OP(i64x2##_##name,llvmI64x2Type,emitCode) - #define EMIT_SIMD_FP_BINARY_OP(name,emitCode) \ - EMIT_SIMD_BINARY_OP(f32x4##_##name,llvmF32x4Type,emitCode) 
\ - EMIT_SIMD_BINARY_OP(f64x2##_##name,llvmF64x2Type,emitCode) - #define EMIT_SIMD_INT_UNARY_OP(name,emitCode) \ - EMIT_SIMD_UNARY_OP(i8x16##_##name,llvmI8x16Type,emitCode) \ - EMIT_SIMD_UNARY_OP(i16x8##_##name,llvmI16x8Type,emitCode) \ - EMIT_SIMD_UNARY_OP(i32x4##_##name,llvmI32x4Type,emitCode) \ - EMIT_SIMD_UNARY_OP(i64x2##_##name,llvmI64x2Type,emitCode) - #define EMIT_SIMD_FP_UNARY_OP(name,emitCode) \ - EMIT_SIMD_UNARY_OP(f32x4##_##name,llvmF32x4Type,emitCode) \ - EMIT_SIMD_UNARY_OP(f64x2##_##name,llvmF64x2Type,emitCode) - EMIT_SIMD_INT_BINARY_OP(add,irBuilder.CreateAdd(left,right)) - EMIT_SIMD_INT_BINARY_OP(sub,irBuilder.CreateSub(left,right)) - - EMIT_SIMD_INT_BINARY_OP(shl,irBuilder.CreateShl(left,right)) - EMIT_SIMD_INT_BINARY_OP(shr_s,irBuilder.CreateAShr(left,right)) - EMIT_SIMD_INT_BINARY_OP(shr_u,irBuilder.CreateLShr(left,right)) - EMIT_SIMD_INT_BINARY_OP(mul,irBuilder.CreateMul(left,right)) - EMIT_SIMD_INT_BINARY_OP(div_s,irBuilder.CreateSDiv(left,right)) - EMIT_SIMD_INT_BINARY_OP(div_u,irBuilder.CreateUDiv(left,right)) - - EMIT_SIMD_INT_BINARY_OP(eq,irBuilder.CreateICmpEQ(left,right)) - EMIT_SIMD_INT_BINARY_OP(ne,irBuilder.CreateICmpNE(left,right)) - EMIT_SIMD_INT_BINARY_OP(lt_s,irBuilder.CreateICmpSLT(left,right)) - EMIT_SIMD_INT_BINARY_OP(lt_u,irBuilder.CreateICmpULT(left,right)) - EMIT_SIMD_INT_BINARY_OP(le_s,irBuilder.CreateICmpSLE(left,right)) - EMIT_SIMD_INT_BINARY_OP(le_u,irBuilder.CreateICmpULE(left,right)) - EMIT_SIMD_INT_BINARY_OP(gt_s,irBuilder.CreateICmpSGT(left,right)) - EMIT_SIMD_INT_BINARY_OP(gt_u,irBuilder.CreateICmpUGT(left,right)) - EMIT_SIMD_INT_BINARY_OP(ge_s,irBuilder.CreateICmpSGE(left,right)) - EMIT_SIMD_INT_BINARY_OP(ge_u,irBuilder.CreateICmpUGE(left,right)) - - EMIT_SIMD_INT_UNARY_OP(neg,irBuilder.CreateNeg(operand)) - - EMIT_SIMD_BINARY_OP(i8x16_add_saturate_s,llvmI8x16Type,unimplemented()) - EMIT_SIMD_BINARY_OP(i8x16_add_saturate_u,llvmI16x8Type,unimplemented()) - 
EMIT_SIMD_BINARY_OP(i8x16_sub_saturate_s,llvmI8x16Type,unimplemented()) - EMIT_SIMD_BINARY_OP(i8x16_sub_saturate_u,llvmI16x8Type,unimplemented()) - EMIT_SIMD_BINARY_OP(i16x8_add_saturate_s,llvmI8x16Type,unimplemented()) - EMIT_SIMD_BINARY_OP(i16x8_add_saturate_u,llvmI16x8Type,unimplemented()) - EMIT_SIMD_BINARY_OP(i16x8_sub_saturate_s,llvmI8x16Type,unimplemented()) - EMIT_SIMD_BINARY_OP(i16x8_sub_saturate_u,llvmI16x8Type,unimplemented()) - - EMIT_SIMD_UNARY_OP(i32x4_trunc_s_f32x4_sat,llvmF32x4Type,unimplemented()); - EMIT_SIMD_UNARY_OP(i32x4_trunc_u_f32x4_sat,llvmF32x4Type,unimplemented()); - EMIT_SIMD_UNARY_OP(i64x2_trunc_s_f64x2_sat,llvmF64x2Type,unimplemented()); - EMIT_SIMD_UNARY_OP(i64x2_trunc_u_f64x2_sat,llvmF64x2Type,unimplemented()); - - EMIT_SIMD_FP_BINARY_OP(add,irBuilder.CreateFAdd(left,right)) - EMIT_SIMD_FP_BINARY_OP(sub,irBuilder.CreateFSub(left,right)) - EMIT_SIMD_FP_BINARY_OP(mul,irBuilder.CreateFMul(left,right)) - EMIT_SIMD_FP_BINARY_OP(div,irBuilder.CreateFDiv(left,right)) - - EMIT_SIMD_FP_BINARY_OP(eq,irBuilder.CreateFCmpOEQ(left,right)) - EMIT_SIMD_FP_BINARY_OP(ne,irBuilder.CreateFCmpUNE(left,right)) - EMIT_SIMD_FP_BINARY_OP(lt,irBuilder.CreateFCmpOLT(left,right)) - EMIT_SIMD_FP_BINARY_OP(le,irBuilder.CreateFCmpOLE(left,right)) - EMIT_SIMD_FP_BINARY_OP(gt,irBuilder.CreateFCmpOGT(left,right)) - EMIT_SIMD_FP_BINARY_OP(ge,irBuilder.CreateFCmpOGE(left,right)) - EMIT_SIMD_FP_BINARY_OP(min,unimplemented()); - EMIT_SIMD_FP_BINARY_OP(max,unimplemented()); - - EMIT_SIMD_FP_UNARY_OP(neg,irBuilder.CreateFNeg(operand)) - EMIT_SIMD_FP_UNARY_OP(abs,irBuilder.CreateCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::fabs),llvm::ArrayRef({operand}))) - EMIT_SIMD_FP_UNARY_OP(sqrt,irBuilder.CreateCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::sqrt),llvm::ArrayRef({operand}))) - - EMIT_SIMD_UNARY_OP(f32x4_convert_s_i32x4,llvmI32x4Type,irBuilder.CreateSIToFP(operand,llvmF32x4Type)); - 
EMIT_SIMD_UNARY_OP(f32x4_convert_u_i32x4,llvmI32x4Type,irBuilder.CreateUIToFP(operand,llvmF32x4Type)); - EMIT_SIMD_UNARY_OP(f64x2_convert_s_i64x2,llvmI64x2Type,irBuilder.CreateSIToFP(operand,llvmF64x2Type)); - EMIT_SIMD_UNARY_OP(f64x2_convert_u_i64x2,llvmI64x2Type,irBuilder.CreateUIToFP(operand,llvmF64x2Type)); - - EMIT_SIMD_UNARY_OP(i8x16_any_true,llvmI8x16Type,emitAnyTrue(operand)) - EMIT_SIMD_UNARY_OP(i16x8_any_true,llvmI16x8Type,emitAnyTrue(operand)) - EMIT_SIMD_UNARY_OP(i32x4_any_true,llvmI32x4Type,emitAnyTrue(operand)) - EMIT_SIMD_UNARY_OP(i64x2_any_true,llvmI64x2Type,emitAnyTrue(operand)) - - EMIT_SIMD_UNARY_OP(i8x16_all_true,llvmI8x16Type,emitAllTrue(operand)) - EMIT_SIMD_UNARY_OP(i16x8_all_true,llvmI16x8Type,emitAllTrue(operand)) - EMIT_SIMD_UNARY_OP(i32x4_all_true,llvmI32x4Type,emitAllTrue(operand)) - EMIT_SIMD_UNARY_OP(i64x2_all_true,llvmI64x2Type,emitAllTrue(operand)) - - void v128_and(NoImm) - { - auto right = pop(); - auto left = irBuilder.CreateBitCast(pop(),right->getType()); - push(irBuilder.CreateAnd(left,right)); - } - void v128_or(NoImm) - { - auto right = pop(); - auto left = irBuilder.CreateBitCast(pop(),right->getType()); - push(irBuilder.CreateOr(left,right)); - } - void v128_xor(NoImm) - { - auto right = pop(); - auto left = irBuilder.CreateBitCast(pop(),right->getType()); - push(irBuilder.CreateXor(left,right)); - } - void v128_not(NoImm) - { - auto operand = pop(); - push(irBuilder.CreateNot(operand)); - } - - #define EMIT_SIMD_EXTRACT_LANE_OP(name,llvmType,numLanes,coerceScalar) \ - void name(LaneIndexImm imm) \ - { \ - auto operand = irBuilder.CreateBitCast(pop(),llvmType); \ - auto scalar = irBuilder.CreateExtractElement(operand,imm.laneIndex); \ - push(coerceScalar); \ - } - EMIT_SIMD_EXTRACT_LANE_OP(i8x16_extract_lane_s,llvmI8x16Type,16,irBuilder.CreateSExt(scalar,llvmI32Type)) - EMIT_SIMD_EXTRACT_LANE_OP(i8x16_extract_lane_u,llvmI8x16Type,16,irBuilder.CreateZExt(scalar,llvmI32Type)) - 
EMIT_SIMD_EXTRACT_LANE_OP(i16x8_extract_lane_s,llvmI16x8Type,8,irBuilder.CreateSExt(scalar,llvmI32Type)) - EMIT_SIMD_EXTRACT_LANE_OP(i16x8_extract_lane_u,llvmI16x8Type,8,irBuilder.CreateZExt(scalar,llvmI32Type)) - EMIT_SIMD_EXTRACT_LANE_OP(i32x4_extract_lane,llvmI32x4Type,4,scalar) - EMIT_SIMD_EXTRACT_LANE_OP(i64x2_extract_lane,llvmI64x2Type,2,scalar) - - EMIT_SIMD_EXTRACT_LANE_OP(f32x4_extract_lane,llvmF32x4Type,4,scalar) - EMIT_SIMD_EXTRACT_LANE_OP(f64x2_extract_lane,llvmF64x2Type,2,scalar) - - #define EMIT_SIMD_REPLACE_LANE_OP(typePrefix,llvmType,numLanes,coerceScalar) \ - void typePrefix##_replace_lane(LaneIndexImm imm) \ - { \ - auto vector = irBuilder.CreateBitCast(pop(),llvmType); \ - auto scalar = pop(); \ - push(irBuilder.CreateInsertElement(vector,coerceScalar,imm.laneIndex)); \ - } - - EMIT_SIMD_REPLACE_LANE_OP(i8x16,llvmI8x16Type,16,irBuilder.CreateTrunc(scalar,llvmI8Type)) - EMIT_SIMD_REPLACE_LANE_OP(i16x8,llvmI16x8Type,8,irBuilder.CreateTrunc(scalar,llvmI16Type)) - EMIT_SIMD_REPLACE_LANE_OP(i32x4,llvmI32x4Type,4,scalar) - EMIT_SIMD_REPLACE_LANE_OP(i64x2,llvmI64x2Type,2,scalar) - - EMIT_SIMD_REPLACE_LANE_OP(f32x4,llvmF32x4Type,4,scalar) - EMIT_SIMD_REPLACE_LANE_OP(f64x2,llvmF64x2Type,2,scalar) - - void v8x16_shuffle(ShuffleImm<16> imm) - { - auto right = irBuilder.CreateBitCast(pop(),llvmI8x16Type); - auto left = irBuilder.CreateBitCast(pop(),llvmI8x16Type); - unsigned int laneIndices[16]; - for(Uptr laneIndex = 0;laneIndex < 16;++laneIndex) - { - laneIndices[laneIndex] = imm.laneIndices[laneIndex]; - } - push(irBuilder.CreateShuffleVector(left,right,llvm::ArrayRef(laneIndices,16))); - } - - void v128_const(LiteralImm imm) - { - push(llvm::ConstantVector::get({emitLiteral(imm.value.u64[0]),emitLiteral(imm.value.u64[1])})); - } - - void v128_bitselect(NoImm) - { - auto mask = irBuilder.CreateBitCast(pop(),llvmI64x2Type); - auto falseValue = irBuilder.CreateBitCast(pop(),llvmI64x2Type); - auto trueValue = irBuilder.CreateBitCast(pop(),llvmI64x2Type); - 
push(irBuilder.CreateOr( - irBuilder.CreateAnd(trueValue,mask), - irBuilder.CreateAnd(falseValue,irBuilder.CreateNot(mask)) - )); - } - #endif - - #if ENABLE_THREADING_PROTOTYPE - void is_lock_free(NoImm) - { - auto numBytes = pop(); - push(emitRuntimeIntrinsic( - "wavmIntrinsics.isLockFree", - FunctionType::get(ResultType::i32,{ValueType::i32}), - {numBytes})); - } - void wake(AtomicLoadOrStoreImm<2>) - { - auto numWaiters = pop(); - auto address = pop(); - auto defaultMemoryObjectAsI64 = emitLiteral(reinterpret_cast(moduleContext.moduleInstance->defaultMemory)); - push(emitRuntimeIntrinsic( - "wavmIntrinsics.wake", - FunctionType::get(ResultType::i32,{ValueType::i32,ValueType::i32,ValueType::i64}), - {address,numWaiters,defaultMemoryObjectAsI64})); - } - void i32_wait(AtomicLoadOrStoreImm<2>) - { - auto timeout = pop(); - auto expectedValue = pop(); - auto address = pop(); - auto defaultMemoryObjectAsI64 = emitLiteral(reinterpret_cast(moduleContext.moduleInstance->defaultMemory)); - push(emitRuntimeIntrinsic( - "wavmIntrinsics.wait", - FunctionType::get(ResultType::i32,{ValueType::i32,ValueType::i32,ValueType::f64,ValueType::i64}), - {address,expectedValue,timeout,defaultMemoryObjectAsI64})); - } - void i64_wait(AtomicLoadOrStoreImm<3>) - { - auto timeout = pop(); - auto expectedValue = pop(); - auto address = pop(); - auto defaultMemoryObjectAsI64 = emitLiteral(reinterpret_cast(moduleContext.moduleInstance->defaultMemory)); - push(emitRuntimeIntrinsic( - "wavmIntrinsics.wait", - FunctionType::get(ResultType::i32,{ValueType::i32,ValueType::i64,ValueType::f64,ValueType::i64}), - {address,expectedValue,timeout,defaultMemoryObjectAsI64})); - } - - void launch_thread(LaunchThreadImm) - { - WAVM_ASSERT_THROW(moduleContext.moduleInstance->defaultTable); - auto errorFunctionIndex = pop(); - auto argument = pop(); - auto functionIndex = pop(); - auto defaultTableAsI64 = emitLiteral(reinterpret_cast(moduleContext.moduleInstance->defaultTable)); - emitRuntimeIntrinsic( - 
"wavmIntrinsics.launchThread", - FunctionType::get(ResultType::none,{ValueType::i32,ValueType::i32,ValueType::i32,ValueType::i64}), - {functionIndex,argument,errorFunctionIndex,defaultTableAsI64}); - } - - void trapIfMisalignedAtomic(llvm::Value* address,U32 naturalAlignmentLog2) - { - if(naturalAlignmentLog2 > 0) - { - emitConditionalTrapIntrinsic( - irBuilder.CreateICmpNE( - typedZeroConstants[(Uptr)ValueType::i32], - irBuilder.CreateAnd(address,emitLiteral((U32(1) << naturalAlignmentLog2) - 1))), - "wavmIntrinsics.misalignedAtomicTrap", - FunctionType::get(ResultType::none,{ValueType::i32}), - {address}); - } - } - - EMIT_UNARY_OP(i32,extend_s_i8,irBuilder.CreateSExt(irBuilder.CreateTrunc(operand,llvmI8Type),llvmI32Type)) - EMIT_UNARY_OP(i32,extend_s_i16,irBuilder.CreateSExt(irBuilder.CreateTrunc(operand,llvmI16Type),llvmI32Type)) - EMIT_UNARY_OP(i64,extend_s_i8,irBuilder.CreateSExt(irBuilder.CreateTrunc(operand,llvmI8Type),llvmI64Type)) - EMIT_UNARY_OP(i64,extend_s_i16,irBuilder.CreateSExt(irBuilder.CreateTrunc(operand,llvmI16Type),llvmI64Type)) - - #define EMIT_ATOMIC_LOAD_OP(valueTypeId,name,llvmMemoryType,naturalAlignmentLog2,conversionOp) \ - void valueTypeId##_##name(AtomicLoadOrStoreImm imm) \ - { \ - auto byteIndex = pop(); \ - trapIfMisalignedAtomic(byteIndex,naturalAlignmentLog2); \ - auto pointer = coerceByteIndexToPointer(byteIndex,imm.offset,llvmMemoryType); \ - auto load = irBuilder.CreateLoad(pointer); \ - load->setAlignment(1<setVolatile(true); \ - load->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent); \ - push(conversionOp(load,asLLVMType(ValueType::valueTypeId))); \ - } - #define EMIT_ATOMIC_STORE_OP(valueTypeId,name,llvmMemoryType,naturalAlignmentLog2,conversionOp) \ - void valueTypeId##_##name(AtomicLoadOrStoreImm imm) \ - { \ - auto value = pop(); \ - auto byteIndex = pop(); \ - trapIfMisalignedAtomic(byteIndex,naturalAlignmentLog2); \ - auto pointer = coerceByteIndexToPointer(byteIndex,imm.offset,llvmMemoryType); \ - auto 
memoryValue = conversionOp(value,llvmMemoryType); \ - auto store = irBuilder.CreateStore(memoryValue,pointer); \ - store->setVolatile(true); \ - store->setAlignment(1<setAtomic(llvm::AtomicOrdering::SequentiallyConsistent); \ - } - EMIT_ATOMIC_LOAD_OP(i32,atomic_load,llvmI32Type,2,identityConversion) - EMIT_ATOMIC_LOAD_OP(i64,atomic_load,llvmI64Type,3,identityConversion) - EMIT_ATOMIC_LOAD_OP(f32,atomic_load,llvmF32Type,2,identityConversion) - EMIT_ATOMIC_LOAD_OP(f64,atomic_load,llvmF64Type,3,identityConversion) - - EMIT_ATOMIC_LOAD_OP(i32,atomic_load8_s,llvmI8Type,0,irBuilder.CreateSExt) - EMIT_ATOMIC_LOAD_OP(i32,atomic_load8_u,llvmI8Type,0,irBuilder.CreateZExt) - EMIT_ATOMIC_LOAD_OP(i32,atomic_load16_s,llvmI16Type,1,irBuilder.CreateSExt) - EMIT_ATOMIC_LOAD_OP(i32,atomic_load16_u,llvmI16Type,1,irBuilder.CreateZExt) - EMIT_ATOMIC_LOAD_OP(i64,atomic_load8_s,llvmI8Type,0,irBuilder.CreateSExt) - EMIT_ATOMIC_LOAD_OP(i64,atomic_load8_u,llvmI8Type,0,irBuilder.CreateZExt) - EMIT_ATOMIC_LOAD_OP(i64,atomic_load16_s,llvmI16Type,1,irBuilder.CreateSExt) - EMIT_ATOMIC_LOAD_OP(i64,atomic_load16_u,llvmI16Type,1,irBuilder.CreateZExt) - EMIT_ATOMIC_LOAD_OP(i64,atomic_load32_s,llvmI32Type,2,irBuilder.CreateSExt) - EMIT_ATOMIC_LOAD_OP(i64,atomic_load32_u,llvmI32Type,2,irBuilder.CreateZExt) - - EMIT_ATOMIC_STORE_OP(i32,atomic_store,llvmI32Type,2,identityConversion) - EMIT_ATOMIC_STORE_OP(i64,atomic_store,llvmI64Type,3,identityConversion) - EMIT_ATOMIC_STORE_OP(f32,atomic_store,llvmF32Type,2,identityConversion) - EMIT_ATOMIC_STORE_OP(f64,atomic_store,llvmF64Type,3,identityConversion) - - EMIT_ATOMIC_STORE_OP(i32,atomic_store8,llvmI8Type,0,irBuilder.CreateTrunc) - EMIT_ATOMIC_STORE_OP(i32,atomic_store16,llvmI16Type,1,irBuilder.CreateTrunc) - EMIT_ATOMIC_STORE_OP(i64,atomic_store8,llvmI8Type,0,irBuilder.CreateTrunc) - EMIT_ATOMIC_STORE_OP(i64,atomic_store16,llvmI16Type,1,irBuilder.CreateTrunc) - EMIT_ATOMIC_STORE_OP(i64,atomic_store32,llvmI32Type,2,irBuilder.CreateTrunc) - - #define 
EMIT_ATOMIC_CMPXCHG(valueTypeId,name,llvmMemoryType,naturalAlignmentLog2,memoryToValueConversion,valueToMemoryConversion) \ - void valueTypeId##_##name(AtomicLoadOrStoreImm imm) \ - { \ - auto replacementValue = valueToMemoryConversion(pop(),llvmMemoryType); \ - auto expectedValue = valueToMemoryConversion(pop(),llvmMemoryType); \ - auto byteIndex = pop(); \ - trapIfMisalignedAtomic(byteIndex,naturalAlignmentLog2); \ - auto pointer = coerceByteIndexToPointer(byteIndex,imm.offset,llvmMemoryType); \ - auto atomicCmpXchg = irBuilder.CreateAtomicCmpXchg( \ - pointer, \ - expectedValue, \ - replacementValue, \ - llvm::AtomicOrdering::SequentiallyConsistent, \ - llvm::AtomicOrdering::SequentiallyConsistent); \ - atomicCmpXchg->setVolatile(true); \ - auto previousValue = irBuilder.CreateExtractValue(atomicCmpXchg,{0}); \ - push(memoryToValueConversion(previousValue,asLLVMType(ValueType::valueTypeId))); \ - } - - EMIT_ATOMIC_CMPXCHG(i32,atomic_rmw8_u_cmpxchg,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_CMPXCHG(i32,atomic_rmw16_u_cmpxchg,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_CMPXCHG(i32,atomic_rmw_cmpxchg,llvmI32Type,2,identityConversion,identityConversion) - - EMIT_ATOMIC_CMPXCHG(i64,atomic_rmw8_u_cmpxchg,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_CMPXCHG(i64,atomic_rmw16_u_cmpxchg,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_CMPXCHG(i64,atomic_rmw32_u_cmpxchg,llvmI32Type,2,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_CMPXCHG(i64,atomic_rmw_cmpxchg,llvmI64Type,3,identityConversion,identityConversion) - - #define EMIT_ATOMIC_RMW(valueTypeId,name,rmwOpId,llvmMemoryType,naturalAlignmentLog2,memoryToValueConversion,valueToMemoryConversion) \ - void valueTypeId##_##name(AtomicLoadOrStoreImm imm) \ - { \ - auto value = valueToMemoryConversion(pop(),llvmMemoryType); \ - auto byteIndex = pop(); \ - trapIfMisalignedAtomic(byteIndex,naturalAlignmentLog2); \ 
- auto pointer = coerceByteIndexToPointer(byteIndex,imm.offset,llvmMemoryType); \ - auto atomicRMW = irBuilder.CreateAtomicRMW( \ - llvm::AtomicRMWInst::BinOp::rmwOpId, \ - pointer, \ - value, \ - llvm::AtomicOrdering::SequentiallyConsistent); \ - atomicRMW->setVolatile(true); \ - push(memoryToValueConversion(atomicRMW,asLLVMType(ValueType::valueTypeId))); \ - } - - EMIT_ATOMIC_RMW(i32,atomic_rmw8_u_xchg,Xchg,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i32,atomic_rmw16_u_xchg,Xchg,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i32,atomic_rmw_xchg,Xchg,llvmI32Type,2,identityConversion,identityConversion) - - EMIT_ATOMIC_RMW(i64,atomic_rmw8_u_xchg,Xchg,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw16_u_xchg,Xchg,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw32_u_xchg,Xchg,llvmI32Type,2,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw_xchg,Xchg,llvmI64Type,3,identityConversion,identityConversion) - - EMIT_ATOMIC_RMW(i32,atomic_rmw8_u_add,Add,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i32,atomic_rmw16_u_add,Add,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i32,atomic_rmw_add,Add,llvmI32Type,2,identityConversion,identityConversion) - - EMIT_ATOMIC_RMW(i64,atomic_rmw8_u_add,Add,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw16_u_add,Add,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw32_u_add,Add,llvmI32Type,2,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw_add,Add,llvmI64Type,3,identityConversion,identityConversion) - - EMIT_ATOMIC_RMW(i32,atomic_rmw8_u_sub,Sub,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i32,atomic_rmw16_u_sub,Sub,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - 
EMIT_ATOMIC_RMW(i32,atomic_rmw_sub,Sub,llvmI32Type,2,identityConversion,identityConversion) - - EMIT_ATOMIC_RMW(i64,atomic_rmw8_u_sub,Sub,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw16_u_sub,Sub,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw32_u_sub,Sub,llvmI32Type,2,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw_sub,Sub,llvmI64Type,3,identityConversion,identityConversion) - - EMIT_ATOMIC_RMW(i32,atomic_rmw8_u_and,And,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i32,atomic_rmw16_u_and,And,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i32,atomic_rmw_and,And,llvmI32Type,2,identityConversion,identityConversion) - - EMIT_ATOMIC_RMW(i64,atomic_rmw8_u_and,And,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw16_u_and,And,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw32_u_and,And,llvmI32Type,2,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw_and,And,llvmI64Type,3,identityConversion,identityConversion) - - EMIT_ATOMIC_RMW(i32,atomic_rmw8_u_or,Or,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i32,atomic_rmw16_u_or,Or,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i32,atomic_rmw_or,Or,llvmI32Type,2,identityConversion,identityConversion) - - EMIT_ATOMIC_RMW(i64,atomic_rmw8_u_or,Or,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw16_u_or,Or,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw32_u_or,Or,llvmI32Type,2,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw_or,Or,llvmI64Type,3,identityConversion,identityConversion) - - EMIT_ATOMIC_RMW(i32,atomic_rmw8_u_xor,Xor,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - 
EMIT_ATOMIC_RMW(i32,atomic_rmw16_u_xor,Xor,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i32,atomic_rmw_xor,Xor,llvmI32Type,2,identityConversion,identityConversion) - - EMIT_ATOMIC_RMW(i64,atomic_rmw8_u_xor,Xor,llvmI8Type,0,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw16_u_xor,Xor,llvmI16Type,1,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw32_u_xor,Xor,llvmI32Type,2,irBuilder.CreateZExt,irBuilder.CreateTrunc) - EMIT_ATOMIC_RMW(i64,atomic_rmw_xor,Xor,llvmI64Type,3,identityConversion,identityConversion) - #endif + EMIT_FP_BINARY_OP(min,emitRuntimeIntrinsic("eosvmoc_internal.unreachable",FunctionType::get(asResultType(type),{type,type}),{left,right})) + EMIT_FP_BINARY_OP(max,emitRuntimeIntrinsic("eosvmoc_internal.unreachable",FunctionType::get(asResultType(type),{type,type}),{left,right})) + EMIT_FP_UNARY_OP(ceil,emitRuntimeIntrinsic("eosvmoc_internal.unreachable",FunctionType::get(asResultType(type),{type}),{operand})) + EMIT_FP_UNARY_OP(floor,emitRuntimeIntrinsic("eosvmoc_internal.unreachable",FunctionType::get(asResultType(type),{type}),{operand})) + EMIT_FP_UNARY_OP(trunc,emitRuntimeIntrinsic("eosvmoc_internal.unreachable",FunctionType::get(asResultType(type),{type}),{operand})) + EMIT_FP_UNARY_OP(nearest,emitRuntimeIntrinsic("eosvmoc_internal.unreachable",FunctionType::get(asResultType(type),{type}),{operand})) + EMIT_INT_UNARY_OP(trunc_s_f32,emitRuntimeIntrinsic("eosvmoc_internal.unreachable",FunctionType::get(asResultType(type),{ValueType::f32}),{operand})) + EMIT_INT_UNARY_OP(trunc_s_f64,emitRuntimeIntrinsic("eosvmoc_internal.unreachable",FunctionType::get(asResultType(type),{ValueType::f64}),{operand})) + EMIT_INT_UNARY_OP(trunc_u_f32,emitRuntimeIntrinsic("eosvmoc_internal.unreachable",FunctionType::get(asResultType(type),{ValueType::f32}),{operand})) + 
EMIT_INT_UNARY_OP(trunc_u_f64,emitRuntimeIntrinsic("eosvmoc_internal.unreachable",FunctionType::get(asResultType(type),{ValueType::f64}),{operand})) }; // A do-nothing visitor used to decode past unreachable operators (but supporting logging, and passing the end operator through). @@ -1472,16 +1166,6 @@ namespace LLVMJIT auto entryBasicBlock = llvm::BasicBlock::Create(context,"entry",llvmFunction); irBuilder.SetInsertPoint(entryBasicBlock); - // If enabled, emit a call to the WAVM function enter hook (for debugging). - if(ENABLE_FUNCTION_ENTER_EXIT_HOOKS) - { - emitRuntimeIntrinsic( - "wavmIntrinsics.debugEnterFunction", - FunctionType::get(ResultType::none,{ValueType::i64}), - {emitLiteral(reinterpret_cast(functionInstance))} - ); - } - // Create and initialize allocas for all the locals and parameters. auto llvmArgIt = llvmFunction->arg_begin(); for(Uptr localIndex = 0;localIndex < functionType->parameters.size() + functionDef.nonParameterLocalTypes.size();++localIndex) @@ -1505,31 +1189,37 @@ namespace LLVMJIT } } + llvm::LoadInst* depth_loadinst; + llvm::StoreInst* depth_storeinst; + llvm::Value* depth = depth_loadinst = irBuilder.CreateLoad(moduleContext.depthCounter); + depth = irBuilder.CreateSub(depth, emitLiteral((I32)1)); + depth_storeinst = irBuilder.CreateStore(depth, moduleContext.depthCounter); + emitConditionalTrapIntrinsic(irBuilder.CreateICmpEQ(depth, emitLiteral((I32)0)), "eosvmoc_internal.depth_assert", FunctionType::get(), {}); + depth_loadinst->setVolatile(true); + depth_storeinst->setVolatile(true); + // Decode the WebAssembly opcodes and emit LLVM IR for them. 
OperatorDecoderStream decoder(functionDef.code); UnreachableOpVisitor unreachableOpVisitor(*this); OperatorPrinter operatorPrinter(module,functionDef); - while(decoder && controlStack.size()) + while(decoder) { if(ENABLE_LOGGING) { logOperator(decoder.decodeOpWithoutConsume(operatorPrinter)); } + WAVM_ASSERT_THROW(!controlStack.empty()); if(controlStack.back().isReachable) { decoder.decodeOp(*this); } else { decoder.decodeOp(unreachableOpVisitor); } }; WAVM_ASSERT_THROW(irBuilder.GetInsertBlock() == returnBlock); - - // If enabled, emit a call to the WAVM function enter hook (for debugging). - if(ENABLE_FUNCTION_ENTER_EXIT_HOOKS) - { - emitRuntimeIntrinsic( - "wavmIntrinsics.debugExitFunction", - FunctionType::get(ResultType::none,{ValueType::i64}), - {emitLiteral(reinterpret_cast(functionInstance))} - ); - } + + depth = depth_loadinst = irBuilder.CreateLoad(moduleContext.depthCounter); + depth = irBuilder.CreateAdd(depth, emitLiteral((I32)1)); + depth_storeinst = irBuilder.CreateStore(depth, moduleContext.depthCounter); + depth_loadinst->setVolatile(true); + depth_storeinst->setVolatile(true); // Emit the function return. if(functionType->ret == ResultType::none) { irBuilder.CreateRetVoid(); } @@ -1538,63 +1228,93 @@ namespace LLVMJIT llvm::Module* EmitModuleContext::emit() { - Timing::Timer emitTimer; + defaultMemoryBase = emitLiteralPointer(0,llvmI8Type->getPointerTo(256)); - // Create literals for the default memory base and mask. - if(moduleInstance->defaultMemory) - { - defaultMemoryBase = emitLiteralPointer(moduleInstance->defaultMemory->baseAddress,llvmI8PtrType); - const Uptr defaultMemoryEndOffsetValue = Uptr(moduleInstance->defaultMemory->endOffset); - defaultMemoryEndOffset = emitLiteral(defaultMemoryEndOffsetValue); - } - else { defaultMemoryBase = defaultMemoryEndOffset = nullptr; } - - // Set up the LLVM values used to access the global table. 
- if(moduleInstance->defaultTable) - { - auto tableElementType = llvm::StructType::get(context,{ - llvmI8PtrType, - llvmI8PtrType - }); - defaultTablePointer = emitLiteralPointer(moduleInstance->defaultTable->baseAddress,tableElementType->getPointerTo()); - defaultTableMaxElementIndex = emitLiteral(((Uptr)moduleInstance->defaultTable->endOffset)/sizeof(TableInstance::FunctionElement)); - } - else - { - defaultTablePointer = defaultTableMaxElementIndex = nullptr; - } + depthCounter = emitLiteralPointer((void*)OFFSET_OF_CONTROL_BLOCK_MEMBER(current_call_depth_remaining), llvmI32Type->getPointerTo(256)); // Create LLVM pointer constants for the module's imported functions. for(Uptr functionIndex = 0;functionIndex < module.functions.imports.size();++functionIndex) { - const FunctionInstance* functionInstance = moduleInstance->functions[functionIndex]; - importedFunctionPointers.push_back(emitLiteralPointer(functionInstance->nativeFunction,asLLVMType(functionInstance->type)->getPointerTo())); + const intrinsic_entry& ie =get_intrinsic_map().at(module.functions.imports[functionIndex].moduleName + "." + module.functions.imports[functionIndex].exportName); + importedFunctionOffsets.push_back(ie.ordinal); } - // Create LLVM pointer constants for the module's globals. 
- for(auto global : moduleInstance->globals) - { globalPointers.push_back(emitLiteralPointer(&global->value,asLLVMType(global->type.valueType)->getPointerTo())); } + int current_prologue = -8; + + for(const GlobalDef& global : module.globals.defs) { + if(global.type.isMutable) { + globals.push_back(emitLiteralPointer((void*)current_prologue,asLLVMType(global.type.valueType)->getPointerTo(256))); + current_prologue -= 8; + } + else { + switch(global.type.valueType) { + case ValueType::i32: globals.push_back(emitLiteral(global.initializer.i32)); break; + case ValueType::i64: globals.push_back(emitLiteral(global.initializer.i64)); break; + case ValueType::f32: globals.push_back(emitLiteral(global.initializer.f32)); break; + case ValueType::f64: globals.push_back(emitLiteral(global.initializer.f64)); break; + default: break; //impossible + } + } + } + + if(module.tables.size()) { + current_prologue -= 8; //now pointing to LAST element + current_prologue -= 16*(module.tables.defs[0].type.size.min-1); //now pointing to FIRST element + auto tableElementType = llvm::StructType::get(context,{llvmI8PtrType, llvmI64Type}); + defaultTablePointer = emitLiteralPointer((void*)current_prologue,tableElementType->getPointerTo(256)); + defaultTableMaxElementIndex = emitLiteral((U64)module.tables.defs[0].type.size.min); + + for(const TableSegment& table_segment : module.tableSegments) + for(Uptr i = 0; i < table_segment.indices.size(); ++i) + if(table_segment.indices[i] < module.functions.imports.size()) + tableOnlyHasDefinedFuncs = false; + } // Create the LLVM functions. 
functionDefs.resize(module.functions.defs.size()); for(Uptr functionDefIndex = 0;functionDefIndex < module.functions.defs.size();++functionDefIndex) { auto llvmFunctionType = asLLVMType(module.types[module.functions.defs[functionDefIndex].type.index]); - auto externalName = getExternalFunctionName(moduleInstance,functionDefIndex); + auto externalName = getExternalFunctionName(functionDefIndex); functionDefs[functionDefIndex] = llvm::Function::Create(llvmFunctionType,llvm::Function::ExternalLinkage,externalName,llvmModule); } // Compile each function in the module. for(Uptr functionDefIndex = 0;functionDefIndex < module.functions.defs.size();++functionDefIndex) - { EmitFunctionContext(*this,module,module.functions.defs[functionDefIndex],moduleInstance->functionDefs[functionDefIndex],functionDefs[functionDefIndex]).emit(); } - - Timing::logRatePerSecond("Emitted LLVM IR",emitTimer,(F64)llvmModule->size(),"functions"); + { EmitFunctionContext(*this,module,module.functions.defs[functionDefIndex],functionDefs[functionDefIndex]).emit(); } return llvmModule; } - llvm::Module* emitModule(const Module& module,ModuleInstance* moduleInstance) + llvm::Module* emitModule(const Module& module) { - return EmitModuleContext(module,moduleInstance).emit(); + static bool inited; + if(!inited) { + llvmI8Type = llvm::Type::getInt8Ty(context); + llvmI16Type = llvm::Type::getInt16Ty(context); + llvmI32Type = llvm::Type::getInt32Ty(context); + llvmI64Type = llvm::Type::getInt64Ty(context); + llvmF32Type = llvm::Type::getFloatTy(context); + llvmF64Type = llvm::Type::getDoubleTy(context); + llvmVoidType = llvm::Type::getVoidTy(context); + llvmBoolType = llvm::Type::getInt1Ty(context); + llvmI8PtrType = llvmI8Type->getPointerTo(); + + llvmResultTypes[(Uptr)ResultType::none] = llvm::Type::getVoidTy(context); + llvmResultTypes[(Uptr)ResultType::i32] = llvmI32Type; + llvmResultTypes[(Uptr)ResultType::i64] = llvmI64Type; + llvmResultTypes[(Uptr)ResultType::f32] = llvmF32Type; + 
llvmResultTypes[(Uptr)ResultType::f64] = llvmF64Type; + + // Create zero constants of each type. + typedZeroConstants[(Uptr)ValueType::any] = nullptr; + typedZeroConstants[(Uptr)ValueType::i32] = emitLiteral((U32)0); + typedZeroConstants[(Uptr)ValueType::i64] = emitLiteral((U64)0); + typedZeroConstants[(Uptr)ValueType::f32] = emitLiteral((F32)0.0f); + typedZeroConstants[(Uptr)ValueType::f64] = emitLiteral((F64)0.0); + } + + return EmitModuleContext(module).emit(); } } +}}} \ No newline at end of file diff --git a/libraries/chain/webassembly/eos-vm-oc/LLVMJIT.cpp b/libraries/chain/webassembly/eos-vm-oc/LLVMJIT.cpp new file mode 100644 index 00000000000..d3fdb28a5d2 --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/LLVMJIT.cpp @@ -0,0 +1,324 @@ +/* +The EOS VM Optimized Compiler was created in part based on WAVM +https://github.com/WebAssembly/wasm-jit-prototype +subject the following: + +Copyright (c) 2016-2019, Andrew Scheidecker +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +* Neither the name of WAVM nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include "LLVMJIT.h" + +#include "llvm/ExecutionEngine/ExecutionEngine.h" +#include "llvm/ExecutionEngine/RTDyldMemoryManager.h" +#include "llvm/ExecutionEngine/Orc/CompileUtils.h" +#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h" +#include "llvm/ExecutionEngine/Orc/LambdaResolver.h" +#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h" +#include "llvm/ExecutionEngine/Orc/NullResolver.h" +#include "llvm/ExecutionEngine/Orc/Core.h" + +#include "llvm/Analysis/Passes.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/Verifier.h" +#include "llvm/IR/ValueHandle.h" +#include "llvm/IR/DebugLoc.h" +#include "llvm/Object/ObjectFile.h" +#include "llvm/Object/SymbolSize.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/TargetSelect.h" +#include "llvm/Support/Host.h" +#include "llvm/Support/DynamicLibrary.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/IR/DIBuilder.h" +#include "llvm/Transforms/InstCombine/InstCombine.h" +#include "llvm/Transforms/Utils.h" +#include + +#include +#include +#include + +#include "llvm/Support/LEB128.h" + +#if LLVM_VERSION_MAJOR == 7 +namespace llvm { namespace orc { + using LegacyRTDyldObjectLinkingLayer = RTDyldObjectLinkingLayer; + template + using LegacyIRCompileLayer = IRCompileLayer; +}} +#endif 
+ +#define DUMP_UNOPTIMIZED_MODULE 0 +#define VERIFY_MODULE 0 +#define DUMP_OPTIMIZED_MODULE 0 +#define PRINT_DISASSEMBLY 0 + +#if PRINT_DISASSEMBLY +#include "llvm-c/Disassembler.h" +static void disassembleFunction(U8* bytes,Uptr numBytes) +{ + LLVMDisasmContextRef disasmRef = LLVMCreateDisasm(llvm::sys::getProcessTriple().c_str(),nullptr,0,nullptr,nullptr); + + U8* nextByte = bytes; + Uptr numBytesRemaining = numBytes; + while(numBytesRemaining) + { + char instructionBuffer[256]; + const Uptr numInstructionBytes = LLVMDisasmInstruction( + disasmRef, + nextByte, + numBytesRemaining, + reinterpret_cast(nextByte), + instructionBuffer, + sizeof(instructionBuffer) + ); + if(numInstructionBytes == 0 || numInstructionBytes > numBytesRemaining) + break; + numBytesRemaining -= numInstructionBytes; + nextByte += numInstructionBytes; + + printf("\t\t%s\n",instructionBuffer); + }; + + LLVMDisasmDispose(disasmRef); +} +#endif + +namespace eosio { namespace chain { namespace eosvmoc { + +namespace LLVMJIT +{ + llvm::TargetMachine* targetMachine = nullptr; + + // Allocates memory for the LLVM object loader. 
+ struct UnitMemoryManager : llvm::RTDyldMemoryManager + { + UnitMemoryManager() {} + virtual ~UnitMemoryManager() override + {} + + void registerEHFrames(U8* addr, U64 loadAddr,uintptr_t numBytes) override {} + void deregisterEHFrames() override {} + + virtual bool needsToReserveAllocationSpace() override { return true; } + virtual void reserveAllocationSpace(uintptr_t numCodeBytes,U32 codeAlignment,uintptr_t numReadOnlyBytes,U32 readOnlyAlignment,uintptr_t numReadWriteBytes,U32 readWriteAlignment) override { + code = std::make_unique>(numCodeBytes + numReadOnlyBytes + numReadWriteBytes); + ptr = code->data(); + } + virtual U8* allocateCodeSection(uintptr_t numBytes,U32 alignment,U32 sectionID,llvm::StringRef sectionName) override + { + return get_next_code_ptr(numBytes, alignment); + } + virtual U8* allocateDataSection(uintptr_t numBytes,U32 alignment,U32 sectionID,llvm::StringRef SectionName,bool isReadOnly) override + { + if(SectionName == ".eh_frame") { + dumpster.resize(numBytes); + return dumpster.data(); + } + if(SectionName == ".stack_sizes") { + return stack_sizes.emplace_back(numBytes).data(); + } + WAVM_ASSERT_THROW(isReadOnly); + + return get_next_code_ptr(numBytes, alignment); + } + + virtual bool finalizeMemory(std::string* ErrMsg = nullptr) override { + code->resize(ptr - code->data()); + return true; + } + + std::unique_ptr> code; + uint8_t* ptr; + + std::vector dumpster; + std::list> stack_sizes; + + U8* get_next_code_ptr(uintptr_t numBytes, U32 alignment) { + FC_ASSERT(alignment <= alignof(std::max_align_t), "alignment of section exceeds max_align_t"); + uintptr_t p = (uintptr_t)ptr; + p += alignment - 1LL; + p &= ~(alignment - 1LL); + uint8_t* this_section = (uint8_t*)p; + ptr = this_section + numBytes; + + return this_section; + } + + UnitMemoryManager(const UnitMemoryManager&) = delete; + void operator=(const UnitMemoryManager&) = delete; + }; + + // The JIT compilation unit for a WebAssembly module instance. 
+ struct JITModule + { + JITModule() { + objectLayer = llvm::make_unique(ES,[this](llvm::orc::VModuleKey K) { + return llvm::orc::LegacyRTDyldObjectLinkingLayer::Resources{ + unitmemorymanager, std::make_shared() + }; + }, + [](llvm::orc::VModuleKey, const llvm::object::ObjectFile &Obj, const llvm::RuntimeDyld::LoadedObjectInfo &o) { + //nothing to do + }, + [this](llvm::orc::VModuleKey, const llvm::object::ObjectFile &Obj, const llvm::RuntimeDyld::LoadedObjectInfo &o) { + for(auto symbolSizePair : llvm::object::computeSymbolSizes(Obj)) { + auto symbol = symbolSizePair.first; + auto name = symbol.getName(); + auto address = symbol.getAddress(); + if(symbol.getType() && symbol.getType().get() == llvm::object::SymbolRef::ST_Function && name && address) { + Uptr loadedAddress = Uptr(*address); + auto symbolSection = symbol.getSection(); + if(symbolSection) + loadedAddress += (Uptr)o.getSectionLoadAddress(*symbolSection.get()); + Uptr functionDefIndex; + if(getFunctionIndexFromExternalName(name->data(),functionDefIndex)) + function_to_offsets[functionDefIndex] = loadedAddress-(uintptr_t)unitmemorymanager->code->data(); +#if PRINT_DISASSEMBLY + disassembleFunction((U8*)loadedAddress, symbolSizePair.second); +#endif + } + } + } + ); + objectLayer->setProcessAllSections(true); + compileLayer = llvm::make_unique(*objectLayer,llvm::orc::SimpleCompiler(*targetMachine)); + } + + void compile(llvm::Module* llvmModule); + + std::shared_ptr unitmemorymanager = std::make_shared(); + + std::map function_to_offsets; + std::vector final_pic_code; + + ~JITModule() + { + } + private: + typedef llvm::orc::LegacyIRCompileLayer CompileLayer; + + llvm::orc::ExecutionSession ES; + std::unique_ptr objectLayer; + std::unique_ptr compileLayer; + }; + + static Uptr printedModuleId = 0; + + void printModule(const llvm::Module* llvmModule,const char* filename) + { + std::error_code errorCode; + std::string augmentedFilename = std::string(filename) + std::to_string(printedModuleId++) + ".ll"; + 
llvm::raw_fd_ostream dumpFileStream(augmentedFilename,errorCode,llvm::sys::fs::OpenFlags::F_Text); + llvmModule->print(dumpFileStream,nullptr); + ///Log::printf(Log::Category::debug,"Dumped LLVM module to: %s\n",augmentedFilename.c_str()); + } + + void JITModule::compile(llvm::Module* llvmModule) + { + // Get a target machine object for this host, and set the module to use its data layout. + llvmModule->setDataLayout(targetMachine->createDataLayout()); + + // Verify the module. + if(DUMP_UNOPTIMIZED_MODULE) { printModule(llvmModule,"llvmDump"); } + if(VERIFY_MODULE) + { + std::string verifyOutputString; + llvm::raw_string_ostream verifyOutputStream(verifyOutputString); + if(llvm::verifyModule(*llvmModule,&verifyOutputStream)) + { Errors::fatalf("LLVM verification errors:\n%s\n",verifyOutputString.c_str()); } + ///Log::printf(Log::Category::debug,"Verified LLVM module\n"); + } + + auto fpm = new llvm::legacy::FunctionPassManager(llvmModule); + fpm->add(llvm::createPromoteMemoryToRegisterPass()); + fpm->add(llvm::createInstructionCombiningPass()); + fpm->add(llvm::createCFGSimplificationPass()); + fpm->add(llvm::createJumpThreadingPass()); + fpm->add(llvm::createConstantPropagationPass()); + fpm->doInitialization(); + + for(auto functionIt = llvmModule->begin();functionIt != llvmModule->end();++functionIt) + { fpm->run(*functionIt); } + delete fpm; + + if(DUMP_OPTIMIZED_MODULE) { printModule(llvmModule,"llvmOptimizedDump"); } + + llvm::orc::VModuleKey K = ES.allocateVModule(); + std::unique_ptr mod(llvmModule); + WAVM_ASSERT_THROW(!compileLayer->addModule(K, std::move(mod))); + WAVM_ASSERT_THROW(!compileLayer->emitAndFinalize(K)); + + final_pic_code = std::move(*unitmemorymanager->code); + } + + instantiated_code instantiateModule(const IR::Module& module) + { + static bool inited; + if(!inited) { + inited = true; + llvm::InitializeNativeTarget(); + llvm::InitializeNativeTargetAsmPrinter(); + llvm::InitializeNativeTargetAsmParser(); + 
llvm::InitializeNativeTargetDisassembler(); + llvm::sys::DynamicLibrary::LoadLibraryPermanently(nullptr); + + auto targetTriple = llvm::sys::getProcessTriple(); + + llvm::TargetOptions to; + to.EmitStackSizeSection = 1; + + targetMachine = llvm::EngineBuilder().setRelocationModel(llvm::Reloc::PIC_).setCodeModel(llvm::CodeModel::Small).setTargetOptions(to).selectTarget( + llvm::Triple(targetTriple),"","",llvm::SmallVector() + ); + } + + // Emit LLVM IR for the module. + auto llvmModule = emitModule(module); + + // Construct the JIT compilation pipeline for this module. + auto jitModule = new JITModule(); + // Compile the module. + jitModule->compile(llvmModule); + + unsigned num_functions_stack_size_found = 0; + for(const auto& stacksizes : jitModule->unitmemorymanager->stack_sizes) { + fc::datastream ds(stacksizes.data(), stacksizes.size()); + while(ds.remaining()) { + uint64_t funcaddr; + fc::unsigned_int stack_size; + fc::raw::unpack(ds, funcaddr); + fc::raw::unpack(ds, stack_size); + + ++num_functions_stack_size_found; + if(stack_size > 16u*1024u) + _exit(1); + } + } + if(num_functions_stack_size_found != module.functions.defs.size()) + _exit(1); + if(jitModule->final_pic_code.size() >= 16u*1024u*1024u) + _exit(1); + + instantiated_code ret; + ret.code = jitModule->final_pic_code; + ret.function_offsets = jitModule->function_to_offsets; + return ret; + } +} +}}} \ No newline at end of file diff --git a/libraries/chain/webassembly/eos-vm-oc/LLVMJIT.h b/libraries/chain/webassembly/eos-vm-oc/LLVMJIT.h new file mode 100644 index 00000000000..5b1152f56d6 --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/LLVMJIT.h @@ -0,0 +1,25 @@ +#pragma once + +#include "Inline/BasicTypes.h" +#include "IR/Module.h" + +#pragma push_macro("N") +#undef N +#include "llvm/IR/Module.h" +#pragma pop_macro("N") +#include +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +struct instantiated_code { + std::vector code; + std::map function_offsets; +}; + 
+namespace LLVMJIT { + bool getFunctionIndexFromExternalName(const char* externalName,Uptr& outFunctionDefIndex); + llvm::Module* emitModule(const IR::Module& module); + instantiated_code instantiateModule(const IR::Module& module); +} +}}} \ No newline at end of file diff --git a/libraries/chain/webassembly/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/eos-vm-oc/code_cache.cpp new file mode 100644 index 00000000000..b12388eb3da --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/code_cache.cpp @@ -0,0 +1,382 @@ +#include //set_thread_name + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "IR/Module.h" +#include "IR/Validate.h" +#include "WASM/WASM.h" +#include "LLVMJIT.h" + +using namespace IR; +using namespace Runtime; + +namespace eosio { namespace chain { namespace eosvmoc { + +static constexpr size_t header_offset = 512u; +static constexpr size_t header_size = 512u; +static constexpr size_t total_header_size = header_offset + header_size; +static constexpr uint64_t header_id = 0x32434f4d56534f45ULL; //"EOSVMOC2" little endian + +struct code_cache_header { + uint64_t id = header_id; + bool dirty = false; + uintptr_t serialized_descriptor_index = 0; +} __attribute__ ((packed)); +static constexpr size_t header_dirty_bit_offset_from_file_start = header_offset + offsetof(code_cache_header, dirty); +static constexpr size_t descriptor_ptr_from_file_start = header_offset + offsetof(code_cache_header, serialized_descriptor_index); + +static_assert(sizeof(code_cache_header) <= header_size, "code_cache_header too big"); + +code_cache_async::code_cache_async(const bfs::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) : + code_cache_base(data_dir, eosvmoc_config, db), + _result_queue(eosvmoc_config.threads * 2), + _threads(eosvmoc_config.threads) +{ + FC_ASSERT(_threads, "EOS VM OC requires at least 1 compile thread"); + + 
wait_on_compile_monitor_message(); + + _monitor_reply_thread = std::thread([this]() { + fc::set_os_thread_name("oc-monitor"); + _ctx.run(); + }); +} + +code_cache_async::~code_cache_async() { + _compile_monitor_write_socket.shutdown(local::datagram_protocol::socket::shutdown_send); + _monitor_reply_thread.join(); + consume_compile_thread_queue(); +} + +//remember again: wait_on_compile_monitor_message's callback is non-main thread! +void code_cache_async::wait_on_compile_monitor_message() { + _compile_monitor_read_socket.async_wait(local::datagram_protocol::socket::wait_read, [this](auto ec) { + if(ec) { + _ctx.stop(); + return; + } + + auto [success, message, fds] = read_message_with_fds(_compile_monitor_read_socket); + if(!success || !message.contains()) { + _ctx.stop(); + return; + } + + _result_queue.push(message.get()); + + wait_on_compile_monitor_message(); + }); +} + + +//number processed, bytes available (only if number processed > 0) +std::tuple code_cache_async::consume_compile_thread_queue() { + size_t bytes_remaining = 0; + size_t gotsome = _result_queue.consume_all([&](const wasm_compilation_result_message& result) { + if(_outstanding_compiles_and_poison[result.code] == false) { + result.result.visit(overloaded { + [&](const code_descriptor& cd) { + _cache_index.push_front(cd); + }, + [&](const compilation_result_unknownfailure&) { + wlog("code ${c} failed to tier-up with EOS VM OC", ("c", result.code.code_id)); + _blacklist.emplace(result.code); + }, + [&](const compilation_result_toofull&) { + run_eviction_round(); + } + }); + } + _outstanding_compiles_and_poison.erase(result.code); + bytes_remaining = result.cache_free_bytes; + }); + + return {gotsome, bytes_remaining}; +} + +const code_descriptor* const code_cache_async::get_descriptor_for_code(const digest_type& code_id, const uint8_t& vm_version) { + //if there are any outstanding compiles, process the result queue now + if(_outstanding_compiles_and_poison.size()) { + auto [count_processed, 
bytes_remaining] = consume_compile_thread_queue(); + + if(count_processed) + check_eviction_threshold(bytes_remaining); + + while(count_processed && _queued_compiles.size()) { + auto nextup = _queued_compiles.begin(); + + //it's not clear this check is required: if apply() was called for code then it existed in the code_index; and then + // if we got notification of it no longer existing we would have removed it from queued_compiles + const code_object* const codeobject = _db.find(boost::make_tuple(nextup->code_id, 0, nextup->vm_version)); + if(codeobject) { + _outstanding_compiles_and_poison.emplace(*nextup, false); + std::vector fds_to_pass; + fds_to_pass.emplace_back(memfd_for_bytearray(codeobject->code)); + FC_ASSERT(write_message_with_fds(_compile_monitor_write_socket, compile_wasm_message{ *nextup }, fds_to_pass), "EOS VM failed to communicate to OOP manager"); + --count_processed; + } + _queued_compiles.erase(nextup); + } + } + + //check for entry in cache + code_cache_index::index::type::iterator it = _cache_index.get().find(boost::make_tuple(code_id, vm_version)); + if(it != _cache_index.get().end()) { + _cache_index.relocate(_cache_index.begin(), _cache_index.project<0>(it)); + return &*it; + } + + const code_tuple ct = code_tuple{code_id, vm_version}; + + if(_blacklist.find(ct) != _blacklist.end()) + return nullptr; + if(auto it = _outstanding_compiles_and_poison.find(ct); it != _outstanding_compiles_and_poison.end()) { + it->second = false; + return nullptr; + } + if(_queued_compiles.find(ct) != _queued_compiles.end()) + return nullptr; + + if(_outstanding_compiles_and_poison.size() >= _threads) { + _queued_compiles.emplace(ct); + return nullptr; + } + + const code_object* const codeobject = _db.find(boost::make_tuple(code_id, 0, vm_version)); + if(!codeobject) //should be impossible right? 
+ return nullptr; + + _outstanding_compiles_and_poison.emplace(ct, false); + std::vector fds_to_pass; + fds_to_pass.emplace_back(memfd_for_bytearray(codeobject->code)); + write_message_with_fds(_compile_monitor_write_socket, compile_wasm_message{ ct }, fds_to_pass); + return nullptr; +} + +code_cache_sync::~code_cache_sync() { + //it's exceedingly critical that we wait for the compile monitor to be done with all its work + //This is easy in the sync case + _compile_monitor_write_socket.shutdown(local::datagram_protocol::socket::shutdown_send); + auto [success, message, fds] = read_message_with_fds(_compile_monitor_read_socket); + if(success) + elog("unexpected response from EOS VM OC compile monitor during shutdown"); +} + +const code_descriptor* const code_cache_sync::get_descriptor_for_code_sync(const digest_type& code_id, const uint8_t& vm_version) { + //check for entry in cache + code_cache_index::index::type::iterator it = _cache_index.get().find(boost::make_tuple(code_id, vm_version)); + if(it != _cache_index.get().end()) { + _cache_index.relocate(_cache_index.begin(), _cache_index.project<0>(it)); + return &*it; + } + + const code_object* const codeobject = _db.find(boost::make_tuple(code_id, 0, vm_version)); + if(!codeobject) //should be impossible right? 
+ return nullptr; + + std::vector fds_to_pass; + fds_to_pass.emplace_back(memfd_for_bytearray(codeobject->code)); + + write_message_with_fds(_compile_monitor_write_socket, compile_wasm_message{ {code_id, vm_version} }, fds_to_pass); + auto [success, message, fds] = read_message_with_fds(_compile_monitor_read_socket); + EOS_ASSERT(success, wasm_execution_error, "failed to read response from monitor process"); + EOS_ASSERT(message.contains(), wasm_execution_error, "unexpected response from monitor process"); + + wasm_compilation_result_message result = message.get(); + EOS_ASSERT(result.result.contains(), wasm_execution_error, "failed to compile wasm"); + + check_eviction_threshold(result.cache_free_bytes); + + return &*_cache_index.push_front(std::move(result.result.get())).first; +} + +code_cache_base::code_cache_base(const boost::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) : + _db(db), + _cache_file_path(data_dir/"code_cache.bin") +{ + static_assert(sizeof(allocator_t) <= header_offset, "header offset intersects with allocator"); + + bfs::create_directories(data_dir); + + if(!bfs::exists(_cache_file_path)) { + EOS_ASSERT(eosvmoc_config.cache_size >= allocator_t::get_min_size(total_header_size), database_exception, "configured code cache size is too small"); + std::ofstream ofs(_cache_file_path.generic_string(), std::ofstream::trunc); + EOS_ASSERT(ofs.good(), database_exception, "unable to create EOS VM Optimized Compiler code cache"); + bfs::resize_file(_cache_file_path, eosvmoc_config.cache_size); + bip::file_mapping creation_mapping(_cache_file_path.generic_string().c_str(), bip::read_write); + bip::mapped_region creation_region(creation_mapping, bip::read_write); + new (creation_region.get_address()) allocator_t(eosvmoc_config.cache_size, total_header_size); + new ((char*)creation_region.get_address() + header_offset) code_cache_header; + } + + code_cache_header cache_header; + { + char 
header_buff[total_header_size]; + std::ifstream hs(_cache_file_path.generic_string(), std::ifstream::binary); + hs.read(header_buff, sizeof(header_buff)); + EOS_ASSERT(!hs.fail(), bad_database_version_exception, "failed to read code cache header"); + memcpy((char*)&cache_header, header_buff + header_offset, sizeof(cache_header)); + } + + EOS_ASSERT(cache_header.id == header_id, bad_database_version_exception, "existing EOS VM OC code cache not compatible with this version"); + EOS_ASSERT(!cache_header.dirty, database_exception, "code cache is dirty"); + + set_on_disk_region_dirty(true); + + auto existing_file_size = bfs::file_size(_cache_file_path); + if(eosvmoc_config.cache_size > existing_file_size) { + bfs::resize_file(_cache_file_path, eosvmoc_config.cache_size); + + bip::file_mapping resize_mapping(_cache_file_path.generic_string().c_str(), bip::read_write); + bip::mapped_region resize_region(resize_mapping, bip::read_write); + + allocator_t* resize_allocator = reinterpret_cast(resize_region.get_address()); + resize_allocator->grow(eosvmoc_config.cache_size - existing_file_size); + } + + _cache_fd = ::open(_cache_file_path.generic_string().c_str(), O_RDWR | O_CLOEXEC); + EOS_ASSERT(_cache_fd >= 0, database_exception, "failure to open code cache"); + + //load up the previous cache index + char* code_mapping = (char*)mmap(nullptr, eosvmoc_config.cache_size, PROT_READ|PROT_WRITE, MAP_SHARED, _cache_fd, 0); + EOS_ASSERT(code_mapping != MAP_FAILED, database_exception, "failure to mmap code cache"); + + allocator_t* allocator = reinterpret_cast(code_mapping); + + if(cache_header.serialized_descriptor_index) { + fc::datastream ds(code_mapping + cache_header.serialized_descriptor_index, eosvmoc_config.cache_size - cache_header.serialized_descriptor_index); + unsigned number_entries; + fc::raw::unpack(ds, number_entries); + for(unsigned i = 0; i < number_entries; ++i) { + code_descriptor cd; + fc::raw::unpack(ds, cd); + if(cd.codegen_version != 0) + continue; + 
_cache_index.push_back(std::move(cd)); + } + allocator->deallocate(code_mapping + cache_header.serialized_descriptor_index); + + ilog("EOS VM Optimized Compiler code cache loaded with ${c} entries; ${f} of ${t} bytes free", ("c", number_entries)("f", allocator->get_free_memory())("t", allocator->get_size())); + } + munmap(code_mapping, eosvmoc_config.cache_size); + + _free_bytes_eviction_threshold = eosvmoc_config.cache_size * .1; + + wrapped_fd compile_monitor_conn = get_connection_to_compile_monitor(_cache_fd); + + //okay, let's do this by the book: we're not allowed to write & read on different threads to the same asio socket. So create two fds + //representing the same unix socket. we'll read on one and write on the other + int duped = dup(compile_monitor_conn); + _compile_monitor_write_socket.assign(local::datagram_protocol(), duped); + _compile_monitor_read_socket.assign(local::datagram_protocol(), compile_monitor_conn.release()); +} + +void code_cache_base::set_on_disk_region_dirty(bool dirty) { + bip::file_mapping dirty_mapping(_cache_file_path.generic_string().c_str(), bip::read_write); + bip::mapped_region dirty_region(dirty_mapping, bip::read_write); + + *((char*)dirty_region.get_address()+header_dirty_bit_offset_from_file_start) = dirty; + if(dirty_region.flush(0, 0, false) == false) + elog("Syncing code cache failed"); +} + +template +void code_cache_base::serialize_cache_index(fc::datastream& ds) { + unsigned entries = _cache_index.size(); + fc::raw::pack(ds, entries); + for(const code_descriptor& cd : _cache_index) + fc::raw::pack(ds, cd); +} + +code_cache_base::~code_cache_base() { + //reopen the code cache in our process + struct stat st; + if(fstat(_cache_fd, &st)) + return; + char* code_mapping = (char*)mmap(nullptr, st.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, _cache_fd, 0); + if(code_mapping == MAP_FAILED) + return; + + allocator_t* allocator = reinterpret_cast(code_mapping); + + //serialize out the cache index + fc::datastream dssz; + 
serialize_cache_index(dssz); + size_t sz = dssz.tellp(); + + char* p = nullptr; + while(_cache_index.size()) { + p = (char*)allocator->allocate(sz); + if(p != nullptr) + break; + //in theory, there could be too little free space avaiable to store the cache index + //try to free up some space + for(unsigned int i = 0; i < 25 && _cache_index.size(); ++i) { + allocator->deallocate(code_mapping + _cache_index.back().code_begin); + allocator->deallocate(code_mapping + _cache_index.back().initdata_begin); + _cache_index.pop_back(); + } + } + + if(p) { + fc::datastream ds(p, sz); + serialize_cache_index(ds); + + uintptr_t ptr_offset = p-code_mapping; + *((uintptr_t*)(code_mapping+descriptor_ptr_from_file_start)) = ptr_offset; + } + else + *((uintptr_t*)(code_mapping+descriptor_ptr_from_file_start)) = 0; + + msync(code_mapping, allocator->get_size(), MS_SYNC); + munmap(code_mapping, allocator->get_size()); + close(_cache_fd); + set_on_disk_region_dirty(false); + +} + +void code_cache_base::free_code(const digest_type& code_id, const uint8_t& vm_version) { + code_cache_index::index::type::iterator it = _cache_index.get().find(boost::make_tuple(code_id, vm_version)); + if(it != _cache_index.get().end()) { + write_message_with_fds(_compile_monitor_write_socket, evict_wasms_message{ {*it} }); + _cache_index.get().erase(it); + } + + //if it's in the queued list, erase it + _queued_compiles.erase({code_id, vm_version}); + + //however, if it's currently being compiled there is no way to cancel the compile, + //so instead set a poison boolean that indicates not to insert the code in to the cache + //once the compile is complete + const std::unordered_map::iterator compiling_it = _outstanding_compiles_and_poison.find({code_id, vm_version}); + if(compiling_it != _outstanding_compiles_and_poison.end()) + compiling_it->second = true; +} + +void code_cache_base::run_eviction_round() { + evict_wasms_message evict_msg; + for(unsigned int i = 0; i < 25 && _cache_index.size() > 1; ++i) { + 
evict_msg.codes.emplace_back(_cache_index.back()); + _cache_index.pop_back(); + } + write_message_with_fds(_compile_monitor_write_socket, evict_msg); +} + +void code_cache_base::check_eviction_threshold(size_t free_bytes) { + if(free_bytes < _free_bytes_eviction_threshold) + run_eviction_round(); +} + +}}} \ No newline at end of file diff --git a/libraries/chain/webassembly/eos-vm-oc/compile_monitor.cpp b/libraries/chain/webassembly/eos-vm-oc/compile_monitor.cpp new file mode 100644 index 00000000000..67b85c5ded4 --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/compile_monitor.cpp @@ -0,0 +1,326 @@ +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +using namespace boost::asio; + +static size_t get_size_of_fd(int fd) { + struct stat st; + FC_ASSERT(fstat(fd, &st) == 0, "failed to get size of fd"); + return st.st_size; +} + +static void copy_memfd_contents_to_pointer(void* dst, int fd) { + struct stat st; + FC_ASSERT(fstat(fd, &st) == 0, "failed to get size of fd"); + if(st.st_size == 0) + return; + void* contents = mmap(nullptr, st.st_size, PROT_READ, MAP_SHARED, fd, 0); + FC_ASSERT(contents != MAP_FAILED, "failed to map memfd file"); + memcpy(dst, contents, st.st_size); + munmap(contents, st.st_size); +} + +struct compile_monitor_session { + compile_monitor_session(boost::asio::io_context& context, local::datagram_protocol::socket&& n, wrapped_fd&& c, wrapped_fd& t) : + _ctx(context), + _nodeos_instance_socket(std::move(n)), + _cache_fd(std::move(c)), + _trampoline_socket(t) { + + struct stat st; + FC_ASSERT(fstat(_cache_fd, &st) == 0, "failed to stat cache fd"); + _code_size = st.st_size; + _code_mapping = (char*)mmap(nullptr, _code_size, PROT_READ|PROT_WRITE, MAP_SHARED, _cache_fd, 0); + FC_ASSERT(_code_mapping != MAP_FAILED, "failed to mmap cache file"); + _allocator = reinterpret_cast(_code_mapping); + + read_message_from_nodeos(); + } + 
+ ~compile_monitor_session() { + munmap(_code_mapping, _code_size); + } + + void read_message_from_nodeos() { + _nodeos_instance_socket.async_wait(local::datagram_protocol::socket::wait_read, [this](auto ec) { + if(ec) { + connection_dead_signal(); + return; + } + auto [success, message, fds] = read_message_with_fds(_nodeos_instance_socket); + if(!success) { + connection_dead_signal(); + return; + } + + message.visit(overloaded { + [&, &fds=fds](const compile_wasm_message& compile) { + if(fds.size() != 1) { + connection_dead_signal(); + return; + } + kick_compile_off(compile.code, std::move(fds[0])); + }, + [&](const evict_wasms_message& evict) { + for(const code_descriptor& cd : evict.codes) { + _allocator->deallocate(_code_mapping + cd.code_begin); + _allocator->deallocate(_code_mapping + cd.initdata_begin); + } + }, + [&](const auto&) { + //anything else is an error + connection_dead_signal(); + return; + } + }); + + read_message_from_nodeos(); + }); + } + + void kick_compile_off(const code_tuple& code_id, wrapped_fd&& wasm_code) { + //prepare a requst to go out to the trampoline + int socks[2]; + socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, socks); + local::datagram_protocol::socket response_socket(_ctx); + response_socket.assign(local::datagram_protocol(), socks[0]); + std::vector fds_pass_to_trampoline; + fds_pass_to_trampoline.emplace_back(socks[1]); + fds_pass_to_trampoline.emplace_back(std::move(wasm_code)); + + eosvmoc_message trampoline_compile_request = compile_wasm_message{code_id}; + if(write_message_with_fds(_trampoline_socket, trampoline_compile_request, fds_pass_to_trampoline) == false) { + wasm_compilation_result_message reply{code_id, compilation_result_unknownfailure{}, _allocator->get_free_memory()}; + write_message_with_fds(_nodeos_instance_socket, reply); + return; + } + + current_compiles.emplace_front(code_id, std::move(response_socket)); + read_message_from_compile_task(current_compiles.begin()); + } + + void 
read_message_from_compile_task(std::list>::iterator current_compile_it) { + auto& [code, socket] = *current_compile_it; + socket.async_wait(local::datagram_protocol::socket::wait_read, [this, current_compile_it](auto ec) { + //at this point we only expect 1 of 2 things to happen: we either get a reply (success), or we get no reply (failure) + auto& [code, socket] = *current_compile_it; + auto [success, message, fds] = read_message_with_fds(socket); + + wasm_compilation_result_message reply{code, compilation_result_unknownfailure{}, _allocator->get_free_memory()}; + + void* code_ptr = nullptr; + void* mem_ptr = nullptr; + try { + if(success && message.contains() && fds.size() == 2) { + code_compilation_result_message& result = message.get(); + code_ptr = _allocator->allocate(get_size_of_fd(fds[0])); + mem_ptr = _allocator->allocate(get_size_of_fd(fds[1])); + + if(code_ptr == nullptr || mem_ptr == nullptr) { + _allocator->deallocate(code_ptr); + _allocator->deallocate(mem_ptr); + reply.result = compilation_result_toofull(); + } + else { + copy_memfd_contents_to_pointer(code_ptr, fds[0]); + copy_memfd_contents_to_pointer(mem_ptr, fds[1]); + + reply.result = code_descriptor { + code.code_id, + code.vm_version, + 0, + (uintptr_t)code_ptr - (uintptr_t)_code_mapping, + result.start, + result.apply_offset, + result.starting_memory_pages, + (uintptr_t)mem_ptr - (uintptr_t)_code_mapping, + (unsigned)get_size_of_fd(fds[1]), + result.initdata_prologue_size + }; + } + } + } + catch(...) 
{ + _allocator->deallocate(code_ptr); + _allocator->deallocate(mem_ptr); + } + + write_message_with_fds(_nodeos_instance_socket, reply); + + //either way, we are done + _ctx.post([this, current_compile_it]() { + current_compiles.erase(current_compile_it); + }); + }); + + } + + boost::signals2::signal connection_dead_signal; + +private: + boost::asio::io_context& _ctx; + local::datagram_protocol::socket _nodeos_instance_socket; + wrapped_fd _cache_fd; + wrapped_fd& _trampoline_socket; + + char* _code_mapping; + size_t _code_size; + allocator_t* _allocator; + + std::list> current_compiles; +}; + +struct compile_monitor { + compile_monitor(boost::asio::io_context& ctx, local::datagram_protocol::socket&& n, wrapped_fd&& t) : _nodeos_socket(std::move(n)), _trampoline_socket(std::move(t)) { + //the only duty of compile_monitor is to create a compile_monitor_session when a code_cache instance + // in nodeos wants one + wait_for_new_incomming_session(ctx); + } + + void wait_for_new_incomming_session(boost::asio::io_context& ctx) { + _nodeos_socket.async_wait(boost::asio::local::datagram_protocol::socket::wait_read, [this, &ctx](auto ec) { + if(ec) { + ctx.stop(); + return; + } + auto [success, message, fds] = read_message_with_fds(_nodeos_socket); + if(!success) { //failure reading indicates that nodeos has shut down + ctx.stop(); + return; + } + if(!message.contains() || fds.size() != 2) { + ctx.stop(); + return; + } + try { + local::datagram_protocol::socket _socket_for_comm(ctx); + _socket_for_comm.assign(local::datagram_protocol(), fds[0].release()); + _compile_sessions.emplace_front(ctx, std::move(_socket_for_comm), std::move(fds[1]), _trampoline_socket); + _compile_sessions.front().connection_dead_signal.connect([&, it = _compile_sessions.begin()]() { + ctx.post([&]() { + _compile_sessions.erase(it); + }); + }); + write_message_with_fds(_nodeos_socket, initalize_response_message()); + } + catch(const fc::exception& e) { + write_message_with_fds(_nodeos_socket, 
initalize_response_message{e.what()}); + } + catch(...) { + write_message_with_fds(_nodeos_socket, initalize_response_message{"Failed to create compile process"}); + } + + wait_for_new_incomming_session(ctx); + }); + } + + local::datagram_protocol::socket _nodeos_socket; + wrapped_fd _trampoline_socket; + + std::list _compile_sessions; +}; + +void launch_compile_monitor(int nodeos_fd) { + prctl(PR_SET_NAME, "oc-monitor"); + prctl(PR_SET_PDEATHSIG, SIGKILL); + + //first off, let's disable shutdown signals to us; we want all shutdown indicators to come from + // nodeos shutting us down + sigset_t set; + sigemptyset(&set); + sigaddset(&set, SIGHUP); + sigaddset(&set, SIGTERM); + sigaddset(&set, SIGPIPE); + sigaddset(&set, SIGINT); + sigaddset(&set, SIGQUIT); + sigprocmask(SIG_BLOCK, &set, nullptr); + + int socks[2]; //0: local trampoline socket, 1: the one we give to trampoline + socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, socks); + pid_t child = fork(); + if(child == 0) { + close(socks[0]); + run_compile_trampoline(socks[1]); + } + close(socks[1]); + + { + boost::asio::io_context ctx; + boost::asio::local::datagram_protocol::socket nodeos_socket(ctx); + nodeos_socket.assign(boost::asio::local::datagram_protocol(), nodeos_fd); + wrapped_fd trampoline_socket(socks[0]); + compile_monitor monitor(ctx, std::move(nodeos_socket), std::move(trampoline_socket)); + ctx.run(); + if(monitor._compile_sessions.size()) + std::cerr << "ERROR: EOS VM OC compiler monitor exiting with active sessions" << std::endl; + } + + _exit(0); +} + +struct compile_monitor_trampoline { + void start() { + //create communication socket; let's hold off on asio usage until all forks are done + int socks[2]; + socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, socks); + compile_manager_fd = socks[0]; + + compile_manager_pid = fork(); + if(compile_manager_pid == 0) { + close(socks[0]); + launch_compile_monitor(socks[1]); + } + close(socks[1]); + } + + pid_t compile_manager_pid = -1; + int 
compile_manager_fd = -1; +}; + +static compile_monitor_trampoline the_compile_monitor_trampoline; +extern "C" int __real_main(int, char*[]); +extern "C" int __wrap_main(int argc, char* argv[]) { + the_compile_monitor_trampoline.start(); + return __real_main(argc, argv); +} + +wrapped_fd get_connection_to_compile_monitor(int cache_fd) { + FC_ASSERT(the_compile_monitor_trampoline.compile_manager_pid >= 0, "EOS VM oop connection doesn't look active"); + + int socks[2]; //0: our socket to compile_manager_session, 1: socket we'll give to compile_maanger_session + socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, socks); + wrapped_fd socket_to_monitor_session(socks[0]); + wrapped_fd socket_to_hand_to_monitor_session(socks[1]); + + //we don't own cache_fd, so try to be extra careful not to accidentally close it: don't stick it in a wrapped_fd + // to hand off to write_message_with_fds even temporarily. make a copy of it. + int dup_of_cache_fd = dup(cache_fd); + FC_ASSERT(dup_of_cache_fd != -1, "failed to dup cache_fd"); + wrapped_fd dup_cache_fd(dup_of_cache_fd); + + std::vector fds_to_pass; + fds_to_pass.emplace_back(std::move(socket_to_hand_to_monitor_session)); + fds_to_pass.emplace_back(std::move(dup_cache_fd)); + write_message_with_fds(the_compile_monitor_trampoline.compile_manager_fd, initialize_message(), fds_to_pass); + + auto [success, message, fds] = read_message_with_fds(the_compile_monitor_trampoline.compile_manager_fd); + EOS_ASSERT(success, misc_exception, "failed to read response from monitor process"); + EOS_ASSERT(message.contains(), misc_exception, "unexpected response from monitor process"); + EOS_ASSERT(!message.get().error_message, misc_exception, "Error message from monitor process: ${e}", ("e", *message.get().error_message)); + return socket_to_monitor_session; +} + +}}} \ No newline at end of file diff --git a/libraries/chain/webassembly/eos-vm-oc/compile_trampoline.cpp b/libraries/chain/webassembly/eos-vm-oc/compile_trampoline.cpp new file 
mode 100644 index 00000000000..07e9ba6b086 --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/compile_trampoline.cpp @@ -0,0 +1,188 @@ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "IR/Module.h" +#include "IR/Validate.h" +#include "WASM/WASM.h" +#include "LLVMJIT.h" + +using namespace IR; + +namespace eosio { namespace chain { namespace eosvmoc { + +void run_compile(wrapped_fd&& response_sock, wrapped_fd&& wasm_code) noexcept { //noexcept; we'll just blow up if anything tries to cross this boundry + std::vector wasm = vector_for_memfd(wasm_code); + + //ideally we catch exceptions and sent them upstream as strings for easier reporting + + Module module; + Serialization::MemoryInputStream stream(wasm.data(), wasm.size()); + WASM::serialize(stream, module); + module.userSections.clear(); + wasm_injections::wasm_binary_injection injector(module); + injector.inject(); + + instantiated_code code = LLVMJIT::instantiateModule(module); + + code_compilation_result_message result_message; + + const std::map& function_to_offsets = code.function_offsets; + + if(module.startFunctionIndex == UINTPTR_MAX) + result_message.start = no_offset{}; + else if(module.startFunctionIndex < module.functions.imports.size()) { + const auto& f = module.functions.imports[module.startFunctionIndex]; + const intrinsic_entry& ie = get_intrinsic_map().at(f.moduleName + "." 
+ f.exportName); + result_message.start = intrinsic_ordinal{ie.ordinal}; + } + else + result_message.start = code_offset{function_to_offsets.at(module.startFunctionIndex-module.functions.imports.size())}; + + for(const Export& exprt : module.exports) { + if(exprt.name == "apply") + result_message.apply_offset = function_to_offsets.at(exprt.index-module.functions.imports.size()); + } + + result_message.starting_memory_pages = -1; + if(module.memories.size()) + result_message.starting_memory_pages = module.memories.defs.at(0).type.size.min; + + std::vector prologue(memory::cb_offset); //getting the control block offset gets us as large as table+globals as possible + std::vector::iterator prologue_it = prologue.end(); + + //set up mutable globals + union global_union { + int64_t i64; + int32_t i32; + float f32; + double f64; + }; + + for(const GlobalDef& global : module.globals.defs) { + if(!global.type.isMutable) + continue; + prologue_it -= 8; + global_union* const u = (global_union* const)&*prologue_it; + + switch(global.initializer.type) { + case InitializerExpression::Type::i32_const: u->i32 = global.initializer.i32; break; + case InitializerExpression::Type::i64_const: u->i64 = global.initializer.i64; break; + case InitializerExpression::Type::f32_const: u->f32 = global.initializer.f32; break; + case InitializerExpression::Type::f64_const: u->f64 = global.initializer.f64; break; + default: break; //impossible + } + } + + struct table_entry { + uintptr_t type; + int64_t func; //>= 0 means offset to code in wasm; < 0 means intrinsic call at offset address + }; + + if(module.tables.size()) + prologue_it -= sizeof(table_entry) * module.tables.defs[0].type.size.min; + + for(const TableSegment& table_segment : module.tableSegments) { + struct table_entry* table_index_0 = (struct table_entry*)&*prologue_it; + + if(table_segment.baseOffset.i32 > module.tables.defs[0].type.size.min) + return; + + for(Uptr i = 0; i < table_segment.indices.size(); ++i) { + const Uptr 
function_index = table_segment.indices[i]; + const long int effective_table_index = table_segment.baseOffset.i32 + i; + + if(effective_table_index >= module.tables.defs[0].type.size.min) + return; + + if(function_index < module.functions.imports.size()) { + const auto& f = module.functions.imports[function_index]; + const intrinsic_entry& ie = get_intrinsic_map().at(f.moduleName + "." + f.exportName); + table_index_0[effective_table_index].func = ie.ordinal*-8; + table_index_0[effective_table_index].type = (uintptr_t)module.types[module.functions.imports[function_index].type.index]; + } + else { + table_index_0[effective_table_index].func = function_to_offsets.at(function_index - module.functions.imports.size()); + table_index_0[effective_table_index].type = (uintptr_t)module.types[module.functions.defs[function_index - module.functions.imports.size()].type.index]; + } + } + } + + //this is somewhat copy pasta from wasm_interface_private, with the asserts removed + std::vector initial_mem; + for(const DataSegment& data_segment : module.dataSegments) { + const U32 base_offset = data_segment.baseOffset.i32; + + if(base_offset + data_segment.data.size() > initial_mem.size()) + initial_mem.resize(base_offset + data_segment.data.size(), 0x00); + memcpy(initial_mem.data() + base_offset, data_segment.data.data(), data_segment.data.size()); + } + + result_message.initdata_prologue_size = prologue.end() - prologue_it; + std::vector initdata_prep; + std::move(prologue_it, prologue.end(), std::back_inserter(initdata_prep)); + std::move(initial_mem.begin(), initial_mem.end(), std::back_inserter(initdata_prep)); + + std::vector fds_to_send; + fds_to_send.emplace_back(memfd_for_bytearray(code.code)); + fds_to_send.emplace_back(memfd_for_bytearray(initdata_prep)); + write_message_with_fds(response_sock, result_message, fds_to_send); +} + +void run_compile_trampoline(int fd) { + prctl(PR_SET_NAME, "oc-trampoline"); + prctl(PR_SET_PDEATHSIG, SIGKILL); + + //squelching this for now, 
but it means we won't have ability to get compile metrics + struct sigaction act; + sigset_t set; + sigemptyset(&set); + act.sa_handler = SIG_IGN; + act.sa_mask = set; + act.sa_flags = SA_NOCLDWAIT; + act.sa_sigaction = nullptr; + sigaction(SIGCHLD, &act, nullptr); + + while(true) { + auto [success, message, fds] = read_message_with_fds(fd); + if(!success) + break; + + if(!message.contains() || fds.size() != 2) { + std::cerr << "EOS VM OC compile trampoline got unexpected message; ignoring" << std::endl; + continue; + } + + pid_t pid = fork(); + if(pid == 0) { + prctl(PR_SET_NAME, "oc-compile"); + prctl(PR_SET_PDEATHSIG, SIGKILL); + + struct rlimit cpu_limits = {20u, 20u}; + setrlimit(RLIMIT_CPU, &cpu_limits); + + struct rlimit vm_limits = {512u*1024u*1024u, 512u*1024u*1024u}; + setrlimit(RLIMIT_AS, &vm_limits); + + struct rlimit core_limits = {0u, 0u}; + setrlimit(RLIMIT_CORE, &core_limits); + + run_compile(std::move(fds[0]), std::move(fds[1])); + _exit(0); + } + else if(pid == -1) + std::cerr << "EOS VM OC compile trampoline failed to spawn compile task" << std::endl; + } + + _exit(0); +} + +}}} + + diff --git a/libraries/chain/webassembly/eos-vm-oc/executor.cpp b/libraries/chain/webassembly/eos-vm-oc/executor.cpp new file mode 100644 index 00000000000..1dad9744e09 --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/executor.cpp @@ -0,0 +1,230 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include + +#if defined(__has_feature) +#if __has_feature(shadow_call_stack) +#error EOS VM OC is not compatible with Clang ShadowCallStack +#endif +#endif + +extern "C" int arch_prctl(int code, unsigned long* addr); + +namespace eosio { namespace chain { namespace eosvmoc { + +static constexpr auto signal_sentinel = 0x4D56534F45534559ul; + +static void(*chained_handler)(int,siginfo_t*,void*); +static void segv_handler(int sig, siginfo_t* info, void* ctx) { + 
control_block* cb_in_main_segment; + + //a 0 GS value is an indicator an executor hasn't been active on this thread recently + uint64_t current_gs; + syscall(SYS_arch_prctl, ARCH_GET_GS, ¤t_gs); + if(current_gs == 0) + goto notus; + + cb_in_main_segment = reinterpret_cast(current_gs - memory::cb_offset); + + //as a double check that the control block pointer is what we expect, look for the magic + if(cb_in_main_segment->magic != signal_sentinel) + goto notus; + + //was wasm running? If not, this SEGV was not due to us + if(cb_in_main_segment->is_running == false) + goto notus; + + //was the segfault within code? + if((uintptr_t)info->si_addr >= cb_in_main_segment->execution_thread_code_start && + (uintptr_t)info->si_addr < cb_in_main_segment->execution_thread_code_start+cb_in_main_segment->execution_thread_code_length) + siglongjmp(*cb_in_main_segment->jmp, EOSVMOC_EXIT_CHECKTIME_FAIL); + + //was the segfault within data? + if((uintptr_t)info->si_addr >= cb_in_main_segment->execution_thread_memory_start && + (uintptr_t)info->si_addr < cb_in_main_segment->execution_thread_memory_start+cb_in_main_segment->execution_thread_memory_length) + siglongjmp(*cb_in_main_segment->jmp, EOSVMOC_EXIT_SEGV); + +notus: + if(chained_handler) { + chained_handler(sig, info, ctx); + return; + } + ::signal(sig, SIG_DFL); + ::raise(sig); + __builtin_unreachable(); +} + +static intrinsic grow_memory_intrinsic EOSVMOC_INTRINSIC_INIT_PRIORITY("eosvmoc_internal.grow_memory", IR::FunctionType::get(IR::ResultType::i32,{IR::ValueType::i32,IR::ValueType::i32}), + (void*)&eos_vm_oc_grow_memory, + boost::hana::index_if(intrinsic_table, ::boost::hana::equal.to(BOOST_HANA_STRING("eosvmoc_internal.grow_memory"))).value() +); + +//This is effectively overriding the eosio_exit intrinsic in wasm_interface +static void eosio_exit(int32_t code) { + siglongjmp(*eos_vm_oc_get_jmp_buf(), EOSVMOC_EXIT_CLEAN_EXIT); + __builtin_unreachable(); +} +static intrinsic eosio_exit_intrinsic("env.eosio_exit", 
IR::FunctionType::get(IR::ResultType::none,{IR::ValueType::i32}), (void*)&eosio_exit, + boost::hana::index_if(intrinsic_table, ::boost::hana::equal.to(BOOST_HANA_STRING("env.eosio_exit"))).value() +); + +static void throw_internal_exception(const char* const s) { + *reinterpret_cast(eos_vm_oc_get_exception_ptr()) = std::make_exception_ptr(wasm_execution_error(FC_LOG_MESSAGE(error, s))); + siglongjmp(*eos_vm_oc_get_jmp_buf(), EOSVMOC_EXIT_EXCEPTION); + __builtin_unreachable(); +} + +#define DEFINE_EOSVMOC_TRAP_INTRINSIC(module,name) \ + void name(); \ + static intrinsic name##Function EOSVMOC_INTRINSIC_INIT_PRIORITY(#module "." #name,IR::FunctionType::get(),(void*)&name, \ + boost::hana::index_if(intrinsic_table, ::boost::hana::equal.to(BOOST_HANA_STRING(#module "." #name))).value() \ + ); \ + void name() + +DEFINE_EOSVMOC_TRAP_INTRINSIC(eosvmoc_internal,depth_assert) { + throw_internal_exception("Exceeded call depth maximum"); +} + +DEFINE_EOSVMOC_TRAP_INTRINSIC(eosvmoc_internal,div0_or_overflow) { + throw_internal_exception("Division by 0 or integer overflow trapped"); +} + +DEFINE_EOSVMOC_TRAP_INTRINSIC(eosvmoc_internal,indirect_call_mismatch) { + throw_internal_exception("Indirect call function type mismatch"); +} + +DEFINE_EOSVMOC_TRAP_INTRINSIC(eosvmoc_internal,indirect_call_oob) { + throw_internal_exception("Indirect call index out of bounds"); +} + +DEFINE_EOSVMOC_TRAP_INTRINSIC(eosvmoc_internal,unreachable) { + throw_internal_exception("Unreachable reached"); +} + +struct executor_signal_init { + executor_signal_init() { + struct sigaction sig_action, old_sig_action; + sig_action.sa_sigaction = segv_handler; + sigemptyset(&sig_action.sa_mask); + sig_action.sa_flags = SA_SIGINFO | SA_NODEFER; + sigaction(SIGSEGV, &sig_action, &old_sig_action); + if(old_sig_action.sa_flags & SA_SIGINFO) + chained_handler = old_sig_action.sa_sigaction; + else if(old_sig_action.sa_handler != SIG_IGN && old_sig_action.sa_handler != SIG_DFL) + chained_handler = (void 
(*)(int,siginfo_t*,void*))old_sig_action.sa_handler; + } +}; + +executor::executor(const code_cache_base& cc) { + //if we're the first executor created, go setup the signal handling. For now we'll just leave this attached forever + static executor_signal_init the_executor_signal_init; + + uint64_t current_gs; + if(arch_prctl(ARCH_GET_GS, &current_gs) || current_gs) + wlog("x86_64 GS register is not set as expected. EOS VM OC may not run correctly on this platform"); + + struct stat s; + FC_ASSERT(fstat(cc.fd(), &s) == 0, "executor failed to get code cache size"); + code_mapping = (uint8_t*)mmap(nullptr, s.st_size, PROT_EXEC|PROT_READ, MAP_SHARED, cc.fd(), 0); + FC_ASSERT(code_mapping != MAP_FAILED, "failed to map code cache in to executor"); + code_mapping_size = s.st_size; + mapping_is_executable = true; +} + +void executor::execute(const code_descriptor& code, const memory& mem, apply_context& context) { + if(mapping_is_executable == false) { + mprotect(code_mapping, code_mapping_size, PROT_EXEC|PROT_READ); + mapping_is_executable = true; + } + + //prepare initial memory, mutable globals, and table data + if(code.starting_memory_pages > 0 ) { + arch_prctl(ARCH_SET_GS, (unsigned long*)(mem.zero_page_memory_base()+code.starting_memory_pages*memory::stride)); + memset(mem.full_page_memory_base(), 0, 64u*1024u*code.starting_memory_pages); + } + else + arch_prctl(ARCH_SET_GS, (unsigned long*)mem.zero_page_memory_base()); + memcpy(mem.full_page_memory_base() - code.initdata_prologue_size, code_mapping + code.initdata_begin, code.initdata_size); + + control_block* const cb = mem.get_control_block(); + cb->magic = signal_sentinel; + cb->execution_thread_code_start = (uintptr_t)code_mapping; + cb->execution_thread_code_length = code_mapping_size; + cb->execution_thread_memory_start = (uintptr_t)mem.start_of_memory_slices(); + cb->execution_thread_memory_length = mem.size_of_memory_slice_mapping(); + cb->ctx = &context; + executors_exception_ptr = nullptr; + cb->eptr = 
&executors_exception_ptr; + cb->current_call_depth_remaining = eosio::chain::wasm_constraints::maximum_call_depth+2; + cb->current_linear_memory_pages = code.starting_memory_pages; + cb->first_invalid_memory_address = code.starting_memory_pages*64*1024; + cb->full_linear_memory_start = (char*)mem.full_page_memory_base(); + cb->jmp = &executors_sigjmp_buf; + cb->bounce_buffers = &executors_bounce_buffers; + cb->running_code_base = (uintptr_t)(code_mapping + code.code_begin); + cb->is_running = true; + + context.trx_context.transaction_timer.set_expiration_callback([](void* user) { + executor* self = (executor*)user; + syscall(SYS_mprotect, self->code_mapping, self->code_mapping_size, PROT_NONE); + self->mapping_is_executable = false; + }, this); + context.trx_context.checktime(); //catch any expiration that might have occurred before setting up callback + + auto cleanup = fc::make_scoped_exit([cb, &tt=context.trx_context.transaction_timer](){ + cb->is_running = false; + cb->bounce_buffers->clear(); + tt.set_expiration_callback(nullptr, nullptr); + }); + + void(*apply_func)(uint64_t, uint64_t, uint64_t) = (void(*)(uint64_t, uint64_t, uint64_t))(cb->running_code_base + code.apply_offset); + + switch(sigsetjmp(*cb->jmp, 0)) { + case 0: + code.start.visit(overloaded { + [&](const no_offset&) {}, + [&](const intrinsic_ordinal& i) { + void(*start_func)() = (void(*)())(*(uintptr_t*)((uintptr_t)mem.zero_page_memory_base() - memory::first_intrinsic_offset - i.ordinal*8)); + start_func(); + }, + [&](const code_offset& offs) { + void(*start_func)() = (void(*)())(cb->running_code_base + offs.offset); + start_func(); + } + }); + apply_func(context.get_receiver().to_uint64_t(), context.get_action().account.to_uint64_t(), context.get_action().name.to_uint64_t()); + break; + //case 1: clean eosio_exit + case EOSVMOC_EXIT_CHECKTIME_FAIL: + context.trx_context.checktime(); + break; + case EOSVMOC_EXIT_SEGV: + EOS_ASSERT(false, wasm_execution_error, "access violation"); + break; + 
case EOSVMOC_EXIT_EXCEPTION: //exception + std::rethrow_exception(*cb->eptr); + break; + } +} + +executor::~executor() { + arch_prctl(ARCH_SET_GS, nullptr); +} + +}}} diff --git a/libraries/chain/webassembly/eos-vm-oc/gs_seg_helpers.c b/libraries/chain/webassembly/eos-vm-oc/gs_seg_helpers.c new file mode 100644 index 00000000000..db4f1014ec6 --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/gs_seg_helpers.c @@ -0,0 +1,46 @@ +#include + +#include +#include + +int arch_prctl(int code, unsigned long* addr); + +#define EOSVMOC_MEMORY_PTR_cb_ptr GS_PTR struct eos_vm_oc_control_block* const cb_ptr = ((GS_PTR struct eos_vm_oc_control_block* const)(EOS_VM_OC_CONTROL_BLOCK_OFFSET)); + +int32_t eos_vm_oc_grow_memory(int32_t grow, int32_t max) { + EOSVMOC_MEMORY_PTR_cb_ptr; + uint64_t previous_page_count = cb_ptr->current_linear_memory_pages; + int32_t grow_amount = grow; + uint64_t max_pages = max; + if(grow == 0) + return (int32_t)cb_ptr->current_linear_memory_pages; + if(previous_page_count + grow_amount > max_pages) + return (int32_t)-1; + + uint64_t current_gs; + arch_prctl(ARCH_GET_GS, &current_gs); + current_gs += grow_amount * EOS_VM_OC_MEMORY_STRIDE; + arch_prctl(ARCH_SET_GS, (unsigned long*)current_gs); + cb_ptr->current_linear_memory_pages += grow_amount; + cb_ptr->first_invalid_memory_address += grow_amount*64*1024; + + if(grow_amount > 0) + memset(cb_ptr->full_linear_memory_start + previous_page_count*64u*1024u, 0, grow_amount*64u*1024u); + + return (int32_t)previous_page_count; +} + +sigjmp_buf* eos_vm_oc_get_jmp_buf() { + EOSVMOC_MEMORY_PTR_cb_ptr; + return cb_ptr->jmp; +} + +void* eos_vm_oc_get_exception_ptr() { + EOSVMOC_MEMORY_PTR_cb_ptr; + return cb_ptr->eptr; +} + +void* eos_vm_oc_get_bounce_buffer_list() { + EOSVMOC_MEMORY_PTR_cb_ptr; + return cb_ptr->bounce_buffers; +} \ No newline at end of file diff --git a/libraries/chain/webassembly/eos-vm-oc/intrinsic.cpp b/libraries/chain/webassembly/eos-vm-oc/intrinsic.cpp new file mode 100644 index 
00000000000..f4e15780412 --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/intrinsic.cpp @@ -0,0 +1,19 @@ +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +static intrinsic_map_t& the_intrinsic_map() { + static intrinsic_map_t intrinsic_map; + return intrinsic_map; +} + +const intrinsic_map_t& get_intrinsic_map() { + return the_intrinsic_map(); +} + +intrinsic::intrinsic(const char* n, const IR::FunctionType* t, void* f, size_t o) { + the_intrinsic_map().erase(n); + the_intrinsic_map().emplace(n, intrinsic_entry{t, f, o}); +} + +}}} \ No newline at end of file diff --git a/libraries/chain/webassembly/eos-vm-oc/ipc_helpers.cpp b/libraries/chain/webassembly/eos-vm-oc/ipc_helpers.cpp new file mode 100644 index 00000000000..cc694240957 --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/ipc_helpers.cpp @@ -0,0 +1,135 @@ +#include +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +static constexpr size_t max_message_size = 8192; +static constexpr size_t max_num_fds = 4; + +std::tuple> read_message_with_fds(boost::asio::local::datagram_protocol::socket& s) { + return read_message_with_fds(s.native_handle()); +} + +std::tuple> read_message_with_fds(int fd) { + char buff[max_message_size]; + + struct msghdr msg = {}; + struct cmsghdr* cmsg; + + eosvmoc_message message; + std::vector fds; + + struct iovec io = { + .iov_base = buff, + .iov_len = sizeof(buff) + }; + union { + char buf[CMSG_SPACE(max_num_fds * sizeof(int))]; + struct cmsghdr align; + } u; + + msg.msg_iov = &io; + msg.msg_iovlen = 1; + msg.msg_control = u.buf; + msg.msg_controllen = sizeof(u.buf); + + int red; + do { + red = recvmsg(fd, &msg, 0); + } while(red == -1 && errno == EINTR); + if(red < 1 || red >= sizeof(buff)) + return {false, message, std::move(fds)}; + + try { + fc::datastream ds(buff, red); + fc::raw::unpack(ds, message); + } + catch(...) 
{ + return {false, message, std::move(fds)}; + } + + if(msg.msg_controllen) { + cmsg = CMSG_FIRSTHDR(&msg); + unsigned num_of_fds = (cmsg->cmsg_len - CMSG_LEN(0))/sizeof(int); + if(num_of_fds > max_num_fds) + return {false, message, std::move(fds)}; + int* fd_ptr = (int*)CMSG_DATA(cmsg); + for(unsigned i = 0; i < num_of_fds; ++i) + fds.push_back(*fd_ptr++); + } + + return {true, message, std::move(fds)}; +} + +bool write_message_with_fds(boost::asio::local::datagram_protocol::socket& s, const eosvmoc_message& message, const std::vector& fds) { + return write_message_with_fds(s.native_handle(), message, fds); +} + +bool write_message_with_fds(int fd_to_send_to, const eosvmoc_message& message, const std::vector& fds) { + struct msghdr msg = {}; + struct cmsghdr* cmsg; + + size_t sz = fc::raw::pack_size(message); + if(sz > max_message_size) + return false; + char buff[max_message_size]; + try { + fc::datastream ds(buff, max_message_size); + fc::raw::pack(ds, message); + } + catch(...) { + return false; + } + + if(fds.size() > max_num_fds) + return false; + + struct iovec io = { + .iov_base = buff, + .iov_len = sz + }; + union { + char buf[CMSG_SPACE(max_num_fds * sizeof(int))]; + struct cmsghdr align; + } u; + + msg.msg_iov = &io; + msg.msg_iovlen = 1; + if(fds.size()) { + msg.msg_control = u.buf; + msg.msg_controllen = sizeof(u.buf); + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + cmsg->cmsg_len = CMSG_LEN(sizeof(int) * fds.size()); + unsigned char* p = CMSG_DATA(cmsg); + for(const wrapped_fd& fd : fds) { + int thisfd = fd; + memcpy(p, &thisfd, sizeof(thisfd)); + p += sizeof(thisfd); + } + } + + int wrote; + do { + wrote = sendmsg(fd_to_send_to, &msg, 0); + } while(wrote == -1 && errno == EINTR); + + return wrote >= 0; +} + +std::vector vector_for_memfd(const wrapped_fd& memfd) { + struct stat st; + FC_ASSERT(fstat(memfd, &st) == 0, "failed to get memfd size"); + + if(st.st_size == 0) + return std::vector(); + + 
uint8_t* p = (uint8_t*)mmap(nullptr, st.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, memfd, 0); + FC_ASSERT(p != MAP_FAILED, "failed to map memfd"); + std::vector ret(p, p+st.st_size); + munmap(p, st.st_size); + return ret; +} + +}}} diff --git a/libraries/chain/webassembly/eos-vm-oc/llvmWARshim.cpp b/libraries/chain/webassembly/eos-vm-oc/llvmWARshim.cpp new file mode 100644 index 00000000000..d0b99beb996 --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/llvmWARshim.cpp @@ -0,0 +1,14 @@ +#include +namespace eosio { namespace chain { namespace eosvmoc { +namespace LLVMJIT { + +llvm::Value* CreateInBoundsGEPWAR(llvm::IRBuilder<>& irBuilder, llvm::Value* Ptr, llvm::Value* v1, llvm::Value* v2) { + if(!v2) + return irBuilder.CreateInBoundsGEP(Ptr, v1); + else + return irBuilder.CreateInBoundsGEP(Ptr, {v1, v2}); +} + +} + +}}} \ No newline at end of file diff --git a/libraries/chain/webassembly/eos-vm-oc/llvmWARshim.llvmwar b/libraries/chain/webassembly/eos-vm-oc/llvmWARshim.llvmwar new file mode 120000 index 00000000000..1e2596579da --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/llvmWARshim.llvmwar @@ -0,0 +1 @@ +llvmWARshim.cpp \ No newline at end of file diff --git a/libraries/chain/webassembly/eos-vm-oc/memory.cpp b/libraries/chain/webassembly/eos-vm-oc/memory.cpp new file mode 100644 index 00000000000..6bf56f01d06 --- /dev/null +++ b/libraries/chain/webassembly/eos-vm-oc/memory.cpp @@ -0,0 +1,47 @@ +#include +#include + +#include + +#include +#include +#include +#include + +namespace eosio { namespace chain { namespace eosvmoc { + +memory::memory() { + int fd = syscall(SYS_memfd_create, "eosvmoc_mem", MFD_CLOEXEC); + FC_ASSERT(fd >= 0, "Failed to create memory memfd"); + auto cleanup_fd = fc::make_scoped_exit([&fd](){close(fd);}); + int ret = ftruncate(fd, wasm_memory_size+memory_prologue_size); + FC_ASSERT(!ret, "Failed to grow memory memfd"); + + mapsize = total_memory_per_slice*number_slices; + mapbase = (uint8_t*)mmap(nullptr, mapsize, 
PROT_NONE, MAP_PRIVATE|MAP_ANON, 0, 0); + FC_ASSERT(mapbase != MAP_FAILED, "Failed to mmap memory"); + + uint8_t* next_slice = mapbase; + uint8_t* last; + + for(unsigned int p = 0; p < number_slices; ++p) { + last = (uint8_t*)mmap(next_slice, memory_prologue_size+64u*1024u*p, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FIXED, fd, 0); + FC_ASSERT(last != MAP_FAILED, "Failed to mmap memory"); + next_slice += total_memory_per_slice; + } + + zeropage_base = mapbase + memory_prologue_size; + fullpage_base = last + memory_prologue_size; + + //layout the intrinsic jump table + uintptr_t* const intrinsic_jump_table = reinterpret_cast(zeropage_base - first_intrinsic_offset); + const intrinsic_map_t& intrinsics = get_intrinsic_map(); + for(const auto& intrinsic : intrinsics) + intrinsic_jump_table[-intrinsic.second.ordinal] = (uintptr_t)intrinsic.second.function_ptr; +} + +memory::~memory() { + munmap(mapbase, mapsize); +} + +}}} \ No newline at end of file diff --git a/libraries/chain/webassembly/eos-vm.cpp b/libraries/chain/webassembly/eos-vm.cpp new file mode 100644 index 00000000000..99ca29ed72e --- /dev/null +++ b/libraries/chain/webassembly/eos-vm.cpp @@ -0,0 +1,114 @@ +#include +#include +#include +#include +//eos-vm includes +#include + +namespace eosio { namespace chain { namespace webassembly { namespace eos_vm_runtime { + +using namespace eosio::vm; + +namespace wasm_constraints = eosio::chain::wasm_constraints; + +namespace { + + struct checktime_watchdog { + checktime_watchdog(transaction_checktime_timer& timer) : _timer(timer) {} + template + struct guard { + guard(transaction_checktime_timer& timer, F&& func) + : _timer(timer), _func(static_cast(func)) { + _timer.set_expiration_callback(&callback, this); + if(_timer.expired) { + _func(); // it's harmless if _func is invoked twice + } + } + ~guard() { + _timer.set_expiration_callback(nullptr, nullptr); + } + static void callback(void* data) { + guard* self = static_cast(data); + self->_func(); + } + 
transaction_checktime_timer& _timer; + F _func; + }; + template + guard scoped_run(F&& func) { + return guard{_timer, static_cast(func)}; + } + transaction_checktime_timer& _timer; + }; + +} + +template +class eos_vm_instantiated_module : public wasm_instantiated_module_interface { + using backend_t = backend; + public: + + eos_vm_instantiated_module(eos_vm_runtime* runtime, std::unique_ptr mod) : + _runtime(runtime), + _instantiated_module(std::move(mod)) {} + + void apply(apply_context& context) override { + _instantiated_module->set_wasm_allocator(&context.control.get_wasm_allocator()); + _runtime->_bkend = _instantiated_module.get(); + auto fn = [&]() { + _runtime->_bkend->initialize(&context); + const auto& res = _runtime->_bkend->call( + &context, "env", "apply", context.get_receiver().to_uint64_t(), + context.get_action().account.to_uint64_t(), + context.get_action().name.to_uint64_t()); + }; + try { + checktime_watchdog wd(context.trx_context.transaction_timer); + _runtime->_bkend->timed_run(wd, fn); + } catch(eosio::vm::timeout_exception&) { + context.trx_context.checktime(); + } catch(eosio::vm::wasm_memory_exception& e) { + FC_THROW_EXCEPTION(wasm_execution_error, "access violation"); + } catch(eosio::vm::exception& e) { + // FIXME: Do better translation + FC_THROW_EXCEPTION(wasm_execution_error, "something went wrong..."); + } + _runtime->_bkend = nullptr; + } + + private: + eos_vm_runtime* _runtime; + std::unique_ptr _instantiated_module; +}; + +template +eos_vm_runtime::eos_vm_runtime() {} + +template +void eos_vm_runtime::immediately_exit_currently_running_module() { + throw wasm_exit{}; +} + +template +bool eos_vm_runtime::inject_module(IR::Module& module) { + return false; +} + +template +std::unique_ptr eos_vm_runtime::instantiate_module(const char* code_bytes, size_t code_size, std::vector, + const digest_type&, const uint8_t&, const uint8_t&) { + using backend_t = backend; + try { + wasm_code_ptr code((uint8_t*)code_bytes, code_size); + 
std::unique_ptr bkend = std::make_unique(code, code_size); + registered_host_functions::resolve(bkend->get_module()); + return std::make_unique>(this, std::move(bkend)); + } catch(eosio::vm::exception& e) { + FC_THROW_EXCEPTION(wasm_execution_error, "Error building eos-vm interp: ${e}", ("e", e.what())); + } +} + +template class eos_vm_runtime; +template class eos_vm_runtime; + +}}}} diff --git a/libraries/chain/webassembly/wabt.cpp b/libraries/chain/webassembly/wabt.cpp index a23919e0ec6..853960d312b 100644 --- a/libraries/chain/webassembly/wabt.cpp +++ b/libraries/chain/webassembly/wabt.cpp @@ -1,6 +1,7 @@ #include #include #include +#include //wabt includes #include @@ -46,13 +47,13 @@ class wabt_instantiated_module : public wasm_instantiated_module_interface { Memory* memory = this_run_vars.memory = _env->GetMemory(0); memory->page_limits = _initial_memory_configuration; memory->data.resize(_initial_memory_configuration.initial * WABT_PAGE_SIZE); - memset(memory->data.data(), 0, memory->data.size()); memcpy(memory->data.data(), _initial_memory.data(), _initial_memory.size()); + memset(memory->data.data() + _initial_memory.size(), 0, memory->data.size() - _initial_memory.size()); } - _params[0].set_i64(uint64_t(context.get_receiver())); - _params[1].set_i64(uint64_t(context.get_action().account)); - _params[2].set_i64(uint64_t(context.get_action().name)); + _params[0].set_i64(context.get_receiver().to_uint64_t()); + _params[1].set_i64(context.get_action().account.to_uint64_t()); + _params[2].set_i64(context.get_action().name.to_uint64_t()); ExecResult res = _executor.RunStartFunction(_instatiated_module); EOS_ASSERT( res.result == interp::Result::Ok, wasm_execution_error, "wabt start function failure (${s})", ("s", ResultToString(res.result)) ); @@ -73,7 +74,14 @@ class wabt_instantiated_module : public wasm_instantiated_module_interface { wabt_runtime::wabt_runtime() {} -std::unique_ptr wabt_runtime::instantiate_module(const char* code_bytes, size_t code_size, 
std::vector initial_memory) { +bool wabt_runtime::inject_module(IR::Module& module) { + wasm_injections::wasm_binary_injection injector(module); + injector.inject(); + return true; +} + +std::unique_ptr wabt_runtime::instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory, + const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) { std::unique_ptr env = std::make_unique(); for(auto it = intrinsic_registrator::get_map().begin() ; it != intrinsic_registrator::get_map().end(); ++it) { interp::HostModule* host_module = env->AppendHostModule(it->first); diff --git a/libraries/chain/webassembly/wavm.cpp b/libraries/chain/webassembly/wavm.cpp deleted file mode 100644 index e006d237c5c..00000000000 --- a/libraries/chain/webassembly/wavm.cpp +++ /dev/null @@ -1,163 +0,0 @@ -#include -#include -#include -#include -#include - -#include "IR/Module.h" -#include "Platform/Platform.h" -#include "WAST/WAST.h" -#include "IR/Operators.h" -#include "IR/Validate.h" -#include "Runtime/Linker.h" -#include "Runtime/Intrinsics.h" - -#include -#include - -using namespace IR; -using namespace Runtime; - -namespace eosio { namespace chain { namespace webassembly { namespace wavm { - -running_instance_context the_running_instance_context; - -namespace detail { -struct wavm_runtime_initializer { - wavm_runtime_initializer() { - Runtime::init(); - } -}; - -using live_module_ref = std::list::iterator; - -struct wavm_live_modules { - live_module_ref add_live_module(ModuleInstance* module_instance) { - return live_modules.insert(live_modules.begin(), asObject(module_instance)); - } - - void remove_live_module(live_module_ref it) { - live_modules.erase(it); - run_wavm_garbage_collection(); - } - - void run_wavm_garbage_collection() { - //need to pass in a mutable list of root objects we want the garbage collector to retain - std::vector root; - std::copy(live_modules.begin(), live_modules.end(), std::back_inserter(root)); - 
Runtime::freeUnreferencedObjects(std::move(root)); - } - - std::list live_modules; -}; - -static wavm_live_modules the_wavm_live_modules; - -} - -class wavm_instantiated_module : public wasm_instantiated_module_interface { - public: - wavm_instantiated_module(ModuleInstance* instance, std::unique_ptr module, std::vector initial_mem) : - _initial_memory(initial_mem), - _instance(instance), - _module_ref(detail::the_wavm_live_modules.add_live_module(instance)) - { - //The memory instance is reused across all wavm_instantiated_modules, but for wasm instances - // that didn't declare "memory", getDefaultMemory() won't see it. It would also be possible - // to say something like if(module->memories.size()) here I believe - if(getDefaultMemory(_instance)) - _initial_memory_config = module->memories.defs.at(0).type; - } - - ~wavm_instantiated_module() { - detail::the_wavm_live_modules.remove_live_module(_module_ref); - } - - void apply(apply_context& context) override { - vector args = {Value(uint64_t(context.get_receiver())), - Value(uint64_t(context.get_action().account)), - Value(uint64_t(context.get_action().name))}; - - call("apply", args, context); - } - - private: - void call(const string &entry_point, const vector &args, apply_context &context) { - try { - FunctionInstance* call = asFunctionNullable(getInstanceExport(_instance,entry_point)); - if( !call ) - return; - - EOS_ASSERT( getFunctionType(call)->parameters.size() == args.size(), wasm_exception, "" ); - - //The memory instance is reused across all wavm_instantiated_modules, but for wasm instances - // that didn't declare "memory", getDefaultMemory() won't see it - MemoryInstance* default_mem = getDefaultMemory(_instance); - if(default_mem) { - //reset memory resizes the sandbox'ed memory to the module's init memory size and then - // (effectively) memzeros it all - resetMemory(default_mem, _initial_memory_config); - - char* memstart = &memoryRef(getDefaultMemory(_instance), 0); - memcpy(memstart, 
_initial_memory.data(), _initial_memory.size()); - } - - the_running_instance_context.memory = default_mem; - the_running_instance_context.apply_ctx = &context; - - resetGlobalInstances(_instance); - runInstanceStartFunc(_instance); - Runtime::invokeFunction(call,args); - } catch( const wasm_exit& e ) { - } catch( const Runtime::Exception& e ) { - FC_THROW_EXCEPTION(wasm_execution_error, - "cause: ${cause}\n${callstack}", - ("cause", string(describeExceptionCause(e.cause))) - ("callstack", e.callStack)); - } FC_CAPTURE_AND_RETHROW() - } - - - std::vector _initial_memory; - //naked pointer because ModuleInstance is opaque - //_instance is deleted via WAVM's object garbage collection when wavm_rutime is deleted - ModuleInstance* _instance; - detail::live_module_ref _module_ref; - MemoryType _initial_memory_config; -}; - -wavm_runtime::wavm_runtime() { - static detail::wavm_runtime_initializer the_wavm_runtime_initializer; -} - -wavm_runtime::~wavm_runtime() { -} - -std::unique_ptr wavm_runtime::instantiate_module(const char* code_bytes, size_t code_size, std::vector initial_memory) { - std::unique_ptr module = std::make_unique(); - try { - Serialization::MemoryInputStream stream((const U8*)code_bytes, code_size); - WASM::serialize(stream, *module); - } catch(const Serialization::FatalSerializationException& e) { - EOS_ASSERT(false, wasm_serialization_error, e.message.c_str()); - } catch(const IR::ValidationException& e) { - EOS_ASSERT(false, wasm_serialization_error, e.message.c_str()); - } - - eosio::chain::webassembly::common::root_resolver resolver; - LinkResult link_result = linkModule(*module, resolver); - ModuleInstance *instance = instantiateModule(*module, std::move(link_result.resolvedImports)); - EOS_ASSERT(instance != nullptr, wasm_exception, "Fail to Instantiate WAVM Module"); - - return std::make_unique(instance, std::move(module), initial_memory); -} - -void wavm_runtime::immediately_exit_currently_running_module() { -#ifdef _WIN32 - throw wasm_exit(); 
-#else - Platform::immediately_exit(nullptr); -#endif -} - -}}}} diff --git a/libraries/chain/whitelisted_intrinsics.cpp b/libraries/chain/whitelisted_intrinsics.cpp index 6a4756bf502..cd3974d1ce5 100644 --- a/libraries/chain/whitelisted_intrinsics.cpp +++ b/libraries/chain/whitelisted_intrinsics.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include diff --git a/libraries/chainbase b/libraries/chainbase index 759ca20e908..d6632fcc1e7 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 759ca20e908b60e54eed463d0b2889f6e2108e71 +Subproject commit d6632fcc1e71b0ee2022621059a432ec216b6661 diff --git a/libraries/eos-vm b/libraries/eos-vm new file mode 160000 index 00000000000..c5afecd4705 --- /dev/null +++ b/libraries/eos-vm @@ -0,0 +1 @@ +Subproject commit c5afecd4705317e28d97eb0d950dc50ea1bb58c7 diff --git a/libraries/fc b/libraries/fc index 25a24852325..e95a03eed17 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 25a24852325f9f4c8510e671d351af007fae3713 +Subproject commit e95a03eed1796a3054e02e67f1171f8c9fdb57e5 diff --git a/libraries/softfloat b/libraries/softfloat index 138dda49cea..94dac6e56c9 160000 --- a/libraries/softfloat +++ b/libraries/softfloat @@ -1 +1 @@ -Subproject commit 138dda49cead84a93d052a241807694f5d1b0750 +Subproject commit 94dac6e56c980a99e3e38bdff89a94600de0066d diff --git a/libraries/testing/CMakeLists.txt b/libraries/testing/CMakeLists.txt index b03c85760d1..0c71359ec44 100644 --- a/libraries/testing/CMakeLists.txt +++ b/libraries/testing/CMakeLists.txt @@ -7,7 +7,7 @@ add_library( eosio_testing ${HEADERS} ) -target_link_libraries( eosio_testing eosio_chain fc chainbase Logging IR WAST WASM Runtime ) +target_link_libraries( eosio_testing eosio_chain fc chainbase Logging IR WAST WASM Runtime Boost::unit_test_framework ) target_include_directories( eosio_testing PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" 
"${CMAKE_CURRENT_BINARY_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../wasm-jit/Include" diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 9875ebb4618..6df94c8862b 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -56,7 +57,7 @@ namespace boost { namespace test_tools { namespace tt_detail { } } } namespace eosio { namespace testing { - enum class setup_policy { + enum class setup_policy { none, old_bios_only, preactivate_feature_only, @@ -67,6 +68,10 @@ namespace eosio { namespace testing { std::vector read_wasm( const char* fn ); std::vector read_abi( const char* fn ); std::string read_wast( const char* fn ); + + std::string read_binary_snapshot( const char* fn ); + fc::variant read_json_snapshot( const char* fn ); + using namespace eosio::chain; fc::variant_object filter_fields(const fc::variant_object& filter, const fc::variant_object& value); @@ -79,6 +84,59 @@ namespace eosio { namespace testing { protocol_feature_set make_protocol_feature_set(const subjective_restriction_map& custom_subjective_restrictions = {}); + namespace mock { + using namespace fc::crypto; + struct webauthn_private_key { + explicit webauthn_private_key(r1::private_key&& priv_key) + :priv_key(std::move(priv_key)) + { + } + + webauthn_private_key(webauthn_private_key&&) = default; + webauthn_private_key(const webauthn_private_key&) = default; + + static auto regenerate(const fc::sha256& secret) { + return webauthn_private_key(r1::private_key::regenerate(secret)); + } + + public_key get_public_key(webauthn::public_key::user_presence_t presence = webauthn::public_key::user_presence_t::USER_PRESENCE_NONE) const { + return public_key_type(webauthn::public_key(priv_key.get_public_key().serialize(), presence, _origin)); + } + + signature sign( const sha256& digest, bool = 
true) const { + auto json = std::string("{\"origin\":\"https://") + + _origin + + "\",\"type\":\"webauthn.get\",\"challenge\":\"" + + fc::base64url_encode(digest.data(), digest.data_size()) + + "\"}"; + std::vector auth_data(37); + memcpy(auth_data.data(), _origin_hash.data(), sizeof(_origin_hash)); + + auto client_data_hash = fc::sha256::hash(json); + fc::sha256::encoder e; + e.write((char*)auth_data.data(), auth_data.size()); + e.write(client_data_hash.data(), client_data_hash.data_size()); + auto sig = priv_key.sign_compact(e.result()); + + char serialized_sig[4096]; + datastream sig_ds(serialized_sig, sizeof(serialized_sig)); + fc::raw::pack(sig_ds, (uint8_t)signature::storage_type::position()); + fc::raw::pack(sig_ds, sig); + fc::raw::pack(sig_ds, auth_data); + fc::raw::pack(sig_ds, json); + sig_ds.seekp(0); + + signature ret; + fc::raw::unpack(sig_ds, ret); + return ret; + } + + r1::private_key priv_key; + static const std::string _origin; + static const fc::sha256 _origin_hash; + }; + } + /** * @class tester * @brief provides utility function to simplify the creation of unit tests @@ -95,13 +153,23 @@ namespace eosio { namespace testing { virtual ~base_tester() {}; void init(const setup_policy policy = setup_policy::full, db_read_mode read_mode = db_read_mode::SPECULATIVE); - void init(controller::config config, const snapshot_reader_ptr& snapshot = nullptr); - void init(controller::config config, protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot = nullptr); + void init(controller::config config, const snapshot_reader_ptr& snapshot); + void init(controller::config config, const genesis_state& genesis); + void init(controller::config config); + void init(controller::config config, protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot); + void init(controller::config config, protocol_feature_set&& pfs, const genesis_state& genesis); + void init(controller::config config, protocol_feature_set&& pfs); void execute_setup_policy(const 
setup_policy policy); void close(); - void open( protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot); - void open( const snapshot_reader_ptr& snapshot); + template + void open( protocol_feature_set&& pfs, fc::optional expected_chain_id, Lambda lambda ); + void open( protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot ); + void open( protocol_feature_set&& pfs, const genesis_state& genesis ); + void open( protocol_feature_set&& pfs, fc::optional expected_chain_id = {} ); + void open( const snapshot_reader_ptr& snapshot ); + void open( const genesis_state& genesis ); + void open( fc::optional expected_chain_id = {} ); bool is_same_chain( base_tester& other ); virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0; @@ -112,7 +180,7 @@ namespace eosio { namespace testing { void produce_blocks_for_n_rounds(const uint32_t num_of_rounds = 1); // Produce minimal number of blocks as possible to spend the given time without having any producer become inactive void produce_min_num_of_blocks_to_spend_time_wo_inactive_prod(const fc::microseconds target_elapsed_time = fc::microseconds()); - signed_block_ptr push_block(signed_block_ptr b); + void push_block(signed_block_ptr b); /** * These transaction IDs represent transactions available in the head chain state as scheduled @@ -124,9 +192,12 @@ namespace eosio { namespace testing { * @return */ vector get_scheduled_transactions() const; + unapplied_transaction_queue& get_unapplied_transaction_queue() { return unapplied_transactions; } transaction_trace_ptr push_transaction( packed_transaction& trx, fc::time_point deadline = fc::time_point::maximum(), uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US ); transaction_trace_ptr push_transaction( signed_transaction& trx, fc::time_point deadline = fc::time_point::maximum(), uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US, bool no_throw = false ); + + [[nodiscard]] action_result 
push_action(action&& cert_act, uint64_t authorizer); // TODO/QUESTION: Is this needed? transaction_trace_ptr push_action( const account_name& code, @@ -168,12 +239,16 @@ namespace eosio { namespace testing { } void set_before_preactivate_bios_contract(); + void set_before_producer_authority_bios_contract(); void set_bios_contract(); - vector get_producer_keys( const vector& producer_names )const; - transaction_trace_ptr set_producers(const vector& producer_names); - void link_authority( account_name account, account_name code, permission_name req, action_name type = "" ); - void unlink_authority( account_name account, account_name code, action_name type = "" ); + vector get_producer_authorities( const vector& producer_names )const; + transaction_trace_ptr set_producers(const vector& producer_names); + transaction_trace_ptr set_producer_schedule(const vector& schedule); + transaction_trace_ptr set_producers_legacy(const vector& producer_names); + + void link_authority( account_name account, account_name code, permission_name req, action_name type = {} ); + void unlink_authority( account_name account, account_name code, action_name type = {} ); void set_authority( account_name account, permission_name perm, authority auth, permission_name parent, const vector& auths, const vector& keys ); void set_authority( account_name account, permission_name perm, authority auth, @@ -211,12 +286,17 @@ namespace eosio { namespace testing { } template< typename KeyType = fc::ecc::private_key_shim > - static private_key_type get_private_key( name keyname, string role = "owner" ) { - return private_key_type::regenerate(fc::sha256::hash(string(keyname)+role)); + static auto get_private_key( name keyname, string role = "owner" ) { + auto secret = fc::sha256::hash(keyname.to_string() + role); + if constexpr (std::is_same_v) { + return mock::webauthn_private_key::regenerate(secret); + } else { + return private_key_type::regenerate(secret); + } } template< typename KeyType = 
fc::ecc::private_key_shim > - static public_key_type get_public_key( name keyname, string role = "owner" ) { + static auto get_public_key( name keyname, string role = "owner" ) { return get_private_key( keyname, role ).get_public_key(); } @@ -231,7 +311,7 @@ namespace eosio { namespace testing { const symbol& asset_symbol, const account_name& account ) const; - vector get_row_by_account( uint64_t code, uint64_t scope, uint64_t table, const account_name& act ) const; + vector get_row_by_account( name code, name scope, name table, const account_name& act ) const; map get_last_produced_block_map()const { return last_produced_block; }; void set_last_produced_block_map( const map& lpb ) { last_produced_block = lpb; } @@ -300,6 +380,38 @@ namespace eosio { namespace testing { void preactivate_protocol_features(const vector feature_digests); void preactivate_all_builtin_protocol_features(); + static genesis_state default_genesis() { + genesis_state genesis; + genesis.initial_timestamp = fc::time_point::from_iso_string("2020-01-01T00:00:00.000"); + genesis.initial_key = get_public_key( config::system_account_name, "active" ); + + return genesis; + } + + static std::pair default_config(const fc::temp_directory& tempdir) { + controller::config cfg; + cfg.blocks_dir = tempdir.path() / config::default_blocks_dir_name; + cfg.state_dir = tempdir.path() / config::default_state_dir_name; + cfg.state_size = 1024*1024*16; + cfg.state_guard_size = 0; + cfg.reversible_cache_size = 1024*1024*8; + cfg.reversible_guard_size = 0; + cfg.contracts_console = true; + cfg.eosvmoc_config.cache_size = 1024*1024*8; + + for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) { + if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) + cfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; + else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--eos-vm")) + cfg.wasm_runtime = chain::wasm_interface::vm_type::eos_vm; 
+ else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--eos-vm-jit")) + cfg.wasm_runtime = chain::wasm_interface::vm_type::eos_vm_jit; + else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--eos-vm-oc")) + cfg.wasm_runtime = chain::wasm_interface::vm_type::eos_vm_oc; + } + return {cfg, default_genesis()}; + } + protected: signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false ); void _start_block(fc::time_point block_time); @@ -316,6 +428,8 @@ namespace eosio { namespace testing { controller::config cfg; map chain_transactions; map last_produced_block; + unapplied_transaction_queue unapplied_transactions; + public: vector protocol_features_to_be_activated_wo_preactivation; }; @@ -326,12 +440,42 @@ namespace eosio { namespace testing { init(policy, read_mode); } + tester(controller::config config, const genesis_state& genesis) { + init(config, genesis); + } + tester(controller::config config) { init(config); } - tester(controller::config config, protocol_feature_set&& pfs) { - init(config, std::move(pfs)); + tester(controller::config config, protocol_feature_set&& pfs, const genesis_state& genesis) { + init(config, std::move(pfs), genesis); + } + + tester(const fc::temp_directory& tempdir, bool use_genesis) { + auto def_conf = default_config(tempdir); + cfg = def_conf.first; + + if (use_genesis) { + init(cfg, def_conf.second); + } + else { + init(cfg); + } + } + + template + tester(const fc::temp_directory& tempdir, Lambda conf_edit, bool use_genesis) { + auto def_conf = default_config(tempdir); + cfg = def_conf.first; + conf_edit(cfg); + + if (use_genesis) { + init(cfg, def_conf.second); + } + else { + init(cfg); + } } signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { @@ -339,7 +483,7 @@ namespace eosio { namespace testing { } signed_block_ptr produce_empty_block( fc::microseconds skip_time = 
fc::milliseconds(config::block_interval_ms) )override { - control->abort_block(); + unapplied_transactions.add_aborted( control->abort_block() ); return _produce_block(skip_time, true); } @@ -368,60 +512,75 @@ namespace eosio { namespace testing { } controller::config vcfg; - static controller::config default_config() { - fc::temp_directory tempdir; - controller::config vcfg; - vcfg.blocks_dir = tempdir.path() / std::string("v_").append(config::default_blocks_dir_name); - vcfg.state_dir = tempdir.path() / std::string("v_").append(config::default_state_dir_name); - vcfg.state_size = 1024*1024*16; - vcfg.state_guard_size = 0; - vcfg.reversible_cache_size = 1024*1024*8; - vcfg.reversible_guard_size = 0; - vcfg.contracts_console = false; - - vcfg.genesis.initial_timestamp = fc::time_point::from_iso_string("2020-01-01T00:00:00.000"); - vcfg.genesis.initial_key = get_public_key( config::system_account_name, "active" ); - - for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) { - if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) - vcfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; - else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) - vcfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; - } - return vcfg; - } - validating_tester(const flat_set& trusted_producers = flat_set()) { - vcfg = default_config(); + auto def_conf = default_config(tempdir); + vcfg = def_conf.first; + config_validator(vcfg); vcfg.trusted_producers = trusted_producers; - validating_node = std::make_unique(vcfg, make_protocol_feature_set()); - validating_node->add_indices(); - validating_node->startup( []() { return false; } ); + validating_node = create_validating_node(vcfg, def_conf.second, true); - init(); + init(def_conf.first, def_conf.second); + execute_setup_policy(setup_policy::full); } - validating_tester(controller::config config) { - FC_ASSERT( 
config.blocks_dir.filename().generic_string() != "." - && config.state_dir.filename().generic_string() != ".", "invalid path names in controller::config" ); + static void config_validator(controller::config& vcfg) { + FC_ASSERT( vcfg.blocks_dir.filename().generic_string() != "." + && vcfg.state_dir.filename().generic_string() != ".", "invalid path names in controller::config" ); - vcfg = config; vcfg.blocks_dir = vcfg.blocks_dir.parent_path() / std::string("v_").append( vcfg.blocks_dir.filename().generic_string() ); vcfg.state_dir = vcfg.state_dir.parent_path() / std::string("v_").append( vcfg.state_dir.filename().generic_string() ); - validating_node = std::make_unique(vcfg, make_protocol_feature_set()); + vcfg.contracts_console = false; + } + + static unique_ptr create_validating_node(controller::config vcfg, const genesis_state& genesis, bool use_genesis) { + unique_ptr validating_node = std::make_unique(vcfg, make_protocol_feature_set(), genesis.compute_chain_id()); validating_node->add_indices(); - validating_node->startup( []() { return false; } ); + if (use_genesis) { + validating_node->startup( []() { return false; }, genesis ); + } + else { + validating_node->startup( []() { return false; } ); + } + return validating_node; + } - init(config); + validating_tester(const fc::temp_directory& tempdir, bool use_genesis) { + auto def_conf = default_config(tempdir); + vcfg = def_conf.first; + config_validator(vcfg); + validating_node = create_validating_node(vcfg, def_conf.second, use_genesis); + + if (use_genesis) { + init(def_conf.first, def_conf.second); + } + else { + init(def_conf.first); + } + } + + template + validating_tester(const fc::temp_directory& tempdir, Lambda conf_edit, bool use_genesis) { + auto def_conf = default_config(tempdir); + conf_edit(def_conf.first); + vcfg = def_conf.first; + config_validator(vcfg); + validating_node = create_validating_node(vcfg, def_conf.second, use_genesis); + + if (use_genesis) { + init(def_conf.first, 
def_conf.second); + } + else { + init(def_conf.first); + } } signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { auto sb = _produce_block(skip_time, false); - auto bs = validating_node->create_block_state_future( sb ); - validating_node->push_block( bs ); + auto bsf = validating_node->create_block_state_future( sb ); + validating_node->push_block( bsf, forked_branch_callback{}, trx_meta_cache_lookup{} ); return sb; } @@ -432,14 +591,14 @@ namespace eosio { namespace testing { void validate_push_block(const signed_block_ptr& sb) { auto bs = validating_node->create_block_state_future( sb ); - validating_node->push_block( bs ); + validating_node->push_block( bs, forked_branch_callback{}, trx_meta_cache_lookup{} ); } signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { - control->abort_block(); + unapplied_transactions.add_aborted( control->abort_block() ); auto sb = _produce_block(skip_time, true); - auto bs = validating_node->create_block_state_future( sb ); - validating_node->push_block( bs ); + auto bsf = validating_node->create_block_state_future( sb ); + validating_node->push_block( bsf, forked_branch_callback{}, trx_meta_cache_lookup{} ); return sb; } @@ -461,7 +620,7 @@ namespace eosio { namespace testing { hbh.producer == vn_hbh.producer; validating_node.reset(); - validating_node = std::make_unique(vcfg, make_protocol_feature_set()); + validating_node = std::make_unique(vcfg, make_protocol_feature_set(), control->get_chain_id()); validating_node->add_indices(); validating_node->startup( []() { return false; } ); diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 5cc105a9ed6..074de397217 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -1,14 +1,20 @@ #include #include #include +#include #include #include #include +#include +#include +#include #include #include 
+namespace bio = boost::iostreams; + eosio::chain::asset core_from_string(const std::string& s) { return eosio::chain::asset::from_string(s + " " CORE_SYMBOL_NAME); } @@ -58,6 +64,28 @@ namespace eosio { namespace testing { return abi; } + namespace { + std::string read_gzipped_snapshot( const char* fn ) { + std::ifstream file(fn, std::ios_base::in | std::ios_base::binary); + bio::filtering_streambuf in; + + in.push(bio::gzip_decompressor()); + in.push(file); + + std::stringstream decompressed; + bio::copy(in, decompressed); + return decompressed.str(); + } + } + + std::string read_binary_snapshot( const char* fn ) { + return read_gzipped_snapshot(fn); + } + + fc::variant read_json_snapshot( const char* fn ) { + return fc::json::from_string( read_gzipped_snapshot(fn) ); + } + const fc::microseconds base_tester::abi_serializer_max_time{1000*1000}; // 1s for slow test machines bool expect_assert_message(const fc::exception& ex, string expected) { @@ -123,26 +151,11 @@ namespace eosio { namespace testing { } void base_tester::init(const setup_policy policy, db_read_mode read_mode) { - cfg.blocks_dir = tempdir.path() / config::default_blocks_dir_name; - cfg.state_dir = tempdir.path() / config::default_state_dir_name; - cfg.state_size = 1024*1024*16; - cfg.state_guard_size = 0; - cfg.reversible_cache_size = 1024*1024*8; - cfg.reversible_guard_size = 0; - cfg.contracts_console = true; - cfg.read_mode = read_mode; - - cfg.genesis.initial_timestamp = fc::time_point::from_iso_string("2020-01-01T00:00:00.000"); - cfg.genesis.initial_key = get_public_key( config::system_account_name, "active" ); - - for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) { - if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) - cfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; - else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) - cfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; - } 
+ auto def_conf = default_config(tempdir); + def_conf.first.read_mode = read_mode; + cfg = def_conf.first; - open(nullptr); + open(def_conf.second); execute_setup_policy(policy); } @@ -151,11 +164,31 @@ namespace eosio { namespace testing { open(snapshot); } + void base_tester::init(controller::config config, const genesis_state& genesis) { + cfg = config; + open(genesis); + } + + void base_tester::init(controller::config config) { + cfg = config; + open(default_genesis().compute_chain_id()); + } + void base_tester::init(controller::config config, protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot) { cfg = config; open(std::move(pfs), snapshot); } + void base_tester::init(controller::config config, protocol_feature_set&& pfs, const genesis_state& genesis) { + cfg = config; + open(std::move(pfs), genesis); + } + + void base_tester::init(controller::config config, protocol_feature_set&& pfs) { + cfg = config; + open(std::move(pfs), default_genesis().compute_chain_id()); + } + void base_tester::execute_setup_policy(const setup_policy policy) { const auto& pfm = control->get_protocol_feature_manager(); @@ -178,15 +211,16 @@ namespace eosio { namespace testing { case setup_policy::preactivate_feature_and_new_bios: { schedule_preactivate_protocol_feature(); produce_block(); - set_bios_contract(); + set_before_producer_authority_bios_contract(); break; } case setup_policy::full: { schedule_preactivate_protocol_feature(); produce_block(); - set_bios_contract(); + set_before_producer_authority_bios_contract(); preactivate_all_builtin_protocol_features(); produce_block(); + set_bios_contract(); break; } case setup_policy::none: @@ -204,10 +238,30 @@ namespace eosio { namespace testing { open( make_protocol_feature_set(), snapshot ); } - void base_tester::open( protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot ) { - control.reset( new controller(cfg, std::move(pfs)) ); + void base_tester::open( const genesis_state& genesis ) { + open( 
make_protocol_feature_set(), genesis ); + } + + void base_tester::open( fc::optional expected_chain_id ) { + open( make_protocol_feature_set(), expected_chain_id ); + } + + template + void base_tester::open( protocol_feature_set&& pfs, fc::optional expected_chain_id, Lambda lambda ) { + if( !expected_chain_id ) { + expected_chain_id = controller::extract_chain_id_from_db( cfg.state_dir ); + if( !expected_chain_id ) { + if( fc::is_regular_file( cfg.blocks_dir / "blocks.log" ) ) { + expected_chain_id = block_log::extract_chain_id( cfg.blocks_dir ); + } else { + expected_chain_id = genesis_state().compute_chain_id(); + } + } + } + + control.reset( new controller(cfg, std::move(pfs), *expected_chain_id) ); control->add_indices(); - control->startup( []() { return false; }, snapshot); + lambda(); chain_transactions.clear(); control->accepted_block.connect([this]( const block_state_ptr& block_state ){ FC_ASSERT( block_state->block ); @@ -223,17 +277,39 @@ namespace eosio { namespace testing { }); } - signed_block_ptr base_tester::push_block(signed_block_ptr b) { - auto bs = control->create_block_state_future(b); - control->abort_block(); - control->push_block(bs); + void base_tester::open( protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot ) { + const auto& snapshot_chain_id = controller::extract_chain_id( *snapshot ); + snapshot->return_to_header(); + open(std::move(pfs), snapshot_chain_id, [&snapshot,&control=this->control]() { + control->startup([]() { return false; }, snapshot ); + }); + } + + void base_tester::open( protocol_feature_set&& pfs, const genesis_state& genesis ) { + open(std::move(pfs), genesis.compute_chain_id(), [&genesis,&control=this->control]() { + control->startup( []() { return false; }, genesis ); + }); + } + + void base_tester::open( protocol_feature_set&& pfs, fc::optional expected_chain_id ) { + open(std::move(pfs), expected_chain_id, [&control=this->control]() { + control->startup( []() { return false; } ); + }); + } + + void 
base_tester::push_block(signed_block_ptr b) { + auto bsf = control->create_block_state_future(b); + unapplied_transactions.add_aborted( control->abort_block() ); + control->push_block( bsf, [this]( const branch_type& forked_branch ) { + unapplied_transactions.add_forked( forked_branch ); + }, [this]( const transaction_id_type& id ) { + return unapplied_transactions.get_trx( id ); + } ); auto itr = last_produced_block.find(b->producer); if (itr == last_produced_block.end() || block_header::num_from_id(b->id()) > block_header::num_from_id(itr->second)) { last_produced_block[b->producer] = b->id(); } - - return b; } signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs) { @@ -246,19 +322,19 @@ namespace eosio { namespace testing { } if( !skip_pending_trxs ) { - unapplied_transactions_type unapplied_trxs = control->get_unapplied_transactions(); // make copy of map - for (const auto& entry : unapplied_trxs ) { - auto trace = control->push_transaction(entry.second, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US ); + for( auto itr = unapplied_transactions.begin(); itr != unapplied_transactions.end(); ) { + auto trace = control->push_transaction( itr->trx_meta, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US ); if(trace->except) { trace->except->dynamic_rethrow_exception(); } + itr = unapplied_transactions.erase( itr ); } vector scheduled_trxs; - while( (scheduled_trxs = get_scheduled_transactions() ).size() > 0 ) { - for (const auto& trx : scheduled_trxs ) { - auto trace = control->push_scheduled_transaction(trx, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US); - if(trace->except) { + while ((scheduled_trxs = get_scheduled_transactions()).size() > 0 ) { + for( const auto& trx : scheduled_trxs ) { + auto trace = control->push_scheduled_transaction( trx, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US ); + if( trace->except ) { trace->except->dynamic_rethrow_exception(); } } @@ -281,7 +357,7 @@ namespace 
eosio { namespace testing { last_produced_block_num = std::max(control->last_irreversible_block_num(), block_header::num_from_id(itr->second)); } - control->abort_block(); + unapplied_transactions.add_aborted( control->abort_block() ); vector feature_to_be_activated; // First add protocol features to be activated WITHOUT preactivation @@ -308,18 +384,25 @@ namespace eosio { namespace testing { FC_ASSERT( control->is_building_block(), "must first start a block before it can be finished" ); auto producer = control->head_block_state()->get_scheduled_producer( control->pending_block_time() ); - private_key_type priv_key; - // Check if signing private key exist in the list - auto private_key_itr = block_signing_private_keys.find( producer.block_signing_key ); - if( private_key_itr == block_signing_private_keys.end() ) { - // If it's not found, default to active k1 key - priv_key = get_private_key( producer.producer_name, "active" ); - } else { - priv_key = private_key_itr->second; - } + vector signing_keys; + + auto default_active_key = get_public_key( producer.producer_name, "active"); + producer.for_each_key([&](const public_key_type& key){ + const auto& iter = block_signing_private_keys.find(key); + if(iter != block_signing_private_keys.end()) { + signing_keys.push_back(iter->second); + } else if (key == default_active_key) { + signing_keys.emplace_back( get_private_key( producer.producer_name, "active") ); + } + }); control->finalize_block( [&]( digest_type d ) { - return priv_key.sign(d); + std::vector result; + result.reserve(signing_keys.size()); + for (const auto& k: signing_keys) + result.emplace_back(k.sign(d)); + + return result; } ); control->commit_block(); @@ -447,12 +530,12 @@ namespace eosio { namespace testing { if( !control->is_building_block() ) _start_block(control->head_block_time() + fc::microseconds(config::block_interval_us)); - auto mtrx = std::make_shared( std::make_shared(trx) ); + auto ptrx = std::make_shared(trx); auto time_limit = deadline 
== fc::time_point::maximum() ? fc::microseconds::maximum() : fc::microseconds( deadline - fc::time_point::now() ); - transaction_metadata::start_recover_keys( mtrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); - auto r = control->push_transaction( mtrx, deadline, billed_cpu_time_us ); + auto fut = transaction_metadata::start_recover_keys( ptrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); + auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us ); if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except ) throw *r->except; return r; @@ -466,18 +549,18 @@ namespace eosio { namespace testing { { try { if( !control->is_building_block() ) _start_block(control->head_block_time() + fc::microseconds(config::block_interval_us)); - auto c = packed_transaction::none; + auto c = packed_transaction::compression_type::none; if( fc::raw::pack_size(trx) > 1000 ) { - c = packed_transaction::zlib; + c = packed_transaction::compression_type::zlib; } auto time_limit = deadline == fc::time_point::maximum() ? 
fc::microseconds::maximum() : fc::microseconds( deadline - fc::time_point::now() ); - auto mtrx = std::make_shared(trx, c); - transaction_metadata::start_recover_keys( mtrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); - auto r = control->push_transaction( mtrx, deadline, billed_cpu_time_us ); + auto ptrx = std::make_shared( trx, c ); + auto fut = transaction_metadata::start_recover_keys( ptrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); + auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us ); if (no_throw) return r; if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except) throw *r->except; @@ -489,12 +572,12 @@ namespace eosio { namespace testing { typename base_tester::action_result base_tester::push_action(action&& act, uint64_t authorizer) { signed_transaction trx; if (authorizer) { - act.authorization = vector{{authorizer, config::active_name}}; + act.authorization = vector{{account_name(authorizer), config::active_name}}; } trx.actions.emplace_back(std::move(act)); set_transaction_headers(trx); if (authorizer) { - trx.sign(get_private_key(authorizer, "active"), control->get_chain_id()); + trx.sign(get_private_key(account_name(authorizer), "active"), control->get_chain_id()); } try { push_transaction(trx); @@ -863,7 +946,7 @@ namespace eosio { namespace testing { } - vector base_tester::get_row_by_account( uint64_t code, uint64_t scope, uint64_t table, const account_name& act ) const { + vector base_tester::get_row_by_account( name code, name scope, name table, const account_name& act ) const { vector data; const auto& db = control->db(); const auto* t_id = db.find( boost::make_tuple( code, scope, table ) ); @@ -874,8 +957,8 @@ namespace eosio { namespace testing { const auto& idx = db.get_index(); - auto itr = idx.lower_bound( boost::make_tuple( t_id->id, act ) ); - if ( itr == idx.end() || itr->t_id != t_id->id || act.value != itr->primary_key ) { + auto itr = 
idx.lower_bound( boost::make_tuple( t_id->id, act.to_uint64_t() ) ); + if ( itr == idx.end() || itr->t_id != t_id->id || act.to_uint64_t() != itr->primary_key ) { return data; } @@ -929,9 +1012,9 @@ namespace eosio { namespace testing { auto block = a.control->fetch_block_by_number(i); if( block ) { //&& !b.control->is_known_block(block->id()) ) { - auto bs = b.control->create_block_state_future( block ); + auto bsf = b.control->create_block_state_future( block ); b.control->abort_block(); - b.control->push_block(bs); //, eosio::chain::validation_steps::created_block); + b.control->push_block(bsf, forked_branch_callback{}, trx_meta_cache_lookup{}); //, eosio::chain::validation_steps::created_block); } } }; @@ -945,29 +1028,63 @@ namespace eosio { namespace testing { set_abi(config::system_account_name, contracts::before_preactivate_eosio_bios_abi().data()); } + void base_tester::set_before_producer_authority_bios_contract() { + set_code(config::system_account_name, contracts::before_producer_authority_eosio_bios_wasm()); + set_abi(config::system_account_name, contracts::before_producer_authority_eosio_bios_abi().data()); + } + void base_tester::set_bios_contract() { set_code(config::system_account_name, contracts::eosio_bios_wasm()); set_abi(config::system_account_name, contracts::eosio_bios_abi().data()); } - vector base_tester::get_producer_keys( const vector& producer_names )const { + vector base_tester::get_producer_authorities( const vector& producer_names )const { // Create producer schedule - vector schedule; + vector schedule; for (auto& producer_name: producer_names) { - producer_key pk = { producer_name, get_public_key( producer_name, "active" )}; - schedule.emplace_back(pk); + schedule.emplace_back(producer_authority{ producer_name, block_signing_authority_v0{1, {{ get_public_key( producer_name, "active" ), 1}} } }); } return schedule; } transaction_trace_ptr base_tester::set_producers(const vector& producer_names) { - auto schedule = get_producer_keys( 
producer_names ); + auto schedule = get_producer_authorities( producer_names ); + + return set_producer_schedule(schedule); + } + + transaction_trace_ptr base_tester::set_producer_schedule(const vector& schedule ) { + // FC reflection does not create variants that are compatible with ABI 1.1 so we manually translate. + fc::variants schedule_variant; + schedule_variant.reserve(schedule.size()); + for( const auto& e: schedule ) { + schedule_variant.emplace_back(e.get_abi_variant()); + } + + return push_action( config::system_account_name, N(setprods), config::system_account_name, + fc::mutable_variant_object()("schedule", schedule_variant)); + + } + + transaction_trace_ptr base_tester::set_producers_legacy(const vector& producer_names) { + auto schedule = get_producer_authorities( producer_names ); + // down-rank to old version + + vector legacy_keys; + legacy_keys.reserve(schedule.size()); + for (const auto &p : schedule) { + p.authority.visit([&legacy_keys, &p](const auto& auth){ + legacy_keys.emplace_back(legacy::producer_key{p.producer_name, auth.keys.front().key}); + }); + } return push_action( config::system_account_name, N(setprods), config::system_account_name, - fc::mutable_variant_object()("schedule", schedule)); + fc::mutable_variant_object()("schedule", legacy_keys)); + } + const table_id_object* base_tester::find_table( name code, name scope, name table ) { auto tid = control->db().find(boost::make_tuple(code, scope, table)); return tid; @@ -1098,6 +1215,8 @@ namespace eosio { namespace testing { return match; } + const std::string mock::webauthn_private_key::_origin = "mock.webauthn.invalid"; + const sha256 mock::webauthn_private_key::_origin_hash = fc::sha256::hash(mock::webauthn_private_key::_origin); } } /// eosio::testing std::ostream& operator<<( std::ostream& osm, const fc::variant& v ) { diff --git a/libraries/version/CMakeLists.txt b/libraries/version/CMakeLists.txt new file mode 100644 index 00000000000..239c479d92b --- /dev/null +++ 
b/libraries/version/CMakeLists.txt @@ -0,0 +1,49 @@ +cmake_minimum_required(VERSION 3.5) +project(Version) + +# Define the version metadata by default, in case `git` cannot be found. +set(_VERSION_MAJOR_ "unknown") +set(_VERSION_MINOR_ "") +set(_VERSION_PATCH_ "") +set(_VERSION_SUFFIX_ "") +set(_VERSION_HASH_ "") +set(_VERSION_DIRTY_ "") + +# Construct the library target. +add_library( + version + "${CMAKE_CURRENT_SOURCE_DIR}/src/version.cpp" + "${CMAKE_CURRENT_BINARY_DIR}/src/version_impl.cpp") + +# Make dependencies visible to the given target library to be constructed. +target_include_directories( + version + PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include/" + PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src/" ) + +# Create a custom target to update the version metadata upon every build. +find_package(Git) +if(EXISTS ${CMAKE_SOURCE_DIR}/.git AND ${GIT_FOUND}) + add_custom_target( + evaluate_every_build ALL + COMMAND ${CMAKE_COMMAND} -DGIT_EXEC=${GIT_EXECUTABLE} + -DCUR_BIN_DIR=${CMAKE_CURRENT_BINARY_DIR} + -DCUR_SRC_DIR=${CMAKE_CURRENT_SOURCE_DIR} + -DSRC_DIR=${CMAKE_SOURCE_DIR} + -DV_MAJOR=${VERSION_MAJOR} + -DV_MINOR=${VERSION_MINOR} + -DV_PATCH=${VERSION_PATCH} + -DV_SUFFIX=${VERSION_SUFFIX} + -P ${CMAKE_SOURCE_DIR}/CMakeModules/VersionUtils.cmake + BYPRODUCTS ${CMAKE_CURRENT_BINARY_DIR}/src/version_impl.cpp + COMMENT "Updating version metadata..." VERBATIM ) + + # Create a dependency for the given library target. + add_dependencies(version evaluate_every_build) +else() + # Modify and substitute the `.cpp.in` file for a `.cpp` in the build directory. 
+ configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/src/version_impl.cpp.in + ${CMAKE_CURRENT_BINARY_DIR}/src/version_impl.cpp + @ONLY ) +endif() diff --git a/libraries/version/include/eosio/version/version.hpp b/libraries/version/include/eosio/version/version.hpp new file mode 100644 index 00000000000..5934abdcb53 --- /dev/null +++ b/libraries/version/include/eosio/version/version.hpp @@ -0,0 +1,13 @@ +#pragma once + +#include // std::string + +namespace eosio { namespace version { + + ///< Grab the basic version information of the client; example: `v1.8.0-rc1` + const std::string& version_client(); + + ///< Grab the full version information of the client; example: `v1.8.0-rc1-7de458254[-dirty]` + const std::string& version_full(); + +} } diff --git a/libraries/version/src/version.cpp b/libraries/version/src/version.cpp new file mode 100644 index 00000000000..f391c78ead3 --- /dev/null +++ b/libraries/version/src/version.cpp @@ -0,0 +1,15 @@ +#include "version_impl.hpp" + +namespace eosio { namespace version { + + const std::string& version_client() { + static const std::string version{_version_client()}; + return version; + } + + const std::string& version_full() { + static const std::string version{_version_full()}; + return version; + } + +} } diff --git a/libraries/version/src/version_impl.cpp.in b/libraries/version/src/version_impl.cpp.in new file mode 100644 index 00000000000..a71b9da3339 --- /dev/null +++ b/libraries/version/src/version_impl.cpp.in @@ -0,0 +1,40 @@ +/** + * \warning This file is machine generated. DO NOT EDIT. See version_impl.cpp.in for changes. 
+ */ + +#include "version_impl.hpp" + +namespace eosio { namespace version { + + const std::string version_major {"@_VERSION_MAJOR_@" }; + const std::string version_minor {"@_VERSION_MINOR_@" }; + const std::string version_patch {"@_VERSION_PATCH_@" }; + const std::string version_suffix{"@_VERSION_SUFFIX_@"}; + const std::string version_hash {"@_VERSION_HASH_@" }; + const bool version_dirty { @_VERSION_DIRTY_@ }; + + std::string _version_client() { + if( version_major == "unknown" || version_major.empty() || version_minor == "unknown" || version_minor.empty()) { + return "unknown"; + } else { + std::string version{'v' + version_major + '.' + version_minor}; + if( !version_patch.empty() ) version += '.' + version_patch; + if( !version_suffix.empty() ) version += '-' + version_suffix; + return version; + } + } + + std::string _version_full() { + if( version_major == "unknown" || version_major.empty() || version_minor == "unknown" || version_minor.empty()) { + return "unknown"; + } else { + std::string version{'v' + version_major + '.' + version_minor}; + if( !version_patch.empty() ) version += '.' 
+ version_patch; + if( !version_suffix.empty() ) version += '-' + version_suffix; + if( !version_hash.empty() ) version += '-' + version_hash; + if( version_dirty ) version += "-dirty"; + return version; + } + } + +} } diff --git a/libraries/version/src/version_impl.hpp b/libraries/version/src/version_impl.hpp new file mode 100644 index 00000000000..54ffae1b5d1 --- /dev/null +++ b/libraries/version/src/version_impl.hpp @@ -0,0 +1,13 @@ +#pragma once + +#include // std::string + +namespace eosio { namespace version { + + ///< Helper function for `version_client()` + std::string _version_client(); + + ///< Helper function for `version_full()` + std::string _version_full(); + +} } diff --git a/libraries/wasm-jit/CMakeLists.txt b/libraries/wasm-jit/CMakeLists.txt index 8fcdec4e9ce..7697f6ee59f 100644 --- a/libraries/wasm-jit/CMakeLists.txt +++ b/libraries/wasm-jit/CMakeLists.txt @@ -67,11 +67,9 @@ endif() add_subdirectory(Include/Inline) -#add_subdirectory(Source/Emscripten) add_subdirectory(Source/IR) add_subdirectory(Source/Logging) add_subdirectory(Source/Platform) -#add_subdirectory(Source/Programs) add_subdirectory(Source/Runtime) add_subdirectory(Source/WASM) add_subdirectory(Source/WAST) diff --git a/libraries/wasm-jit/Include/Runtime/Intrinsics.h b/libraries/wasm-jit/Include/Runtime/Intrinsics.h index fa3e6f2b8a0..e5d37f3b7d9 100644 --- a/libraries/wasm-jit/Include/Runtime/Intrinsics.h +++ b/libraries/wasm-jit/Include/Runtime/Intrinsics.h @@ -17,78 +17,6 @@ namespace Intrinsics private: const char* name; }; - - // The base class of Intrinsic globals. 
- struct Global - { - Runtime::GlobalInstance* global; - - RUNTIME_API Global(const char* inName,IR::GlobalType inType); - RUNTIME_API ~Global(); - - RUNTIME_API void reset(); - - protected: - void* value; - private: - const char* name; - IR::GlobalType globalType; - }; - - // A partially specialized template for Intrinsic globals: - // Provides access via implicit coercion to a value, and for mutable globals an assignment operator. - template - struct GenericGlobal : Global - { - typedef typename IR::ValueTypeInfo::Value Value; - - GenericGlobal(const char* inName,Value inValue) - : Global(inName,IR::GlobalType(type,isMutable)) { *(Value*)value = inValue; } - - operator Value() const { return *(Value*)value; } - void operator=(Value newValue) { *(Value*)value = newValue; } - }; - template - struct GenericGlobal : Global - { - typedef typename IR::ValueTypeInfo::Value Value; - - GenericGlobal(const char* inName,Value inValue) - : Global(inName,IR::GlobalType(type,false)) { *(Value*)value = inValue; } - - operator Value() const { return *(Value*)value; } - - void reset(Value inValue) - { - Global::reset(); - *(Value*)value = inValue; - } - }; - - // Intrinsic memories and tables - struct Memory - { - RUNTIME_API Memory(const char* inName,const IR::MemoryType& inType); - RUNTIME_API ~Memory(); - - operator Runtime::MemoryInstance*() const { return memory; } - - private: - const char* name; - Runtime::MemoryInstance* const memory; - }; - - struct Table - { - RUNTIME_API Table(const char* inName,const IR::TableType& inType); - RUNTIME_API ~Table(); - - operator Runtime::TableInstance*() const { return table; } - - private: - const char* name; - Runtime::TableInstance* const table; - }; // Finds an intrinsic object by name and type. 
RUNTIME_API Runtime::ObjectInstance* find(const std::string& name,const IR::ObjectType& type); diff --git a/libraries/wasm-jit/Source/Emscripten/CMakeLists.txt b/libraries/wasm-jit/Source/Emscripten/CMakeLists.txt deleted file mode 100644 index 1999cabd51f..00000000000 --- a/libraries/wasm-jit/Source/Emscripten/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -set(Sources Emscripten.cpp) -set(PublicHeaders ${WAVM_INCLUDE_DIR}/Emscripten/Emscripten.h) -include_directories(${WAVM_INCLUDE_DIR}/Emscripten) - -add_library(Emscripten STATIC ${Sources} ${PublicHeaders}) - -add_definitions(-DEMSCRIPTEN_API=DLL_EXPORT) - -target_link_libraries(Emscripten Logging Platform Runtime) diff --git a/libraries/wasm-jit/Source/Emscripten/Emscripten.cpp b/libraries/wasm-jit/Source/Emscripten/Emscripten.cpp deleted file mode 100644 index 969d7566642..00000000000 --- a/libraries/wasm-jit/Source/Emscripten/Emscripten.cpp +++ /dev/null @@ -1,451 +0,0 @@ -#include "Inline/BasicTypes.h" -#include "Logging/Logging.h" -#include "IR/IR.h" -#include "IR/Module.h" -#include "Runtime/Runtime.h" -#include "Runtime/Intrinsics.h" -#include "Emscripten.h" -#include -#include -#include -#include - -#ifndef _WIN32 -#include -#endif - -namespace Emscripten -{ - using namespace IR; - using namespace Runtime; - - static U32 coerce32bitAddress(Uptr address) - { - if(address >= UINT32_MAX) { causeException(Exception::Cause::accessViolation); } - return (U32)address; - } - - DEFINE_INTRINSIC_GLOBAL(env,STACKTOP,STACKTOP,i32,false,0); - DEFINE_INTRINSIC_GLOBAL(env,STACK_MAX,STACK_MAX,i32,false,0); - DEFINE_INTRINSIC_GLOBAL(env,tempDoublePtr,tempDoublePtr,i32,false,0); - DEFINE_INTRINSIC_GLOBAL(env,ABORT,ABORT,i32,false,0); - DEFINE_INTRINSIC_GLOBAL(env,cttz_i8,cttz_i8,i32,false,0); - DEFINE_INTRINSIC_GLOBAL(env,___dso_handle,___dso_handle,i32,false,0); - DEFINE_INTRINSIC_GLOBAL(env,_stderr,_stderr,i32,false,0); - DEFINE_INTRINSIC_GLOBAL(env,_stdin,_stdin,i32,false,0); - 
DEFINE_INTRINSIC_GLOBAL(env,_stdout,_stdout,i32,false,0); - - DEFINE_INTRINSIC_MEMORY(env,emscriptenMemory,memory,MemoryType(false,SizeConstraints({256,UINT64_MAX}))); - DEFINE_INTRINSIC_TABLE(env,table,table,TableType(TableElementType::anyfunc,false,SizeConstraints({1024*1024,UINT64_MAX}))); - - DEFINE_INTRINSIC_GLOBAL(env,memoryBase,memoryBase,i32,false,1024); - DEFINE_INTRINSIC_GLOBAL(env,tableBase,tableBase,i32,false,0); - - DEFINE_INTRINSIC_GLOBAL(env,DYNAMICTOP_PTR,DYNAMICTOP_PTR,i32,false,0) - DEFINE_INTRINSIC_GLOBAL(env,em_environ,_environ,i32,false,0) - DEFINE_INTRINSIC_GLOBAL(env,EMTSTACKTOP,EMTSTACKTOP,i32,false,0) - DEFINE_INTRINSIC_GLOBAL(env,EMT_STACK_MAX,EMT_STACK_MAX,i32,false,0) - DEFINE_INTRINSIC_GLOBAL(env,eb,eb,i32,false,0) - - Platform::Mutex* sbrkMutex = Platform::createMutex(); - bool hasSbrkBeenCalled = false; - Uptr sbrkNumPages = 0; - U32 sbrkMinBytes = 0; - U32 sbrkNumBytes = 0; - - static U32 sbrk(I32 numBytes) - { - Platform::Lock sbrkLock(sbrkMutex); - - if(!hasSbrkBeenCalled) - { - // Do some first time initialization. - sbrkNumPages = getMemoryNumPages(emscriptenMemory); - sbrkMinBytes = sbrkNumBytes = coerce32bitAddress(sbrkNumPages << numBytesPerPageLog2); - hasSbrkBeenCalled = true; - } - else - { - // Ensure that nothing else is calling growMemory/shrinkMemory. - if(getMemoryNumPages(emscriptenMemory) != sbrkNumPages) - { causeException(Exception::Cause::unknown); } - } - - const U32 previousNumBytes = sbrkNumBytes; - - // Round the absolute value of numBytes to an alignment boundary, and ensure it won't allocate too much or too little memory. - numBytes = (numBytes + 7) & ~7; - if(numBytes > 0 && previousNumBytes > UINT32_MAX - numBytes) { causeException(Exception::Cause::accessViolation); } - else if(numBytes < 0 && previousNumBytes < sbrkMinBytes - numBytes) { causeException(Exception::Cause::accessViolation); } - - // Update the number of bytes allocated, and compute the number of pages needed for it. 
- sbrkNumBytes += numBytes; - const Uptr numDesiredPages = (sbrkNumBytes + numBytesPerPage - 1) >> numBytesPerPageLog2; - - // Grow or shrink the memory object to the desired number of pages. - if(numDesiredPages > sbrkNumPages) { growMemory(emscriptenMemory,numDesiredPages - sbrkNumPages); } - else if(numDesiredPages < sbrkNumPages) { shrinkMemory(emscriptenMemory,sbrkNumPages - numDesiredPages); } - sbrkNumPages = numDesiredPages; - - return previousNumBytes; - } - - DEFINE_INTRINSIC_FUNCTION1(env,_sbrk,_sbrk,i32,i32,numBytes) - { - return sbrk(numBytes); - } - - DEFINE_INTRINSIC_FUNCTION1(env,_time,_time,i32,i32,address) - { - time_t t = time(nullptr); - if(address) - { - memoryRef(emscriptenMemory,address) = (I32)t; - } - return (I32)t; - } - - DEFINE_INTRINSIC_FUNCTION0(env,___errno_location,___errno_location,i32) - { - return 0; - } - - DEFINE_INTRINSIC_FUNCTION1(env,_sysconf,_sysconf,i32,i32,a) - { - enum { sysConfPageSize = 30 }; - switch(a) - { - case sysConfPageSize: return IR::numBytesPerPage; - default: causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); - } - } - - DEFINE_INTRINSIC_FUNCTION2(env,_pthread_cond_wait,_pthread_cond_wait,i32,i32,a,i32,b) { return 0; } - DEFINE_INTRINSIC_FUNCTION1(env,_pthread_cond_broadcast,_pthread_cond_broadcast,i32,i32,a) { return 0; } - DEFINE_INTRINSIC_FUNCTION2(env,_pthread_key_create,_pthread_key_create,i32,i32,a,i32,b) { causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); } - DEFINE_INTRINSIC_FUNCTION1(env,_pthread_mutex_lock,_pthread_mutex_lock,i32,i32,a) { return 0; } - DEFINE_INTRINSIC_FUNCTION1(env,_pthread_mutex_unlock,_pthread_mutex_unlock,i32,i32,a) { return 0; } - DEFINE_INTRINSIC_FUNCTION2(env,_pthread_setspecific,_pthread_setspecific,i32,i32,a,i32,b) { causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); } - DEFINE_INTRINSIC_FUNCTION1(env,_pthread_getspecific,_pthread_getspecific,i32,i32,a) { 
causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); } - DEFINE_INTRINSIC_FUNCTION2(env,_pthread_once,_pthread_once,i32,i32,a,i32,b) { causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); } - DEFINE_INTRINSIC_FUNCTION2(env,_pthread_cleanup_push,_pthread_cleanup_push,none,i32,a,i32,b) { } - DEFINE_INTRINSIC_FUNCTION1(env,_pthread_cleanup_pop,_pthread_cleanup_pop,none,i32,a) { } - DEFINE_INTRINSIC_FUNCTION0(env,_pthread_self,_pthread_self,i32) { return 0; } - - DEFINE_INTRINSIC_FUNCTION0(env,___ctype_b_loc,___ctype_b_loc,i32) - { - unsigned short data[384] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,2,2,2,2,2,2,8195,8194,8194,8194,8194,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,24577,49156,49156,49156,49156,49156,49156,49156,49156,49156,49156,49156,49156,49156,49156,49156,55304,55304,55304,55304,55304,55304,55304,55304,55304,55304,49156,49156,49156,49156,49156,49156,49156,54536,54536,54536,54536,54536,54536,50440,50440,50440,50440,50440,50440,50440,50440,50440,50440,50440,50440,50440,50440,50440,50440,50440,50440,50440,50440,49156,49156,49156,49156,49156,49156,54792,54792,54792,54792,54792,54792,50696,50696,50696,50696,50696,50696,50696,50696,50696,50696,50696,50696,50696,50696,50696,50696,50696,50696,50696,50696,49156,49156,49156,49156,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; - static U32 vmAddress = 0; - if(vmAddress == 0) - { - vmAddress = coerce32bitAddress(sbrk(sizeof(data))); - memcpy(memoryArrayPtr(emscriptenMemory,vmAddress,sizeof(data)),data,sizeof(data)); - } - return vmAddress + 
sizeof(short)*128; - } - DEFINE_INTRINSIC_FUNCTION0(env,___ctype_toupper_loc,___ctype_toupper_loc,i32) - { - I32 data[384] = {128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255}; - static U32 vmAddress = 0; - if(vmAddress == 0) - { - vmAddress = coerce32bitAddress(sbrk(sizeof(data))); - memcpy(memoryArrayPtr(emscriptenMemory,vmAddress,sizeof(data)),data,sizeof(data)); - } - return vmAddress + sizeof(I32)*128; - } - DEFINE_INTRINSIC_FUNCTION0(env,___ctype_tolower_loc,___ctype_tolower_loc,i32) - { - I32 data[384] = 
{128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255}; - static U32 vmAddress = 0; - if(vmAddress == 0) - { - vmAddress = coerce32bitAddress(sbrk(sizeof(data))); - memcpy(memoryArrayPtr(emscriptenMemory,vmAddress,sizeof(data)),data,sizeof(data)); - } - return vmAddress + sizeof(I32)*128; - } - DEFINE_INTRINSIC_FUNCTION4(env,___assert_fail,___assert_fail,none,i32,condition,i32,filename,i32,line,i32,function) - { - causeException(Runtime::Exception::Cause::calledAbort); - } - - DEFINE_INTRINSIC_FUNCTION3(env,___cxa_atexit,___cxa_atexit,i32,i32,a,i32,b,i32,c) - { - return 0; - } - 
DEFINE_INTRINSIC_FUNCTION1(env,___cxa_guard_acquire,___cxa_guard_acquire,i32,i32,address) - { - if(!memoryRef(emscriptenMemory,address)) - { - memoryRef(emscriptenMemory,address) = 1; - return 1; - } - else - { - return 0; - } - } - DEFINE_INTRINSIC_FUNCTION1(env,___cxa_guard_release,___cxa_guard_release,none,i32,a) - {} - DEFINE_INTRINSIC_FUNCTION3(env,___cxa_throw,___cxa_throw,none,i32,a,i32,b,i32,c) - { - causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); - } - DEFINE_INTRINSIC_FUNCTION1(env,___cxa_begin_catch,___cxa_begin_catch,i32,i32,a) - { - causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); - } - DEFINE_INTRINSIC_FUNCTION1(env,___cxa_allocate_exception,___cxa_allocate_exception,i32,i32,size) - { - return coerce32bitAddress(sbrk(size)); - } - DEFINE_INTRINSIC_FUNCTION0(env,__ZSt18uncaught_exceptionv,__ZSt18uncaught_exceptionv,i32) - { - causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); - } - DEFINE_INTRINSIC_FUNCTION0(env,_abort,_abort,none) - { - causeException(Runtime::Exception::Cause::calledAbort); - } - DEFINE_INTRINSIC_FUNCTION1(env,_exit,_exit,none,i32,code) - { - causeException(Runtime::Exception::Cause::calledAbort); - } - DEFINE_INTRINSIC_FUNCTION1(env,abort,abort,none,i32,code) - { - Log::printf(Log::Category::error,"env.abort(%i)\n",code); - causeException(Runtime::Exception::Cause::calledAbort); - } - - static U32 currentLocale = 0; - DEFINE_INTRINSIC_FUNCTION1(env,_uselocale,_uselocale,i32,i32,locale) - { - auto oldLocale = currentLocale; - currentLocale = locale; - return oldLocale; - } - DEFINE_INTRINSIC_FUNCTION3(env,_newlocale,_newlocale,i32,i32,mask,i32,locale,i32,base) - { - if(!base) - { - base = coerce32bitAddress(sbrk(4)); - } - return base; - } - DEFINE_INTRINSIC_FUNCTION1(env,_freelocale,_freelocale,none,i32,a) - {} - - DEFINE_INTRINSIC_FUNCTION5(env,_strftime_l,_strftime_l,i32,i32,a,i32,b,i32,c,i32,d,i32,e) { 
causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); } - DEFINE_INTRINSIC_FUNCTION1(env,_strerror,_strerror,i32,i32,a) { causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); } - - DEFINE_INTRINSIC_FUNCTION2(env,_catopen,_catopen,i32,i32,a,i32,b) { return (U32)-1; } - DEFINE_INTRINSIC_FUNCTION4(env,_catgets,_catgets,i32,i32,catd,i32,set_id,i32,msg_id,i32,s) { return s; } - DEFINE_INTRINSIC_FUNCTION1(env,_catclose,_catclose,i32,i32,a) { return 0; } - - DEFINE_INTRINSIC_FUNCTION3(env,_emscripten_memcpy_big,_emscripten_memcpy_big,i32,i32,a,i32,b,i32,c) - { - memcpy(memoryArrayPtr(emscriptenMemory,a,c),memoryArrayPtr(emscriptenMemory,b,c),U32(c)); - return a; - } - - enum class ioStreamVMHandle - { - StdErr = 1, - StdIn = 2, - StdOut = 3 - }; - FILE* vmFile(U32 vmHandle) - { - switch((ioStreamVMHandle)vmHandle) - { - case ioStreamVMHandle::StdErr: return stderr; - case ioStreamVMHandle::StdIn: return stdin; - case ioStreamVMHandle::StdOut: return stdout; - default: return stdout;//std::cerr << "invalid file handle " << vmHandle << std::endl; throw; - } - } - - DEFINE_INTRINSIC_FUNCTION3(env,_vfprintf,_vfprintf,i32,i32,file,i32,formatPointer,i32,argList) - { - causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); - } - DEFINE_INTRINSIC_FUNCTION1(env,_getc,_getc,i32,i32,file) - { - return getc(vmFile(file)); - } - DEFINE_INTRINSIC_FUNCTION2(env,_ungetc,_ungetc,i32,i32,character,i32,file) - { - return ungetc(character,vmFile(file)); - } - DEFINE_INTRINSIC_FUNCTION4(env,_fread,_fread,i32,i32,pointer,i32,size,i32,count,i32,file) - { - return (I32)fread(memoryArrayPtr(emscriptenMemory,pointer,U64(size) * U64(count)),U64(size),U64(count),vmFile(file)); - } - DEFINE_INTRINSIC_FUNCTION4(env,_fwrite,_fwrite,i32,i32,pointer,i32,size,i32,count,i32,file) - { - return (I32)fwrite(memoryArrayPtr(emscriptenMemory,pointer,U64(size) * U64(count)),U64(size),U64(count),vmFile(file)); - } - 
DEFINE_INTRINSIC_FUNCTION2(env,_fputc,_fputc,i32,i32,character,i32,file) - { - return fputc(character,vmFile(file)); - } - DEFINE_INTRINSIC_FUNCTION1(env,_fflush,_fflush,i32,i32,file) - { - return fflush(vmFile(file)); - } - - DEFINE_INTRINSIC_FUNCTION1(env,___lock,___lock,none,i32,a) - { - } - DEFINE_INTRINSIC_FUNCTION1(env,___unlock,___unlock,none,i32,a) - { - } - DEFINE_INTRINSIC_FUNCTION1(env,___lockfile,___lockfile,i32,i32,a) - { - return 1; - } - DEFINE_INTRINSIC_FUNCTION1(env,___unlockfile,___unlockfile,none,i32,a) - { - } - - DEFINE_INTRINSIC_FUNCTION2(env,___syscall6,___syscall6,i32,i32,a,i32,b) - { - // close - causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); - } - - DEFINE_INTRINSIC_FUNCTION2(env,___syscall54,___syscall54,i32,i32,a,i32,b) - { - // ioctl - return 0; - } - - DEFINE_INTRINSIC_FUNCTION2(env,___syscall140,___syscall140,i32,i32,a,i32,b) - { - // llseek - causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); - } - - DEFINE_INTRINSIC_FUNCTION2(env,___syscall145,___syscall145,i32,i32,file,i32,argsPtr) - { - // readv - causeException(Runtime::Exception::Cause::calledUnimplementedIntrinsic); - } - - DEFINE_INTRINSIC_FUNCTION2(env,___syscall146,___syscall146,i32,i32,file,i32,argsPtr) - { - // writev - U32* args = memoryArrayPtr(emscriptenMemory,argsPtr,3); - U32 iov = args[1]; - U32 iovcnt = args[2]; -#ifdef _WIN32 - U32 count = 0; - for(U32 i = 0; i < iovcnt; i++) - { - U32 base = memoryRef(emscriptenMemory,iov + i * 8); - U32 len = memoryRef(emscriptenMemory,iov + i * 8 + 4); - U32 size = (U32)fwrite(memoryArrayPtr(emscriptenMemory,base,len), 1, len, vmFile(file)); - count += size; - if (size < len) - break; - } -#else - struct iovec *native_iovec = new(alloca(sizeof(iovec)*iovcnt)) struct iovec [iovcnt]; - for(U32 i = 0; i < iovcnt; i++) - { - U32 base = memoryRef(emscriptenMemory,iov + i * 8); - U32 len = memoryRef(emscriptenMemory,iov + i * 8 + 4); - - native_iovec[i].iov_base = 
memoryArrayPtr(emscriptenMemory,base,len); - native_iovec[i].iov_len = len; - } - Iptr count = writev(fileno(vmFile(file)), native_iovec, iovcnt); -#endif - return count; - } - - DEFINE_INTRINSIC_FUNCTION1(asm2wasm,f64_to_int,f64-to-int,i32,f64,f) { return (I32)f; } - - static F64 zero = 0.0; - - static F64 makeNaN() { return zero / zero; } - static F64 makeInf() { return 1.0/zero; } - - DEFINE_INTRINSIC_GLOBAL(global,NaN,NaN,f64,false,makeNaN()) - DEFINE_INTRINSIC_GLOBAL(global,Infinity,Infinity,f64,false,makeInf()) - - DEFINE_INTRINSIC_FUNCTION2(asm2wasm,i32_remu,i32u-rem,i32,i32,left,i32,right) - { - return (I32)((U32)left % (U32)right); - } - DEFINE_INTRINSIC_FUNCTION2(asm2wasm,i32_rems,i32s-rem,i32,i32,left,i32,right) - { - return left % right; - } - DEFINE_INTRINSIC_FUNCTION2(asm2wasm,i32_divu,i32u-div,i32,i32,left,i32,right) - { - return (I32)((U32)left / (U32)right); - } - DEFINE_INTRINSIC_FUNCTION2(asm2wasm,i32_divs,i32s-div,i32,i32,left,i32,right) - { - return left / right; - } - - EMSCRIPTEN_API void initInstance(const Module& module,ModuleInstance* moduleInstance) - { - // Only initialize the module as an Emscripten module if it uses the emscripten memory by default. - if(getDefaultMemory(moduleInstance) == emscriptenMemory) - { - // Allocate a 5MB stack. - STACKTOP.reset(coerce32bitAddress(sbrk(5*1024*1024))); - STACK_MAX.reset(coerce32bitAddress(sbrk(0))); - - // Allocate some 8 byte memory region for tempDoublePtr. - tempDoublePtr.reset(coerce32bitAddress(sbrk(8))); - - // Setup IO stream handles. 
- _stderr.reset(coerce32bitAddress(sbrk(sizeof(U32)))); - _stdin.reset(coerce32bitAddress(sbrk(sizeof(U32)))); - _stdout.reset(coerce32bitAddress(sbrk(sizeof(U32)))); - memoryRef(emscriptenMemory,_stderr) = (U32)ioStreamVMHandle::StdErr; - memoryRef(emscriptenMemory,_stdin) = (U32)ioStreamVMHandle::StdIn; - memoryRef(emscriptenMemory,_stdout) = (U32)ioStreamVMHandle::StdOut; - - // Call the establishStackSpace function to set the Emscripten module's internal stack pointers. - FunctionInstance* establishStackSpace = asFunctionNullable(getInstanceExport(moduleInstance,"establishStackSpace")); - if(establishStackSpace && getFunctionType(establishStackSpace) == FunctionType::get(ResultType::none,{ValueType::i32,ValueType::i32})) - { - std::vector parameters = {Runtime::Value(STACKTOP),Runtime::Value(STACK_MAX)}; - Runtime::invokeFunction(establishStackSpace,parameters); - } - - // Call the global initializer functions. - for(Uptr exportIndex = 0;exportIndex < module.exports.size();++exportIndex) - { - const Export& functionExport = module.exports[exportIndex]; - if(functionExport.kind == ObjectKind::function && !strncmp(functionExport.name.c_str(),"__GLOBAL__",10)) - { - FunctionInstance* functionInstance = asFunctionNullable(getInstanceExport(moduleInstance,functionExport.name)); - if(functionInstance) { Runtime::invokeFunction(functionInstance,{}); } - } - } - } - } - - EMSCRIPTEN_API void injectCommandArgs(const std::vector& argStrings,std::vector& outInvokeArgs) - { - U8* emscriptenMemoryBase = getMemoryBaseAddress(emscriptenMemory); - - U32* argvOffsets = (U32*)(emscriptenMemoryBase + sbrk((U32)(sizeof(U32) * (argStrings.size() + 1)))); - for(Uptr argIndex = 0;argIndex < argStrings.size();++argIndex) - { - auto stringSize = strlen(argStrings[argIndex])+1; - auto stringMemory = emscriptenMemoryBase + sbrk((U32)stringSize); - memcpy(stringMemory,argStrings[argIndex],stringSize); - argvOffsets[argIndex] = (U32)(stringMemory - emscriptenMemoryBase); - } - 
argvOffsets[argStrings.size()] = 0; - outInvokeArgs = {(U32)argStrings.size(), (U32)((U8*)argvOffsets - emscriptenMemoryBase) }; - } -} diff --git a/libraries/wasm-jit/Source/Programs/Assemble.cpp b/libraries/wasm-jit/Source/Programs/Assemble.cpp deleted file mode 100644 index 60ca42cf0f9..00000000000 --- a/libraries/wasm-jit/Source/Programs/Assemble.cpp +++ /dev/null @@ -1,50 +0,0 @@ -#include "Inline/BasicTypes.h" -#include "CLI.h" -#include "WAST/WAST.h" -#include "WASM/WASM.h" - -int commandMain(int argc,char** argv) -{ - if(argc < 3) - { - std::cerr << "Usage: Assemble in.wast out.wasm [switches]" << std::endl; - std::cerr << " -n|--omit-names\t\tOmits WAST function and local names from the output" << std::endl; - return EXIT_FAILURE; - } - const char* inputFilename = argv[1]; - const char* outputFilename = argv[2]; - bool omitNames = false; - if(argc > 3) - { - for(Iptr argumentIndex = 3;argumentIndex < argc;++argumentIndex) - { - if(!strcmp(argv[argumentIndex],"-n") || !strcmp(argv[argumentIndex],"--omit-names")) - { - omitNames = true; - } - else - { - std::cerr << "Unrecognized argument: " << argv[argumentIndex] << std::endl; - return EXIT_FAILURE; - } - } - } - - // Load the WAST module. - IR::Module module; - if(!loadTextModule(inputFilename,module)) { return EXIT_FAILURE; } - - // If the command-line switch to omit names was specified, strip the name section. - if(omitNames) - { - for(auto sectionIt = module.userSections.begin();sectionIt != module.userSections.end();++sectionIt) - { - if(sectionIt->name == "name") { module.userSections.erase(sectionIt); break; } - } - } - - // Write the binary module. 
- if(!saveBinaryModule(outputFilename,module)) { return EXIT_FAILURE; } - - return EXIT_SUCCESS; -} diff --git a/libraries/wasm-jit/Source/Programs/CLI.h b/libraries/wasm-jit/Source/Programs/CLI.h deleted file mode 100644 index 18dce3b3670..00000000000 --- a/libraries/wasm-jit/Source/Programs/CLI.h +++ /dev/null @@ -1,184 +0,0 @@ -#pragma once - -#include "Inline/BasicTypes.h" -#include "Inline/Floats.h" -#include "Inline/Timing.h" -#include "WAST/WAST.h" -#include "WASM/WASM.h" -#include "IR/Module.h" -#include "IR/Validate.h" -#include "Runtime/Runtime.h" - -#include -#include -#include -#include -#include - -inline std::string loadFile(const char* filename) -{ - Timing::Timer timer; - std::ifstream stream(filename,std::ios::binary | std::ios::ate); - if(!stream.is_open()) - { - std::cerr << "Failed to open " << filename << ": " << std::strerror(errno) << std::endl; - return std::string(); - } - std::string data; - data.resize((unsigned int)stream.tellg()); - stream.seekg(0); - stream.read(const_cast(data.data()),data.size()); - stream.close(); - Timing::logRatePerSecond("loaded file",timer,data.size() / 1024.0 / 1024.0,"MB"); - return data; -} - -inline bool loadTextModule(const char* filename,const std::string& wastString,IR::Module& outModule) -{ - std::vector parseErrors; - WAST::parseModule(wastString.c_str(),wastString.size(),outModule,parseErrors); - if(!parseErrors.size()) { return true; } - else - { - // Print any parse errors; - std::cerr << "Error parsing WebAssembly text file:" << std::endl; - for(auto& error : parseErrors) - { - std::cerr << filename << ":" << error.locus.describe() << ": " << error.message.c_str() << std::endl; - std::cerr << error.locus.sourceLine << std::endl; - std::cerr << std::setw(error.locus.column(8)) << "^" << std::endl; - } - return false; - } -} - -inline bool loadTextModule(const char* filename,IR::Module& outModule) -{ - // Read the file into a string. 
- auto wastBytes = loadFile(filename); - if(!wastBytes.size()) { return false; } - const std::string wastString = std::move(wastBytes); - - return loadTextModule(filename,wastString,outModule); -} - -inline bool loadBinaryModule(const std::string& wasmBytes,IR::Module& outModule) -{ - Timing::Timer loadTimer; - - // Load the module from a binary WebAssembly file. - try - { - Serialization::MemoryInputStream stream((const U8*)wasmBytes.data(),wasmBytes.size()); - WASM::serialize(stream,outModule); - } - catch(Serialization::FatalSerializationException exception) - { - std::cerr << "Error deserializing WebAssembly binary file:" << std::endl; - std::cerr << exception.message << std::endl; - return false; - } - catch(IR::ValidationException exception) - { - std::cerr << "Error validating WebAssembly binary file:" << std::endl; - std::cerr << exception.message << std::endl; - return false; - } - catch(std::bad_alloc) - { - std::cerr << "Memory allocation failed: input is likely malformed" << std::endl; - return false; - } - - Timing::logRatePerSecond("Loaded WASM",loadTimer,wasmBytes.size()/1024.0/1024.0,"MB"); - return true; -} - -inline bool loadBinaryModule(const char* wasmFilename,IR::Module& outModule) -{ - // Read in packed .wasm file bytes. - auto wasmBytes = loadFile(wasmFilename); - if(!wasmBytes.size()) { return false; } - - return loadBinaryModule(wasmBytes,outModule); -} - -inline bool loadModule(const char* filename,IR::Module& outModule) -{ - // Read the specified file into an array. - auto fileBytes = loadFile(filename); - if(!fileBytes.size()) { return false; } - - // If the file starts with the WASM binary magic number, load it as a binary module. - if(*(U32*)fileBytes.data() == 0x6d736100) { return loadBinaryModule(fileBytes,outModule); } - else - { - // Otherwise, load it as a text module. 
- auto wastString = std::move(fileBytes); - return loadTextModule(filename,wastString,outModule); - } -} - -inline bool saveBinaryModule(const char* wasmFilename,const IR::Module& module) -{ - Timing::Timer saveTimer; - - std::vector wasmBytes; - try - { - // Serialize the WebAssembly module. - Serialization::ArrayOutputStream stream; - WASM::serialize(stream,module); - wasmBytes = stream.getBytes(); - } - catch(Serialization::FatalSerializationException exception) - { - std::cerr << "Error serializing WebAssembly binary file:" << std::endl; - std::cerr << exception.message << std::endl; - return false; - } - - Timing::logRatePerSecond("Saved WASM",saveTimer,wasmBytes.size()/1024.0/1024.0,"MB"); - - // Write the serialized data to the output file. - std::ofstream outputStream(wasmFilename,std::ios::binary); - outputStream.write((char*)wasmBytes.data(),wasmBytes.size()); - outputStream.close(); - - return true; -} - -inline bool endsWith(const char *str, const char *suffix) -{ - if(!str || !suffix) { return false; } - Uptr lenstr = strlen(str); - Uptr lensuffix = strlen(suffix); - if(lenstr < lensuffix) { return false; } - return (strncmp(str+lenstr-lensuffix, suffix, lensuffix) == 0); -} - -int commandMain(int argc,char** argv); - -int main(int argc,char** argv) -{ - try - { - return commandMain(argc,argv); - } - catch(IR::ValidationException exception) - { - std::cerr << "Failed to validate module: " << exception.message << std::endl; - return EXIT_FAILURE; - } - catch(Runtime::Exception exception) - { - std::cerr << "Runtime exception: " << describeExceptionCause(exception.cause) << std::endl; - for(auto calledFunction : exception.callStack) { std::cerr << " " << calledFunction << std::endl; } - return EXIT_FAILURE; - } - catch(Serialization::FatalSerializationException exception) - { - std::cerr << "Fatal serialization exception: " << exception.message << std::endl; - return EXIT_FAILURE; - } -} \ No newline at end of file diff --git 
a/libraries/wasm-jit/Source/Programs/CMakeLists.txt b/libraries/wasm-jit/Source/Programs/CMakeLists.txt deleted file mode 100644 index 260f4c1092c..00000000000 --- a/libraries/wasm-jit/Source/Programs/CMakeLists.txt +++ /dev/null @@ -1,15 +0,0 @@ -add_executable(Assemble Assemble.cpp CLI.h) -target_link_libraries(Assemble Logging IR WAST WASM) -set_target_properties(Assemble PROPERTIES FOLDER Programs) - -add_executable(Disassemble Disassemble.cpp CLI.h) -target_link_libraries(Disassemble Logging IR WAST WASM) -set_target_properties(Disassemble PROPERTIES FOLDER Programs) - -add_executable(Test Test.cpp CLI.h) -target_link_libraries(Test Logging IR WAST Runtime) -set_target_properties(Test PROPERTIES FOLDER Programs) - -add_executable(wavm wavm.cpp CLI.h) -target_link_libraries(wavm Logging IR WAST WASM Runtime Emscripten) -set_target_properties(wavm PROPERTIES FOLDER Programs) diff --git a/libraries/wasm-jit/Source/Programs/Disassemble.cpp b/libraries/wasm-jit/Source/Programs/Disassemble.cpp deleted file mode 100644 index 16c6af5cfe8..00000000000 --- a/libraries/wasm-jit/Source/Programs/Disassemble.cpp +++ /dev/null @@ -1,29 +0,0 @@ -#include "Inline/BasicTypes.h" -#include "CLI.h" -#include "WAST/WAST.h" -#include "WASM/WASM.h" - -int commandMain(int argc,char** argv) -{ - if(argc != 3) - { - std::cerr << "Usage: Disassemble in.wasm out.wast" << std::endl; - return EXIT_FAILURE; - } - const char* inputFilename = argv[1]; - const char* outputFilename = argv[2]; - - // Load the WASM file. - IR::Module module; - if(!loadBinaryModule(inputFilename,module)) { return EXIT_FAILURE; } - - // Print the module to WAST. - const std::string wastString = WAST::print(module); - - // Write the serialized data to the output file. 
- std::ofstream outputStream(outputFilename); - outputStream.write(wastString.data(),wastString.size()); - outputStream.close(); - - return EXIT_SUCCESS; -} diff --git a/libraries/wasm-jit/Source/Programs/Test.cpp b/libraries/wasm-jit/Source/Programs/Test.cpp deleted file mode 100644 index d6c4f49ac8f..00000000000 --- a/libraries/wasm-jit/Source/Programs/Test.cpp +++ /dev/null @@ -1,379 +0,0 @@ -#include "Inline/BasicTypes.h" -#include "Inline/Serialization.h" -#include "Platform/Platform.h" -#include "WAST/WAST.h" -#include "WAST/TestScript.h" -#include "WASM/WASM.h" -#include "Runtime/Runtime.h" -#include "Runtime/Linker.h" -#include "Runtime/Intrinsics.h" - -#include "CLI.h" - -#include -#include -#include -#include - -using namespace WAST; -using namespace IR; -using namespace Runtime; - -struct TestScriptState -{ - bool hasInstantiatedModule; - ModuleInstance* lastModuleInstance; - - std::map moduleInternalNameToInstanceMap; - std::map moduleNameToInstanceMap; - - std::vector errors; - - TestScriptState() : hasInstantiatedModule(false), lastModuleInstance(nullptr) {} -}; - -struct TestScriptResolver : Resolver -{ - TestScriptResolver(const TestScriptState& inState): state(inState) {} - bool resolve(const std::string& moduleName,const std::string& exportName,ObjectType type,ObjectInstance*& outObject) override - { - // Try to resolve an intrinsic first. - if(IntrinsicResolver::singleton.resolve(moduleName,exportName,type,outObject)) { return true; } - - // Then look for a named module. - auto mapIt = state.moduleNameToInstanceMap.find(moduleName); - if(mapIt != state.moduleNameToInstanceMap.end()) - { - outObject = getInstanceExport(mapIt->second,exportName); - return outObject != nullptr && isA(outObject,type); - } - - return false; - } -private: - const TestScriptState& state; -}; - -void testErrorf(TestScriptState& state,const TextFileLocus& locus,const char* messageFormat,...) 
-{ - va_list messageArguments; - va_start(messageArguments,messageFormat); - char messageBuffer[1024]; - int numPrintedChars = std::vsnprintf(messageBuffer,sizeof(messageBuffer),messageFormat,messageArguments); - if(numPrintedChars >= 1023 || numPrintedChars < 0) { Errors::unreachable(); } - messageBuffer[numPrintedChars] = 0; - va_end(messageArguments); - - state.errors.push_back({locus,messageBuffer}); -} - -void collectGarbage(TestScriptState& state) -{ - std::vector rootObjects; - rootObjects.push_back(asObject(state.lastModuleInstance)); - for(auto& mapIt : state.moduleInternalNameToInstanceMap) { rootObjects.push_back(asObject(mapIt.second)); } - for(auto& mapIt : state.moduleNameToInstanceMap) { rootObjects.push_back(asObject(mapIt.second)); } - freeUnreferencedObjects(std::move(rootObjects)); -} - -ModuleInstance* getModuleContextByInternalName(TestScriptState& state,const TextFileLocus& locus,const char* context,const std::string& internalName) -{ - // Look up the module this invoke uses. - if(!state.hasInstantiatedModule) { testErrorf(state,locus,"no module to use in %s",context); return nullptr; } - ModuleInstance* moduleInstance = state.lastModuleInstance; - if(internalName.size()) - { - auto mapIt = state.moduleInternalNameToInstanceMap.find(internalName); - if(mapIt == state.moduleInternalNameToInstanceMap.end()) - { - testErrorf(state,locus,"unknown %s module name: %s",context,internalName.c_str()); - return nullptr; - } - moduleInstance = mapIt->second; - } - return moduleInstance; -} - -bool processAction(TestScriptState& state,Action* action,Result& outResult) -{ - outResult = Result(); - - switch(action->type) - { - case ActionType::_module: - { - auto moduleAction = (ModuleAction*)action; - - // Clear the previous module. - state.lastModuleInstance = nullptr; - collectGarbage(state); - - // Link and instantiate the module. 
- TestScriptResolver resolver(state); - LinkResult linkResult = linkModule(*moduleAction->module,resolver); - if(linkResult.success) - { - state.hasInstantiatedModule = true; - state.lastModuleInstance = instantiateModule(*moduleAction->module,std::move(linkResult.resolvedImports)); - } - else - { - // Create an error for each import that couldn't be linked. - for(auto& missingImport : linkResult.missingImports) - { - testErrorf( - state, - moduleAction->locus, - "missing import module=\"%s\" export=\"%s\" type=\"%s\"", - missingImport.moduleName.c_str(), - missingImport.exportName.c_str(), - asString(missingImport.type).c_str() - ); - } - } - - // Register the module under its internal name. - if(moduleAction->internalModuleName.size()) - { - state.moduleInternalNameToInstanceMap[moduleAction->internalModuleName] = state.lastModuleInstance; - } - - return true; - } - case ActionType::invoke: - { - auto invokeAction = (InvokeAction*)action; - - // Look up the module this invoke uses. - ModuleInstance* moduleInstance = getModuleContextByInternalName(state,invokeAction->locus,"invoke",invokeAction->internalModuleName); - - // A null module instance at this point indicates a module that failed to link or instantiate, so don't produce further errors. - if(!moduleInstance) { return false; } - - // Find the named export in the module instance. - auto functionInstance = asFunctionNullable(getInstanceExport(moduleInstance,invokeAction->exportName)); - if(!functionInstance) { testErrorf(state,invokeAction->locus,"couldn't find exported function with name: %s",invokeAction->exportName.c_str()); return false; } - - // Execute the invoke - outResult = invokeFunction(functionInstance,invokeAction->arguments); - - return true; - } - case ActionType::get: - { - auto getAction = (GetAction*)action; - - // Look up the module this get uses. 
- ModuleInstance* moduleInstance = getModuleContextByInternalName(state,getAction->locus,"get",getAction->internalModuleName); - - // A null module instance at this point indicates a module that failed to link or instantiate, so just return without further errors. - if(!moduleInstance) { return false; } - - // Find the named export in the module instance. - auto globalInstance = asGlobalNullable(getInstanceExport(moduleInstance,getAction->exportName)); - if(!globalInstance) { testErrorf(state,getAction->locus,"couldn't find exported global with name: %s",getAction->exportName.c_str()); return false; } - - // Get the value of the specified global. - outResult = getGlobalValue(globalInstance); - - return true; - } - default: - Errors::unreachable(); - } -} - -// Tests whether a float is a "canonical" NaN, which just means that it's a NaN only the MSB of its significand set. -template bool isCanonicalOrArithmeticNaN(Float value,bool requireCanonical) -{ - Floats::FloatComponents components; - components.value = value; - return components.bits.exponent == Floats::FloatComponents::maxExponentBits - && (!requireCanonical || components.bits.significand == Floats::FloatComponents::canonicalSignificand); -} - -void processCommand(TestScriptState& state,const Command* command) -{ - try - { - switch(command->type) - { - case Command::_register: - { - auto registerCommand = (RegisterCommand*)command; - - // Look up a module by internal name, and bind the result to the public name. 
- ModuleInstance* moduleInstance = getModuleContextByInternalName(state,registerCommand->locus,"register",registerCommand->internalModuleName); - state.moduleNameToInstanceMap[registerCommand->moduleName] = moduleInstance; - break; - } - case Command::action: - { - Result result; - processAction(state,((ActionCommand*)command)->action.get(),result); - break; - } - case Command::assert_return: - { - auto assertCommand = (AssertReturnCommand*)command; - // Execute the action and do a bitwise comparison of the result to the expected result. - Result actionResult; - if(processAction(state,assertCommand->action.get(),actionResult) - && !areBitsEqual(actionResult,assertCommand->expectedReturn)) - { - testErrorf(state,assertCommand->locus,"expected %s but got %s", - asString(assertCommand->expectedReturn).c_str(), - asString(actionResult).c_str()); - } - break; - } - case Command::assert_return_canonical_nan: case Command::assert_return_arithmetic_nan: - { - auto assertCommand = (AssertReturnNaNCommand*)command; - // Execute the action and check that the result is a NaN of the expected type. - Result actionResult; - if(processAction(state,assertCommand->action.get(),actionResult)) - { - const bool requireCanonicalNaN = assertCommand->type == Command::assert_return_canonical_nan; - const bool isError = - actionResult.type == ResultType::f32 ? !isCanonicalOrArithmeticNaN(actionResult.f32,requireCanonicalNaN) - : actionResult.type == ResultType::f64 ? !isCanonicalOrArithmeticNaN(actionResult.f64,requireCanonicalNaN) - : true; - if(isError) - { - testErrorf(state,assertCommand->locus, - requireCanonicalNaN ? 
"expected canonical float NaN but got %s" : "expected float NaN but got %s", - asString(actionResult).c_str()); - } - } - break; - } - case Command::assert_trap: - { - auto assertCommand = (AssertTrapCommand*)command; - try - { - Result actionResult; - if(processAction(state,assertCommand->action.get(),actionResult)) - { - testErrorf(state,assertCommand->locus,"expected trap but got %s",asString(actionResult).c_str()); - } - } - catch(Runtime::Exception exception) - { - if(exception.cause != assertCommand->expectedCause) - { - testErrorf(state,assertCommand->action->locus,"expected %s trap but got %s trap", - describeExceptionCause(assertCommand->expectedCause), - describeExceptionCause(exception.cause)); - } - } - break; - } - case Command::assert_invalid: case Command::assert_malformed: - { - auto assertCommand = (AssertInvalidOrMalformedCommand*)command; - if(!assertCommand->wasInvalidOrMalformed) - { - testErrorf(state,assertCommand->locus,"module was %s", - assertCommand->type == Command::assert_invalid ? "valid" : "well formed"); - } - break; - } - case Command::assert_unlinkable: - { - auto assertCommand = (AssertUnlinkableCommand*)command; - Result result; - try - { - TestScriptResolver resolver(state); - LinkResult linkResult = linkModule(*assertCommand->moduleAction->module,resolver); - if(linkResult.success) - { - instantiateModule(*assertCommand->moduleAction->module,std::move(linkResult.resolvedImports)); - testErrorf(state,assertCommand->locus,"module was linkable"); - } - } - catch(Runtime::Exception) - { - // If the instantiation throws an exception, the assert_unlinkable succeeds. 
- } - break; - } - }; - } - catch(Runtime::Exception exception) - { - testErrorf(state,command->locus,"unexpected trap: %s",describeExceptionCause(exception.cause)); - } -} - -DEFINE_INTRINSIC_FUNCTION0(spectest,spectest_print,print,none) {} -DEFINE_INTRINSIC_FUNCTION1(spectest,spectest_print,print,none,i32,a) { std::cout << a << " : i32" << std::endl; } -DEFINE_INTRINSIC_FUNCTION1(spectest,spectest_print,print,none,i64,a) { std::cout << a << " : i64" << std::endl; } -DEFINE_INTRINSIC_FUNCTION1(spectest,spectest_print,print,none,f32,a) { std::cout << a << " : f32" << std::endl; } -DEFINE_INTRINSIC_FUNCTION1(spectest,spectest_print,print,none,f64,a) { std::cout << a << " : f64" << std::endl; } -DEFINE_INTRINSIC_FUNCTION2(spectest,spectest_print,print,none,f64,a,f64,b) { std::cout << a << " : f64" << std::endl << b << " : f64" << std::endl; } -DEFINE_INTRINSIC_FUNCTION2(spectest,spectest_print,print,none,i32,a,f32,b) { std::cout << a << " : i32" << std::endl << b << " : f32" << std::endl; } -DEFINE_INTRINSIC_FUNCTION2(spectest,spectest_print,print,none,i64,a,f64,b) { std::cout << a << " : i64" << std::endl << b << " : f64" << std::endl; } - -DEFINE_INTRINSIC_GLOBAL(spectest,spectest_globalI32,global,i32,false,666) -DEFINE_INTRINSIC_GLOBAL(spectest,spectest_globalI64,global,i64,false,0) -DEFINE_INTRINSIC_GLOBAL(spectest,spectest_globalF32,global,f32,false,0.0f) -DEFINE_INTRINSIC_GLOBAL(spectest,spectest_globalF64,global,f64,false,0.0) - -DEFINE_INTRINSIC_TABLE(spectest,spectest_table,table,TableType(TableElementType::anyfunc,false,SizeConstraints {10,20})) -DEFINE_INTRINSIC_MEMORY(spectest,spectest_memory,memory,MemoryType(false,SizeConstraints {1,2})) - -int commandMain(int argc,char** argv) -{ - if(argc != 2) - { - std::cerr << "Usage: Test in.wast" << std::endl; - return EXIT_FAILURE; - } - const char* filename = argv[1]; - - // Always enable debug logging for tests. 
- Log::setCategoryEnabled(Log::Category::debug,true); - - Runtime::init(); - - // Read the file into a string. - const std::string testScriptString = loadFile(filename); - if(!testScriptString.size()) { return EXIT_FAILURE; } - - // Process the test script. - TestScriptState testScriptState; - std::vector> testCommands; - - // Parse the test script. - WAST::parseTestCommands(testScriptString.c_str(),testScriptString.size(),testCommands,testScriptState.errors); - if(!testScriptState.errors.size()) - { - // Process the test script commands. - for(auto& command : testCommands) - { - processCommand(testScriptState,command.get()); - } - } - - if(testScriptState.errors.size()) - { - // Print any errors; - for(auto& error : testScriptState.errors) - { - std::cerr << filename << ":" << error.locus.describe() << ": " << error.message.c_str() << std::endl; - std::cerr << error.locus.sourceLine << std::endl; - std::cerr << std::setw(error.locus.column(8)) << "^" << std::endl; - } - - std::cerr << filename << ": testing failed!" << std::endl; - return EXIT_FAILURE; - } - else - { - std::cout << filename << ": all tests passed." 
<< std::endl; - return EXIT_SUCCESS; - } -} diff --git a/libraries/wasm-jit/Source/Programs/wavm.cpp b/libraries/wasm-jit/Source/Programs/wavm.cpp deleted file mode 100644 index a122f0a328f..00000000000 --- a/libraries/wasm-jit/Source/Programs/wavm.cpp +++ /dev/null @@ -1,256 +0,0 @@ -#include "Inline/BasicTypes.h" -#include "Inline/Timing.h" -#include "Platform/Platform.h" -#include "WAST/WAST.h" -#include "Runtime/Runtime.h" -#include "Runtime/Linker.h" -#include "Runtime/Intrinsics.h" -#include "Emscripten/Emscripten.h" -#include "IR/Module.h" -#include "IR/Operators.h" -#include "IR/Validate.h" - -#include "CLI.h" - -#include - -using namespace IR; -using namespace Runtime; - -void showHelp() -{ - std::cerr << "Usage: wavm [switches] [programfile] [--] [arguments]" << std::endl; - std::cerr << " in.wast|in.wasm\t\tSpecify program file (.wast/.wasm)" << std::endl; - std::cerr << " -f|--function name\t\tSpecify function name to run in module rather than main" << std::endl; - std::cerr << " -c|--check\t\t\tExit after checking that the program is valid" << std::endl; - std::cerr << " -d|--debug\t\t\tWrite additional debug information to stdout" << std::endl; - std::cerr << " --\t\t\t\tStop parsing arguments" << std::endl; -} - -struct RootResolver : Resolver -{ - std::map moduleNameToResolverMap; - - bool resolve(const std::string& moduleName,const std::string& exportName,ObjectType type,ObjectInstance*& outObject) override - { - // Try to resolve an intrinsic first. - if(IntrinsicResolver::singleton.resolve(moduleName,exportName,type,outObject)) { return true; } - - // Then look for a named module. - auto namedResolverIt = moduleNameToResolverMap.find(moduleName); - if(namedResolverIt != moduleNameToResolverMap.end()) - { - return namedResolverIt->second->resolve(moduleName,exportName,type,outObject); - } - - // Finally, stub in missing function imports. 
- if(type.kind == ObjectKind::function) - { - // Generate a function body that just uses the unreachable op to fault if called. - Serialization::ArrayOutputStream codeStream; - OperatorEncoderStream encoder(codeStream); - encoder.unreachable(); - encoder.end(); - - // Generate a module for the stub function. - Module stubModule; - DisassemblyNames stubModuleNames; - stubModule.types.push_back(asFunctionType(type)); - stubModule.functions.defs.push_back({{0},{},std::move(codeStream.getBytes()),{}}); - stubModule.exports.push_back({"importStub",ObjectKind::function,0}); - stubModuleNames.functions.push_back({std::string(moduleName) + "." + exportName,{}}); - IR::setDisassemblyNames(stubModule,stubModuleNames); - IR::validateDefinitions(stubModule); - - // Instantiate the module and return the stub function instance. - auto stubModuleInstance = instantiateModule(stubModule,{}); - outObject = getInstanceExport(stubModuleInstance,"importStub"); - Log::printf(Log::Category::error,"Generated stub for missing function import %s.%s : %s\n",moduleName.c_str(),exportName.c_str(),asString(type).c_str()); - return true; - } - else if(type.kind == ObjectKind::memory) - { - outObject = asObject(Runtime::createMemory(asMemoryType(type))); - Log::printf(Log::Category::error,"Generated stub for missing memory import %s.%s : %s\n",moduleName.c_str(),exportName.c_str(),asString(type).c_str()); - return true; - } - else if(type.kind == ObjectKind::table) - { - outObject = asObject(Runtime::createTable(asTableType(type))); - Log::printf(Log::Category::error,"Generated stub for missing table import %s.%s : %s\n",moduleName.c_str(),exportName.c_str(),asString(type).c_str()); - return true; - } - else if(type.kind == ObjectKind::global) - { - outObject = asObject(Runtime::createGlobal(asGlobalType(type),Runtime::Value(asGlobalType(type).valueType,Runtime::UntaggedValue()))); - Log::printf(Log::Category::error,"Generated stub for missing global import %s.%s : 
%s\n",moduleName.c_str(),exportName.c_str(),asString(type).c_str()); - return true; - } - - return false; - } -}; - -int mainBody(const char* filename,const char* functionName,bool onlyCheck,char** args) -{ - Module module; - if(filename) - { - if(!loadModule(filename,module)) { return EXIT_FAILURE; } - } - else - { - showHelp(); - return EXIT_FAILURE; - } - - if(onlyCheck) { return EXIT_SUCCESS; } - - // Link and instantiate the module. - RootResolver rootResolver; - LinkResult linkResult = linkModule(module,rootResolver); - if(!linkResult.success) - { - std::cerr << "Failed to link module:" << std::endl; - for(auto& missingImport : linkResult.missingImports) - { - std::cerr << "Missing import: module=\"" << missingImport.moduleName - << "\" export=\"" << missingImport.exportName - << "\" type=\"" << asString(missingImport.type) << "\"" << std::endl; - } - return EXIT_FAILURE; - } - ModuleInstance* moduleInstance = instantiateModule(module,std::move(linkResult.resolvedImports)); - if(!moduleInstance) { return EXIT_FAILURE; } - Emscripten::initInstance(module,moduleInstance); - - // Look up the function export to call. - FunctionInstance* functionInstance; - if(!functionName) - { - functionInstance = asFunctionNullable(getInstanceExport(moduleInstance,"main")); - if(!functionInstance) { functionInstance = asFunctionNullable(getInstanceExport(moduleInstance,"_main")); } - if(!functionInstance) - { - std::cerr << "Module does not export main function" << std::endl; - return EXIT_FAILURE; - } - } - else - { - functionInstance = asFunctionNullable(getInstanceExport(moduleInstance,functionName)); - if(!functionInstance) - { - std::cerr << "Module does not export '" << functionName << "'" << std::endl; - return EXIT_FAILURE; - } - } - const FunctionType* functionType = getFunctionType(functionInstance); - - // Set up the arguments for the invoke. 
- std::vector invokeArgs; - if(!functionName) - { - if(functionType->parameters.size() == 2) - { - MemoryInstance* defaultMemory = Runtime::getDefaultMemory(moduleInstance); - if(!defaultMemory) - { - std::cerr << "Module does not declare a default memory object to put arguments in." << std::endl; - return EXIT_FAILURE; - } - - std::vector argStrings; - argStrings.push_back(filename); - while(*args) { argStrings.push_back(*args++); }; - - Emscripten::injectCommandArgs(argStrings,invokeArgs); - } - else if(functionType->parameters.size() > 0) - { - std::cerr << "WebAssembly function requires " << functionType->parameters.size() << " argument(s), but only 0 or 2 can be passed!" << std::endl; - return EXIT_FAILURE; - } - } - else - { - for(U32 i = 0; args[i]; ++i) - { - Value value; - switch(functionType->parameters[i]) - { - case ValueType::i32: value = (U32)atoi(args[i]); break; - case ValueType::i64: value = (U64)atol(args[i]); break; - case ValueType::f32: value = (F32)atof(args[i]); break; - case ValueType::f64: value = atof(args[i]); break; - default: Errors::unreachable(); - } - invokeArgs.push_back(value); - } - } - - // Invoke the function. 
- Timing::Timer executionTimer; - auto functionResult = invokeFunction(functionInstance,invokeArgs); - Timing::logTimer("Invoked function",executionTimer); - - if(functionName) - { - Log::printf(Log::Category::debug,"%s returned: %s\n",functionName,asString(functionResult).c_str()); - return EXIT_SUCCESS; - } - else if(functionResult.type == ResultType::i32) { return functionResult.i32; } - else { return EXIT_SUCCESS; } -} - -int commandMain(int argc,char** argv) -{ - const char* filename = nullptr; - const char* functionName = nullptr; - - bool onlyCheck = false; - auto args = argv; - while(*++args) - { - if(!strcmp(*args, "--function") || !strcmp(*args, "-f")) - { - if(!*++args) { showHelp(); return EXIT_FAILURE; } - functionName = *args; - } - else if(!strcmp(*args, "--check") || !strcmp(*args, "-c")) - { - onlyCheck = true; - } - else if(!strcmp(*args, "--debug") || !strcmp(*args, "-d")) - { - Log::setCategoryEnabled(Log::Category::debug,true); - } - else if(!strcmp(*args, "--")) - { - ++args; - break; - } - else if(!strcmp(*args, "--help") || !strcmp(*args, "-h")) - { - showHelp(); - return EXIT_SUCCESS; - } - else if(!filename) - { - filename = *args; - } - else { break; } - } - - Runtime::init(); - - int returnCode = EXIT_FAILURE; - #ifdef __AFL_LOOP - while(__AFL_LOOP(2000)) - #endif - { - returnCode = mainBody(filename,functionName,onlyCheck,args); - Runtime::freeUnreferencedObjects({}); - } - return returnCode; -} diff --git a/libraries/wasm-jit/Source/Runtime/CMakeLists.txt b/libraries/wasm-jit/Source/Runtime/CMakeLists.txt index 34a14336a86..907dbf37871 100644 --- a/libraries/wasm-jit/Source/Runtime/CMakeLists.txt +++ b/libraries/wasm-jit/Source/Runtime/CMakeLists.txt @@ -1,18 +1,12 @@ set(Sources Intrinsics.cpp Linker.cpp - LLVMEmitIR.cpp - LLVMJIT.cpp LLVMJIT.h - Memory.cpp - ModuleInstance.cpp ObjectGC.cpp - Runtime.cpp RuntimePrivate.h - Table.cpp - Threads.cpp WAVMIntrinsics.cpp ) + set(PublicHeaders ${WAVM_INCLUDE_DIR}/Runtime/Intrinsics.h 
${WAVM_INCLUDE_DIR}/Runtime/Linker.h @@ -22,20 +16,12 @@ include_directories(${WAVM_INCLUDE_DIR}/Runtime) add_library(Runtime STATIC ${Sources} ${PublicHeaders}) -# Find an installed build of LLVM -find_package(LLVM 4.0 REQUIRED CONFIG) - -# Include the LLVM headers -include_directories(${LLVM_INCLUDE_DIRS}) -add_definitions(${LLVM_DEFINITIONS}) - add_definitions(-DRUNTIME_API=DLL_EXPORT) target_include_directories( Runtime PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../../../chain/include ) # Link against the LLVM libraries -llvm_map_components_to_libnames(LLVM_LIBS support core passes mcjit native DebugInfoDWARF) -target_link_libraries(Runtime Platform Logging IR ${LLVM_LIBS}) +target_link_libraries(Runtime Platform Logging IR) install(TARGETS Runtime LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR} diff --git a/libraries/wasm-jit/Source/Runtime/Intrinsics.cpp b/libraries/wasm-jit/Source/Runtime/Intrinsics.cpp index f62fb477adb..11f5dc3a3f1 100644 --- a/libraries/wasm-jit/Source/Runtime/Intrinsics.cpp +++ b/libraries/wasm-jit/Source/Runtime/Intrinsics.cpp @@ -11,9 +11,6 @@ namespace Intrinsics struct Singleton { std::map functionMap; - std::map variableMap; - std::map memoryMap; - std::map tableMap; Platform::Mutex* mutex; Singleton(): mutex(Platform::createMutex()) {} @@ -51,71 +48,6 @@ namespace Intrinsics delete function; } - Global::Global(const char* inName,IR::GlobalType inType) - : name(inName) - , globalType(inType) - { - global = Runtime::createGlobal(inType,Runtime::Value((I64)0)); - value = &global->value; - { - Platform::Lock lock(Singleton::get().mutex); - Singleton::get().variableMap[getDecoratedName(inName,inType)] = this; - } - } - - Global::~Global() - { - { - Platform::Lock Lock(Singleton::get().mutex); - Singleton::get().variableMap.erase(Singleton::get().variableMap.find(getDecoratedName(name,global->type))); - } - delete global; - } - - void Global::reset() - { - global = Runtime::createGlobal(globalType,Runtime::Value((I64)0)); - value = 
&global->value; - } - - Table::Table(const char* inName,const IR::TableType& type) - : name(inName) - , table(Runtime::createTable(type)) - { - if(!table) { Errors::fatal("failed to create intrinsic table"); } - - Platform::Lock lock(Singleton::get().mutex); - Singleton::get().tableMap[getDecoratedName(inName,type)] = this; - } - - Table::~Table() - { - { - Platform::Lock Lock(Singleton::get().mutex); - Singleton::get().tableMap.erase(Singleton::get().tableMap.find(getDecoratedName(name,table->type))); - } - delete table; - } - - Memory::Memory(const char* inName,const IR::MemoryType& type) - : name(inName) - , memory(Runtime::createMemory(type)) - { - if(!memory) { Errors::fatal("failed to create intrinsic memory"); } - - Platform::Lock lock(Singleton::get().mutex); - Singleton::get().memoryMap[getDecoratedName(inName,type)] = this; - } - - Memory::~Memory() - { - { - Platform::Lock Lock(Singleton::get().mutex); - Singleton::get().memoryMap.erase(Singleton::get().memoryMap.find(getDecoratedName(name,memory->type))); - } - delete memory; - } - Runtime::ObjectInstance* find(const std::string& name,const IR::ObjectType& type) { std::string decoratedName = getDecoratedName(name,type); @@ -131,20 +63,17 @@ namespace Intrinsics } case IR::ObjectKind::table: { - auto keyValue = Singleton::get().tableMap.find(decoratedName); - result = keyValue == Singleton::get().tableMap.end() ? nullptr : asObject((Runtime::TableInstance*)*keyValue->second); + result = nullptr; break; } case IR::ObjectKind::memory: { - auto keyValue = Singleton::get().memoryMap.find(decoratedName); - result = keyValue == Singleton::get().memoryMap.end() ? nullptr : asObject((Runtime::MemoryInstance*)*keyValue->second); + result = nullptr; break; } case IR::ObjectKind::global: { - auto keyValue = Singleton::get().variableMap.find(decoratedName); - result = keyValue == Singleton::get().variableMap.end() ? 
nullptr : asObject(keyValue->second->global); + result = nullptr; break; } default: Errors::unreachable(); @@ -158,9 +87,6 @@ namespace Intrinsics Platform::Lock lock(Singleton::get().mutex); std::vector result; for(auto mapIt : Singleton::get().functionMap) { result.push_back(mapIt.second->function); } - for(auto mapIt : Singleton::get().tableMap) { result.push_back((Runtime::TableInstance*)*mapIt.second); } - for(auto mapIt : Singleton::get().memoryMap) { result.push_back((Runtime::MemoryInstance*)*mapIt.second); } - for(auto mapIt : Singleton::get().variableMap) { result.push_back(mapIt.second->global); } return result; } } diff --git a/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp b/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp deleted file mode 100644 index 18cf2f4cfb1..00000000000 --- a/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp +++ /dev/null @@ -1,787 +0,0 @@ -#include "LLVMJIT.h" -#include "Inline/BasicTypes.h" -#include "Inline/Timing.h" -#include "Logging/Logging.h" -#include "RuntimePrivate.h" -#include "IR/Validate.h" - -#ifdef _DEBUG - // This needs to be 1 to allow debuggers such as Visual Studio to place breakpoints and step through the JITed code. 
- #define USE_WRITEABLE_JIT_CODE_PAGES 1 - - #define DUMP_UNOPTIMIZED_MODULE 1 - #define VERIFY_MODULE 1 - #define DUMP_OPTIMIZED_MODULE 1 - #define PRINT_DISASSEMBLY 0 -#else - #define USE_WRITEABLE_JIT_CODE_PAGES 0 - #define DUMP_UNOPTIMIZED_MODULE 0 - #define VERIFY_MODULE 0 - #define DUMP_OPTIMIZED_MODULE 0 - #define PRINT_DISASSEMBLY 0 -#endif - -#if PRINT_DISASSEMBLY -#include "llvm-c/Disassembler.h" -#endif - -namespace LLVMJIT -{ - llvm::LLVMContext context; - llvm::TargetMachine* targetMachine = nullptr; - llvm::Type* llvmResultTypes[(Uptr)ResultType::num]; - - llvm::Type* llvmI8Type; - llvm::Type* llvmI16Type; - llvm::Type* llvmI32Type; - llvm::Type* llvmI64Type; - llvm::Type* llvmF32Type; - llvm::Type* llvmF64Type; - llvm::Type* llvmVoidType; - llvm::Type* llvmBoolType; - llvm::Type* llvmI8PtrType; - - #if ENABLE_SIMD_PROTOTYPE - llvm::Type* llvmI8x16Type; - llvm::Type* llvmI16x8Type; - llvm::Type* llvmI32x4Type; - llvm::Type* llvmI64x2Type; - llvm::Type* llvmF32x4Type; - llvm::Type* llvmF64x2Type; - #endif - - llvm::Constant* typedZeroConstants[(Uptr)ValueType::num]; - - // A map from address to loaded JIT symbols. - Platform::Mutex* addressToSymbolMapMutex = Platform::createMutex(); - std::map addressToSymbolMap; - - // A map from function types to function indices in the invoke thunk unit. - std::map invokeThunkTypeToSymbolMap; - - // Information about a JIT symbol, used to map instruction pointers to descriptive names. 
- struct JITSymbol - { - enum class Type - { - functionInstance, - invokeThunk - }; - Type type; - union - { - FunctionInstance* functionInstance; - const FunctionType* invokeThunkType; - }; - Uptr baseAddress; - Uptr numBytes; - std::map offsetToOpIndexMap; - - JITSymbol(FunctionInstance* inFunctionInstance,Uptr inBaseAddress,Uptr inNumBytes,std::map&& inOffsetToOpIndexMap) - : type(Type::functionInstance), functionInstance(inFunctionInstance), baseAddress(inBaseAddress), numBytes(inNumBytes), offsetToOpIndexMap(inOffsetToOpIndexMap) {} - - JITSymbol(const FunctionType* inInvokeThunkType,Uptr inBaseAddress,Uptr inNumBytes,std::map&& inOffsetToOpIndexMap) - : type(Type::invokeThunk), invokeThunkType(inInvokeThunkType), baseAddress(inBaseAddress), numBytes(inNumBytes), offsetToOpIndexMap(inOffsetToOpIndexMap) {} - }; - - // Allocates memory for the LLVM object loader. - struct UnitMemoryManager : llvm::RTDyldMemoryManager - { - UnitMemoryManager() - : imageBaseAddress(nullptr) - , numAllocatedImagePages(0) - , isFinalized(false) - , codeSection({0}) - , readOnlySection({0}) - , readWriteSection({0}) - , hasRegisteredEHFrames(false) - {} - virtual ~UnitMemoryManager() override - { - // Deregister the exception handling frame info. - if(hasRegisteredEHFrames) - { - hasRegisteredEHFrames = false; - llvm::RTDyldMemoryManager::deregisterEHFrames(ehFramesAddr,ehFramesLoadAddr,ehFramesNumBytes); - } - - // Decommit the image pages, but leave them reserved to catch any references to them that might erroneously remain. 
- if(numAllocatedImagePages) - Platform::decommitVirtualPages(imageBaseAddress,numAllocatedImagePages); - } - - void registerEHFrames(U8* addr, U64 loadAddr,uintptr_t numBytes) override - { - } - void deregisterEHFrames(U8* addr, U64 loadAddr,uintptr_t numBytes) override - { - } - - virtual bool needsToReserveAllocationSpace() override { return true; } - virtual void reserveAllocationSpace(uintptr_t numCodeBytes,U32 codeAlignment,uintptr_t numReadOnlyBytes,U32 readOnlyAlignment,uintptr_t numReadWriteBytes,U32 readWriteAlignment) override - { - if(numReadWriteBytes) - Runtime::causeException(Exception::Cause::outOfMemory); - // Calculate the number of pages to be used by each section. - codeSection.numPages = shrAndRoundUp(numCodeBytes,Platform::getPageSizeLog2()); - readOnlySection.numPages = shrAndRoundUp(numReadOnlyBytes,Platform::getPageSizeLog2()); - readWriteSection.numPages = shrAndRoundUp(numReadWriteBytes,Platform::getPageSizeLog2()); - numAllocatedImagePages = codeSection.numPages + readOnlySection.numPages + readWriteSection.numPages; - if(numAllocatedImagePages) - { - // Reserve enough contiguous pages for all sections. 
- imageBaseAddress = Platform::allocateVirtualPages(numAllocatedImagePages); - if(!imageBaseAddress || !Platform::commitVirtualPages(imageBaseAddress,numAllocatedImagePages)) { Errors::fatal("memory allocation for JIT code failed"); } - codeSection.baseAddress = imageBaseAddress; - readOnlySection.baseAddress = codeSection.baseAddress + (codeSection.numPages << Platform::getPageSizeLog2()); - readWriteSection.baseAddress = readOnlySection.baseAddress + (readOnlySection.numPages << Platform::getPageSizeLog2()); - } - } - virtual U8* allocateCodeSection(uintptr_t numBytes,U32 alignment,U32 sectionID,llvm::StringRef sectionName) override - { - return allocateBytes((Uptr)numBytes,alignment,codeSection); - } - virtual U8* allocateDataSection(uintptr_t numBytes,U32 alignment,U32 sectionID,llvm::StringRef SectionName,bool isReadOnly) override - { - return allocateBytes((Uptr)numBytes,alignment,isReadOnly ? readOnlySection : readWriteSection); - } - virtual bool finalizeMemory(std::string* ErrMsg = nullptr) override - { - WAVM_ASSERT_THROW(!isFinalized); - isFinalized = true; - // Set the requested final memory access for each section's pages. - const Platform::MemoryAccess codeAccess = USE_WRITEABLE_JIT_CODE_PAGES ? Platform::MemoryAccess::ReadWriteExecute : Platform::MemoryAccess::Execute; - if(codeSection.numPages && !Platform::setVirtualPageAccess(codeSection.baseAddress,codeSection.numPages,codeAccess)) { return false; } - if(readOnlySection.numPages && !Platform::setVirtualPageAccess(readOnlySection.baseAddress,readOnlySection.numPages,Platform::MemoryAccess::ReadOnly)) { return false; } - if(readWriteSection.numPages && !Platform::setVirtualPageAccess(readWriteSection.baseAddress,readWriteSection.numPages,Platform::MemoryAccess::ReadWrite)) { return false; } - return true; - } - virtual void invalidateInstructionCache() - { - // Invalidate the instruction cache for the whole image. 
- llvm::sys::Memory::InvalidateInstructionCache(imageBaseAddress,numAllocatedImagePages << Platform::getPageSizeLog2()); - } - - U8* getImageBaseAddress() const { return imageBaseAddress; } - - private: - struct Section - { - U8* baseAddress; - Uptr numPages; - Uptr numCommittedBytes; - }; - - U8* imageBaseAddress; - Uptr numAllocatedImagePages; - bool isFinalized; - - Section codeSection; - Section readOnlySection; - Section readWriteSection; - - bool hasRegisteredEHFrames; - U8* ehFramesAddr; - U64 ehFramesLoadAddr; - Uptr ehFramesNumBytes; - - U8* allocateBytes(Uptr numBytes,Uptr alignment,Section& section) - { - WAVM_ASSERT_THROW(section.baseAddress); - WAVM_ASSERT_THROW(!(alignment & (alignment - 1))); - WAVM_ASSERT_THROW(!isFinalized); - - // Allocate the section at the lowest uncommitted byte of image memory. - U8* allocationBaseAddress = section.baseAddress + align(section.numCommittedBytes,alignment); - WAVM_ASSERT_THROW(!(reinterpret_cast(allocationBaseAddress) & (alignment-1))); - section.numCommittedBytes = align(section.numCommittedBytes,alignment) + align(numBytes,alignment); - - // Check that enough space was reserved in the section. - if(section.numCommittedBytes > (section.numPages << Platform::getPageSizeLog2())) { Errors::fatal("didn't reserve enough space in section"); } - - return allocationBaseAddress; - } - - static Uptr align(Uptr size,Uptr alignment) { return (size + alignment - 1) & ~(alignment - 1); } - static Uptr shrAndRoundUp(Uptr value,Uptr shift) { return (value + (Uptr(1)<> shift; } - - UnitMemoryManager(const UnitMemoryManager&) = delete; - void operator=(const UnitMemoryManager&) = delete; - }; - - // A unit of JIT compilation. - // Encapsulates the LLVM JIT compilation pipeline but allows subclasses to define how the resulting code is used. 
- struct JITUnit - { - JITUnit(bool inShouldLogMetrics = true) - : shouldLogMetrics(inShouldLogMetrics) - #ifdef _WIN32 - , pdataCopy(nullptr) - #endif - { - objectLayer = llvm::make_unique(NotifyLoadedFunctor(this),NotifyFinalizedFunctor(this)); - objectLayer->setProcessAllSections(true); - compileLayer = llvm::make_unique(*objectLayer,llvm::orc::SimpleCompiler(*targetMachine)); - } - ~JITUnit() - { - if(handleIsValid) - compileLayer->removeModuleSet(handle); - #ifdef _WIN64 - if(pdataCopy) { Platform::deregisterSEHUnwindInfo(reinterpret_cast(pdataCopy)); } - #endif - } - - void compile(llvm::Module* llvmModule); - - virtual void notifySymbolLoaded(const char* name,Uptr baseAddress,Uptr numBytes,std::map&& offsetToOpIndexMap) = 0; - - private: - - // Functor that receives notifications when an object produced by the JIT is loaded. - struct NotifyLoadedFunctor - { - JITUnit* jitUnit; - NotifyLoadedFunctor(JITUnit* inJITUnit): jitUnit(inJITUnit) {} - void operator()( - const llvm::orc::ObjectLinkingLayerBase::ObjSetHandleT& objectSetHandle, - const std::vector>>& objectSet, - const std::vector>& loadedObjects - ); - }; - - // Functor that receives notifications when an object produced by the JIT is finalized. 
- struct NotifyFinalizedFunctor - { - JITUnit* jitUnit; - NotifyFinalizedFunctor(JITUnit* inJITUnit): jitUnit(inJITUnit) {} - void operator()(const llvm::orc::ObjectLinkingLayerBase::ObjSetHandleT& objectSetHandle); - }; - typedef llvm::orc::ObjectLinkingLayer ObjectLayer; - typedef llvm::orc::IRCompileLayer CompileLayer; - - UnitMemoryManager memoryManager; - std::unique_ptr objectLayer; - std::unique_ptr compileLayer; - CompileLayer::ModuleSetHandleT handle; - bool handleIsValid = false; - bool shouldLogMetrics; - - struct LoadedObject - { - llvm::object::ObjectFile* object; - llvm::RuntimeDyld::LoadedObjectInfo* loadedObject; - }; - - std::vector loadedObjects; - - #ifdef _WIN32 - U8* pdataCopy; - #endif - }; - - // The JIT compilation unit for a WebAssembly module instance. - struct JITModule : JITUnit, JITModuleBase - { - ModuleInstance* moduleInstance; - - std::vector functionDefSymbols; - - JITModule(ModuleInstance* inModuleInstance): moduleInstance(inModuleInstance) {} - ~JITModule() override - { - } - - void notifySymbolLoaded(const char* name,Uptr baseAddress,Uptr numBytes,std::map&& offsetToOpIndexMap) override - { - // Save the address range this function was loaded at for future address->symbol lookups. - Uptr functionDefIndex; - if(getFunctionIndexFromExternalName(name,functionDefIndex)) - { - WAVM_ASSERT_THROW(moduleInstance); - WAVM_ASSERT_THROW(functionDefIndex < moduleInstance->functionDefs.size()); - FunctionInstance* functionInstance = moduleInstance->functionDefs[functionDefIndex]; - functionInstance->nativeFunction = reinterpret_cast(baseAddress); - } - } - }; - - // The JIT compilation unit for a single invoke thunk. 
- struct JITInvokeThunkUnit : JITUnit - { - const FunctionType* functionType; - - JITSymbol* symbol; - - JITInvokeThunkUnit(const FunctionType* inFunctionType): JITUnit(false), functionType(inFunctionType), symbol(nullptr) {} - - void notifySymbolLoaded(const char* name,Uptr baseAddress,Uptr numBytes,std::map&& offsetToOpIndexMap) override - { - #if defined(_WIN32) && !defined(_WIN64) - WAVM_ASSERT_THROW(!strcmp(name,"_invokeThunk")); - #else - WAVM_ASSERT_THROW(!strcmp(name,"invokeThunk")); - #endif - symbol = new JITSymbol(functionType,baseAddress,numBytes,std::move(offsetToOpIndexMap)); - } - }; - - // Used to override LLVM's default behavior of looking up unresolved symbols in DLL exports. - struct NullResolver : llvm::JITSymbolResolver - { - static NullResolver singleton; - virtual llvm::JITSymbol findSymbol(const std::string& name) override; - virtual llvm::JITSymbol findSymbolInLogicalDylib(const std::string& name) override; - }; - - static std::map runtimeSymbolMap = - { - #ifdef _WIN32 - // the LLVM X86 code generator calls __chkstk when allocating more than 4KB of stack space - {"__chkstk","__chkstk"}, - #ifndef _WIN64 - {"__aullrem","_aullrem"}, - {"__allrem","_allrem"}, - {"__aulldiv","_aulldiv"}, - {"__alldiv","_alldiv"}, - #endif - #endif - #ifdef __arm__ - {"__aeabi_uidiv","__aeabi_uidiv"}, - {"__aeabi_idiv","__aeabi_idiv"}, - {"__aeabi_idivmod","__aeabi_idivmod"}, - {"__aeabi_uldiv","__aeabi_uldiv"}, - {"__aeabi_uldivmod","__aeabi_uldivmod"}, - {"__aeabi_unwind_cpp_pr0","__aeabi_unwind_cpp_pr0"}, - {"__aeabi_unwind_cpp_pr1","__aeabi_unwind_cpp_pr1"}, - #endif - }; - - NullResolver NullResolver::singleton; - llvm::JITSymbol NullResolver::findSymbol(const std::string& name) - { - // Allow some intrinsics used by LLVM - auto runtimeSymbolNameIt = runtimeSymbolMap.find(name); - if(runtimeSymbolNameIt != runtimeSymbolMap.end()) - { - const char* lookupName = runtimeSymbolNameIt->second; - void *addr = 
llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(lookupName); - if(!addr) { Errors::fatalf("LLVM generated code references undefined external symbol: %s\n",lookupName); } - return llvm::JITSymbol(reinterpret_cast(addr),llvm::JITSymbolFlags::None); - } - - Errors::fatalf("LLVM generated code references disallowed external symbol: %s\n",name.c_str()); - } - llvm::JITSymbol NullResolver::findSymbolInLogicalDylib(const std::string& name) { return llvm::JITSymbol(nullptr); } - - void JITUnit::NotifyLoadedFunctor::operator()( - const llvm::orc::ObjectLinkingLayerBase::ObjSetHandleT& objectSetHandle, - const std::vector>>& objectSet, - const std::vector>& loadedObjects - ) - { - WAVM_ASSERT_THROW(objectSet.size() == loadedObjects.size()); - for(Uptr objectIndex = 0;objectIndex < loadedObjects.size();++objectIndex) - { - llvm::object::ObjectFile* object = objectSet[objectIndex].get()->getBinary(); - llvm::RuntimeDyld::LoadedObjectInfo* loadedObject = loadedObjects[objectIndex].get(); - - // Make a copy of the loaded object info for use by the finalizer. - jitUnit->loadedObjects.push_back({object,loadedObject}); - - #ifdef _WIN64 - // On Windows, look for .pdata and .xdata sections containing information about how to unwind the stack. - // This needs to be done before the below emitAndFinalize call, which will incorrectly apply relocations to the unwind info. - - // Find the pdata section. - llvm::object::SectionRef pdataSection; - for(auto section : object->sections()) - { - llvm::StringRef sectionName; - if(!section.getName(sectionName)) - { - if(sectionName == ".pdata") { pdataSection = section; break; } - } - } - - // Pass the pdata section to the platform to register unwind info. 
- if(pdataSection.getObject()) - { - const Uptr imageBaseAddress = reinterpret_cast(jitUnit->memoryManager.getImageBaseAddress()); - const Uptr pdataSectionLoadAddress = (Uptr)loadedObject->getSectionLoadAddress(pdataSection); - - // The LLVM COFF dynamic loader doesn't handle the image-relative relocations used by the pdata section, - // and overwrites those values with o: https://github.com/llvm-mirror/llvm/blob/e84d8c12d5157a926db15976389f703809c49aa5/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h#L96 - // This works around that by making a copy of the pdata section and doing the pdata relocations manually. - jitUnit->pdataCopy = new U8[pdataSection.getSize()]; - memcpy(jitUnit->pdataCopy,reinterpret_cast(pdataSectionLoadAddress),pdataSection.getSize()); - - for(auto pdataRelocIt : pdataSection.relocations()) - { - // Only handle type 3 (IMAGE_REL_AMD64_ADDR32NB). - if(pdataRelocIt.getType() != 3) { Errors::unreachable(); } - - const auto symbol = pdataRelocIt.getSymbol(); - const U64 symbolAddress = symbol->getAddress().get(); - const llvm::object::section_iterator symbolSection = symbol->getSection().get(); - U32* valueToRelocate = (U32*)(jitUnit->pdataCopy + pdataRelocIt.getOffset()); - const U64 relocatedValue64 = - + (symbolAddress - symbolSection->getAddress()) - + loadedObject->getSectionLoadAddress(*symbolSection) - + *valueToRelocate - - imageBaseAddress; - if(relocatedValue64 > UINT32_MAX) { Errors::unreachable(); } - *valueToRelocate = (U32)relocatedValue64; - } - - Platform::registerSEHUnwindInfo(imageBaseAddress,reinterpret_cast(jitUnit->pdataCopy),pdataSection.getSize()); - } - #endif - } - - } - - #if PRINT_DISASSEMBLY - void disassembleFunction(U8* bytes,Uptr numBytes) - { - LLVMDisasmContextRef disasmRef = LLVMCreateDisasm(llvm::sys::getProcessTriple().c_str(),nullptr,0,nullptr,nullptr); - - U8* nextByte = bytes; - Uptr numBytesRemaining = numBytes; - while(numBytesRemaining) - { - char instructionBuffer[256]; - const Uptr 
numInstructionBytes = LLVMDisasmInstruction( - disasmRef, - nextByte, - numBytesRemaining, - reinterpret_cast(nextByte), - instructionBuffer, - sizeof(instructionBuffer) - ); - WAVM_ASSERT_THROW(numInstructionBytes > 0); - WAVM_ASSERT_THROW(numInstructionBytes <= numBytesRemaining); - numBytesRemaining -= numInstructionBytes; - nextByte += numInstructionBytes; - - Log::printf(Log::Category::debug,"\t\t%s\n",instructionBuffer); - }; - - LLVMDisasmDispose(disasmRef); - } - #endif - - void JITUnit::NotifyFinalizedFunctor::operator()(const llvm::orc::ObjectLinkingLayerBase::ObjSetHandleT& objectSetHandle) - { - for(Uptr objectIndex = 0;objectIndex < jitUnit->loadedObjects.size();++objectIndex) - { - llvm::object::ObjectFile* object = jitUnit->loadedObjects[objectIndex].object; - llvm::RuntimeDyld::LoadedObjectInfo* loadedObject = jitUnit->loadedObjects[objectIndex].loadedObject; - - // Iterate over the functions in the loaded object. - for(auto symbolSizePair : llvm::object::computeSymbolSizes(*object)) - { - auto symbol = symbolSizePair.first; - auto name = symbol.getName(); - auto address = symbol.getAddress(); - if( symbol.getType() && symbol.getType().get() == llvm::object::SymbolRef::ST_Function - && name - && address) - { - // Compute the address the functions was loaded at. - WAVM_ASSERT_THROW(*address <= UINTPTR_MAX); - Uptr loadedAddress = Uptr(*address); - auto symbolSection = symbol.getSection(); - if(symbolSection) - { - loadedAddress += (Uptr)loadedObject->getSectionLoadAddress(*symbolSection.get()); - } - - #if PRINT_DISASSEMBLY - Log::printf(Log::Category::error,"Disassembly for function %s\n",name.get().data()); - disassembleFunction(reinterpret_cast(loadedAddress),Uptr(symbolSizePair.second)); - #endif - - std::map offsetToOpIndexMap; - // Notify the JIT unit that the symbol was loaded. 
- WAVM_ASSERT_THROW(symbolSizePair.second <= UINTPTR_MAX); - jitUnit->notifySymbolLoaded( - name->data(),loadedAddress, - Uptr(symbolSizePair.second), - std::move(offsetToOpIndexMap) - ); - } - } - } - - jitUnit->loadedObjects.clear(); - } - - static Uptr printedModuleId = 0; - - void printModule(const llvm::Module* llvmModule,const char* filename) - { - std::error_code errorCode; - std::string augmentedFilename = std::string(filename) + std::to_string(printedModuleId++) + ".ll"; - llvm::raw_fd_ostream dumpFileStream(augmentedFilename,errorCode,llvm::sys::fs::OpenFlags::F_Text); - llvmModule->print(dumpFileStream,nullptr); - Log::printf(Log::Category::debug,"Dumped LLVM module to: %s\n",augmentedFilename.c_str()); - } - - void JITUnit::compile(llvm::Module* llvmModule) - { - // Get a target machine object for this host, and set the module to use its data layout. - llvmModule->setDataLayout(targetMachine->createDataLayout()); - - // Verify the module. - if(DUMP_UNOPTIMIZED_MODULE) { printModule(llvmModule,"llvmDump"); } - if(VERIFY_MODULE) - { - std::string verifyOutputString; - llvm::raw_string_ostream verifyOutputStream(verifyOutputString); - if(llvm::verifyModule(*llvmModule,&verifyOutputStream)) - { Errors::fatalf("LLVM verification errors:\n%s\n",verifyOutputString.c_str()); } - Log::printf(Log::Category::debug,"Verified LLVM module\n"); - } - - // Run some optimization on the module's functions. 
- Timing::Timer optimizationTimer; - - auto fpm = new llvm::legacy::FunctionPassManager(llvmModule); - fpm->add(llvm::createPromoteMemoryToRegisterPass()); - fpm->add(llvm::createInstructionCombiningPass()); - fpm->add(llvm::createCFGSimplificationPass()); - fpm->add(llvm::createJumpThreadingPass()); - fpm->add(llvm::createConstantPropagationPass()); - fpm->doInitialization(); - - for(auto functionIt = llvmModule->begin();functionIt != llvmModule->end();++functionIt) - { fpm->run(*functionIt); } - delete fpm; - - if(shouldLogMetrics) - { - Timing::logRatePerSecond("Optimized LLVM module",optimizationTimer,(F64)llvmModule->size(),"functions"); - } - - if(DUMP_OPTIMIZED_MODULE) { printModule(llvmModule,"llvmOptimizedDump"); } - - // Pass the module to the JIT compiler. - Timing::Timer machineCodeTimer; - handle = compileLayer->addModuleSet( - std::vector{llvmModule}, - &memoryManager, - &NullResolver::singleton); - handleIsValid = true; - compileLayer->emitAndFinalize(handle); - - if(shouldLogMetrics) - { - Timing::logRatePerSecond("Generated machine code",machineCodeTimer,(F64)llvmModule->size(),"functions"); - } - - delete llvmModule; - } - - void instantiateModule(const IR::Module& module,ModuleInstance* moduleInstance) - { - // Emit LLVM IR for the module. - auto llvmModule = emitModule(module,moduleInstance); - - // Construct the JIT compilation pipeline for this module. - auto jitModule = new JITModule(moduleInstance); - moduleInstance->jitModule = jitModule; - - // Compile the module. 
- jitModule->compile(llvmModule); - } - - std::string getExternalFunctionName(ModuleInstance* moduleInstance,Uptr functionDefIndex) - { - WAVM_ASSERT_THROW(functionDefIndex < moduleInstance->functionDefs.size()); - return "wasmFunc" + std::to_string(functionDefIndex) - + "_" + moduleInstance->functionDefs[functionDefIndex]->debugName; - } - - bool getFunctionIndexFromExternalName(const char* externalName,Uptr& outFunctionDefIndex) - { - #if defined(_WIN32) && !defined(_WIN64) - const char wasmFuncPrefix[] = "_wasmFunc"; - #else - const char wasmFuncPrefix[] = "wasmFunc"; - #endif - const Uptr numPrefixChars = sizeof(wasmFuncPrefix) - 1; - if(!strncmp(externalName,wasmFuncPrefix,numPrefixChars)) - { - char* numberEnd = nullptr; - U64 functionDefIndex64 = std::strtoull(externalName + numPrefixChars,&numberEnd,10); - if(functionDefIndex64 > UINTPTR_MAX) { return false; } - outFunctionDefIndex = Uptr(functionDefIndex64); - return true; - } - else { return false; } - } - - bool describeInstructionPointer(Uptr ip,std::string& outDescription) - { - JITSymbol* symbol; - { - Platform::Lock addressToSymbolMapLock(addressToSymbolMapMutex); - auto symbolIt = addressToSymbolMap.upper_bound(ip); - if(symbolIt == addressToSymbolMap.end()) { return false; } - symbol = symbolIt->second; - } - if(ip < symbol->baseAddress || ip >= symbol->baseAddress + symbol->numBytes) { return false; } - - switch(symbol->type) - { - case JITSymbol::Type::functionInstance: - outDescription = symbol->functionInstance->debugName; - if(!outDescription.size()) { outDescription = ""; } - break; - case JITSymbol::Type::invokeThunk: - outDescription = "invokeThunkType) + ">"; - break; - default: Errors::unreachable(); - }; - - // Find the highest entry in the offsetToOpIndexMap whose offset is <= the symbol-relative IP. 
- U32 ipOffset = (U32)(ip - symbol->baseAddress); - Iptr opIndex = -1; - for(auto offsetMapIt : symbol->offsetToOpIndexMap) - { - if(offsetMapIt.first <= ipOffset) { opIndex = offsetMapIt.second; } - else { break; } - } - if(opIndex >= 0) { outDescription += " (op " + std::to_string(opIndex) + ")"; } - return true; - } - - InvokeFunctionPointer getInvokeThunk(const FunctionType* functionType) - { - // Reuse cached invoke thunks for the same function type. - auto mapIt = invokeThunkTypeToSymbolMap.find(functionType); - if(mapIt != invokeThunkTypeToSymbolMap.end()) { return reinterpret_cast(mapIt->second->baseAddress); } - - auto llvmModule = new llvm::Module("",context); - auto llvmFunctionType = llvm::FunctionType::get( - llvmVoidType, - {asLLVMType(functionType)->getPointerTo(),llvmI64Type->getPointerTo()}, - false); - auto llvmFunction = llvm::Function::Create(llvmFunctionType,llvm::Function::ExternalLinkage,"invokeThunk",llvmModule); - auto argIt = llvmFunction->args().begin(); - llvm::Value* functionPointer = &*argIt++; - llvm::Value* argBaseAddress = &*argIt; - auto entryBlock = llvm::BasicBlock::Create(context,"entry",llvmFunction); - llvm::IRBuilder<> irBuilder(entryBlock); - - // Load the function's arguments from an array of 64-bit values at an address provided by the caller. - std::vector structArgLoads; - for(Uptr parameterIndex = 0;parameterIndex < functionType->parameters.size();++parameterIndex) - { - structArgLoads.push_back(irBuilder.CreateLoad( - irBuilder.CreatePointerCast( - irBuilder.CreateInBoundsGEP(argBaseAddress,{emitLiteral((Uptr)parameterIndex)}), - asLLVMType(functionType->parameters[parameterIndex])->getPointerTo() - ) - )); - } - - // Call the llvm function with the actual implementation. - auto returnValue = irBuilder.CreateCall(functionPointer,structArgLoads); - - // If the function has a return value, write it to the end of the argument array. 
- if(functionType->ret != ResultType::none) - { - auto llvmResultType = asLLVMType(functionType->ret); - irBuilder.CreateStore( - returnValue, - irBuilder.CreatePointerCast( - irBuilder.CreateInBoundsGEP(argBaseAddress,{emitLiteral((Uptr)functionType->parameters.size())}), - llvmResultType->getPointerTo() - ) - ); - } - - irBuilder.CreateRetVoid(); - - // Compile the invoke thunk. - auto jitUnit = new JITInvokeThunkUnit(functionType); - jitUnit->compile(llvmModule); - - WAVM_ASSERT_THROW(jitUnit->symbol); - invokeThunkTypeToSymbolMap[functionType] = jitUnit->symbol; - - { - Platform::Lock addressToSymbolMapLock(addressToSymbolMapMutex); - addressToSymbolMap[jitUnit->symbol->baseAddress + jitUnit->symbol->numBytes] = jitUnit->symbol; - } - - return reinterpret_cast(jitUnit->symbol->baseAddress); - } - - void init() - { - llvm::InitializeNativeTarget(); - llvm::InitializeNativeTargetAsmPrinter(); - llvm::InitializeNativeTargetAsmParser(); - llvm::InitializeNativeTargetDisassembler(); - llvm::sys::DynamicLibrary::LoadLibraryPermanently(nullptr); - - auto targetTriple = llvm::sys::getProcessTriple(); - #ifdef __APPLE__ - // Didn't figure out exactly why, but this works around a problem with the MacOS dynamic loader. Without it, - // our symbols can't be found in the JITed object file. - targetTriple += "-elf"; - #endif - targetMachine = llvm::EngineBuilder().selectTarget( - llvm::Triple(targetTriple),"","", - #if defined(_WIN32) && !defined(_WIN64) - // Use SSE2 instead of the FPU on x86 for more control over how intermediate results are rounded. 
- llvm::SmallVector({"+sse2"}) - #else - llvm::SmallVector() - #endif - ); - - llvmI8Type = llvm::Type::getInt8Ty(context); - llvmI16Type = llvm::Type::getInt16Ty(context); - llvmI32Type = llvm::Type::getInt32Ty(context); - llvmI64Type = llvm::Type::getInt64Ty(context); - llvmF32Type = llvm::Type::getFloatTy(context); - llvmF64Type = llvm::Type::getDoubleTy(context); - llvmVoidType = llvm::Type::getVoidTy(context); - llvmBoolType = llvm::Type::getInt1Ty(context); - llvmI8PtrType = llvmI8Type->getPointerTo(); - - #if ENABLE_SIMD_PROTOTYPE - llvmI8x16Type = llvm::VectorType::get(llvmI8Type,16); - llvmI16x8Type = llvm::VectorType::get(llvmI16Type,8); - llvmI32x4Type = llvm::VectorType::get(llvmI32Type,4); - llvmI64x2Type = llvm::VectorType::get(llvmI64Type,2); - llvmF32x4Type = llvm::VectorType::get(llvmF32Type,4); - llvmF64x2Type = llvm::VectorType::get(llvmF64Type,2); - #endif - - llvmResultTypes[(Uptr)ResultType::none] = llvm::Type::getVoidTy(context); - llvmResultTypes[(Uptr)ResultType::i32] = llvmI32Type; - llvmResultTypes[(Uptr)ResultType::i64] = llvmI64Type; - llvmResultTypes[(Uptr)ResultType::f32] = llvmF32Type; - llvmResultTypes[(Uptr)ResultType::f64] = llvmF64Type; - - #if ENABLE_SIMD_PROTOTYPE - llvmResultTypes[(Uptr)ResultType::v128] = llvmI64x2Type; - #endif - - // Create zero constants of each type. 
- typedZeroConstants[(Uptr)ValueType::any] = nullptr; - typedZeroConstants[(Uptr)ValueType::i32] = emitLiteral((U32)0); - typedZeroConstants[(Uptr)ValueType::i64] = emitLiteral((U64)0); - typedZeroConstants[(Uptr)ValueType::f32] = emitLiteral((F32)0.0f); - typedZeroConstants[(Uptr)ValueType::f64] = emitLiteral((F64)0.0); - - #if ENABLE_SIMD_PROTOTYPE - typedZeroConstants[(Uptr)ValueType::v128] = llvm::ConstantVector::get({typedZeroConstants[(Uptr)ValueType::i64],typedZeroConstants[(Uptr)ValueType::i64]}); - #endif - } -} diff --git a/libraries/wasm-jit/Source/Runtime/LLVMJIT.h b/libraries/wasm-jit/Source/Runtime/LLVMJIT.h index cad02a101ed..d77a6b00de2 100644 --- a/libraries/wasm-jit/Source/Runtime/LLVMJIT.h +++ b/libraries/wasm-jit/Source/Runtime/LLVMJIT.h @@ -5,31 +5,13 @@ #include "RuntimePrivate.h" #include "Intrinsics.h" -#ifdef _WIN32 - #pragma warning(push) - #pragma warning (disable:4267) - #pragma warning (disable:4800) - #pragma warning (disable:4291) - #pragma warning (disable:4244) - #pragma warning (disable:4351) - #pragma warning (disable:4065) - #pragma warning (disable:4624) - #pragma warning (disable:4245) // conversion from 'int' to 'unsigned int', signed/unsigned mismatch - #pragma warning(disable:4146) // unary minus operator applied to unsigned type, result is still unsigned - #pragma warning(disable:4458) // declaration of 'x' hides class member - #pragma warning(disable:4510) // default constructor could not be generated - #pragma warning(disable:4610) // struct can never be instantiated - user defined constructor required - #pragma warning(disable:4324) // structure was padded due to alignment specifier - #pragma warning(disable:4702) // unreachable code -#endif - #include "llvm/Analysis/Passes.h" #include "llvm/ExecutionEngine/ExecutionEngine.h" #include "llvm/ExecutionEngine/RTDyldMemoryManager.h" #include "llvm/ExecutionEngine/Orc/CompileUtils.h" #include "llvm/ExecutionEngine/Orc/IRCompileLayer.h" #include 
"llvm/ExecutionEngine/Orc/LambdaResolver.h" -#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h" +#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/IRBuilder.h" @@ -49,17 +31,14 @@ #include "llvm/Support/DynamicLibrary.h" #include "llvm/Transforms/Scalar.h" #include "llvm/IR/DIBuilder.h" +#include "llvm/Transforms/InstCombine/InstCombine.h" +#include "llvm/Transforms/Utils.h" +#include "llvm/ExecutionEngine/Orc/NullResolver.h" +#include "llvm/ExecutionEngine/Orc/Core.h" #include #include #include -#ifdef _WIN32 - #undef and - #undef or - #undef xor - #pragma warning(pop) -#endif - namespace LLVMJIT { // The global LLVM context. diff --git a/libraries/wasm-jit/Source/Runtime/Linker.cpp b/libraries/wasm-jit/Source/Runtime/Linker.cpp index 5675e44e36e..0e73cefbdad 100644 --- a/libraries/wasm-jit/Source/Runtime/Linker.cpp +++ b/libraries/wasm-jit/Source/Runtime/Linker.cpp @@ -12,6 +12,20 @@ namespace Runtime { RUNTIME_API IntrinsicResolver IntrinsicResolver::singleton; + bool isA(ObjectInstance* object,const ObjectType& type) + { + if(type.kind != object->kind) { return false; } + + switch(type.kind) + { + case ObjectKind::function: return asFunctionType(type) == asFunction(object)->type; + case ObjectKind::global: return asGlobalType(type) == asGlobal(object)->type; + case ObjectKind::table: return isSubset(asTableType(type),asTable(object)->type); + case ObjectKind::memory: return isSubset(asMemoryType(type),asMemory(object)->type); + default: Errors::unreachable(); + } + } + bool IntrinsicResolver::resolve(const std::string& moduleName,const std::string& exportName,ObjectType type,ObjectInstance*& outObject) { // Make sure the wavmIntrinsics module can't be directly imported. 
diff --git a/libraries/wasm-jit/Source/Runtime/Memory.cpp b/libraries/wasm-jit/Source/Runtime/Memory.cpp deleted file mode 100644 index 3fc3b58a7c6..00000000000 --- a/libraries/wasm-jit/Source/Runtime/Memory.cpp +++ /dev/null @@ -1,162 +0,0 @@ -#include "Inline/BasicTypes.h" -#include "Runtime.h" -#include "Platform/Platform.h" -#include "RuntimePrivate.h" - -namespace Runtime -{ - // Global lists of memories; used to query whether an address is reserved by one of them. - std::vector memories; - - static Uptr getPlatformPagesPerWebAssemblyPageLog2() - { - errorUnless(Platform::getPageSizeLog2() <= IR::numBytesPerPageLog2); - return IR::numBytesPerPageLog2 - Platform::getPageSizeLog2(); - } - - U8* allocateVirtualPagesAligned(Uptr numBytes,Uptr alignmentBytes,U8*& outUnalignedBaseAddress,Uptr& outUnalignedNumPlatformPages) - { - const Uptr numAllocatedVirtualPages = numBytes >> Platform::getPageSizeLog2(); - const Uptr alignmentPages = alignmentBytes >> Platform::getPageSizeLog2(); - outUnalignedNumPlatformPages = numAllocatedVirtualPages + alignmentPages; - outUnalignedBaseAddress = Platform::allocateVirtualPages(outUnalignedNumPlatformPages); - if(!outUnalignedBaseAddress) { outUnalignedNumPlatformPages = 0; return nullptr; } - else { return (U8*)((Uptr)(outUnalignedBaseAddress + alignmentBytes - 1) & ~(alignmentBytes - 1)); } - } - - MemoryInstance* createMemory(MemoryType type) - { - MemoryInstance* memory = new MemoryInstance(type); - - // On a 64-bit runtime, allocate 8GB of address space for the memory. - // This allows eliding bounds checks on memory accesses, since a 32-bit index + 32-bit offset will always be within the reserved address-space. - // On a 32-bit runtime, allocate 256MB. - const Uptr memoryMaxBytes = HAS_64BIT_ADDRESS_SPACE ? Uptr(8ull*1024*1024*1024) : 0x10000000; - - // On a 64 bit runtime, align the instance memory base to a 4GB boundary, so the lower 32-bits will all be zero. Maybe it will allow better code generation? 
- // Note that this reserves a full extra 4GB, but only uses (4GB-1 page) for alignment, so there will always be a guard page at the end to - // protect against unaligned loads/stores that straddle the end of the address-space. - const Uptr alignmentBytes = HAS_64BIT_ADDRESS_SPACE ? Uptr(4ull*1024*1024*1024) : ((Uptr)1 << Platform::getPageSizeLog2()); - memory->baseAddress = allocateVirtualPagesAligned(memoryMaxBytes,alignmentBytes,memory->reservedBaseAddress,memory->reservedNumPlatformPages); - memory->endOffset = memoryMaxBytes; - if(!memory->baseAddress) { delete memory; return nullptr; } - - // Grow the memory to the type's minimum size. - WAVM_ASSERT_THROW(type.size.min <= UINTPTR_MAX); - if(growMemory(memory,Uptr(type.size.min)) == -1) { delete memory; return nullptr; } - - // Add the memory to the global array. - memories.push_back(memory); - return memory; - } - - MemoryInstance::~MemoryInstance() - { - // Decommit all default memory pages. - if(numPages > 0) { Platform::decommitVirtualPages(baseAddress,numPages << getPlatformPagesPerWebAssemblyPageLog2()); } - - // Free the virtual address space. - if(reservedNumPlatformPages > 0) { Platform::freeVirtualPages(reservedBaseAddress,reservedNumPlatformPages); } - reservedBaseAddress = baseAddress = nullptr; - reservedNumPlatformPages = 0; - - // Remove the memory from the global array. - for(Uptr memoryIndex = 0;memoryIndex < memories.size();++memoryIndex) - { - if(memories[memoryIndex] == this) { memories.erase(memories.begin() + memoryIndex); break; } - } - - theMemoryInstance = nullptr; - } - - bool isAddressOwnedByMemory(U8* address) - { - // Iterate over all memories and check if the address is within the reserved address space for each. 
- for(auto memory : memories) - { - U8* startAddress = memory->reservedBaseAddress; - U8* endAddress = memory->reservedBaseAddress + (memory->reservedNumPlatformPages << Platform::getPageSizeLog2()); - if(address >= startAddress && address < endAddress) { return true; } - } - return false; - } - - Uptr getMemoryNumPages(MemoryInstance* memory) { return memory->numPages; } - Uptr getMemoryMaxPages(MemoryInstance* memory) - { - WAVM_ASSERT_THROW(memory->type.size.max <= UINTPTR_MAX); - return Uptr(memory->type.size.max); - } - - void resetMemory(MemoryInstance* memory, MemoryType& newMemoryType) { - memory->type.size.min = 1; - if(shrinkMemory(memory, memory->numPages - 1) == -1) - causeException(Exception::Cause::outOfMemory); - memset(memory->baseAddress, 0, 1<type = newMemoryType; - if(growMemory(memory, memory->type.size.min - 1) == -1) - causeException(Exception::Cause::outOfMemory); - } - - Iptr growMemory(MemoryInstance* memory,Uptr numNewPages) - { - const Uptr previousNumPages = memory->numPages; - if(numNewPages > 0) - { - // If the number of pages to grow would cause the memory's size to exceed its maximum, return -1. - if(numNewPages > memory->type.size.max || memory->numPages > memory->type.size.max - numNewPages) { return -1; } - - // Try to commit the new pages, and return -1 if the commit fails. - if(!Platform::commitVirtualPages( - memory->baseAddress + (memory->numPages << IR::numBytesPerPageLog2), - numNewPages << getPlatformPagesPerWebAssemblyPageLog2() - )) - { - return -1; - } - memset(memory->baseAddress + (memory->numPages << IR::numBytesPerPageLog2), 0, numNewPages << IR::numBytesPerPageLog2); - memory->numPages += numNewPages; - } - return previousNumPages; - } - - Iptr shrinkMemory(MemoryInstance* memory,Uptr numPagesToShrink) - { - const Uptr previousNumPages = memory->numPages; - if(numPagesToShrink > 0) - { - // If the number of pages to shrink would cause the memory's size to drop below its minimum, return -1. 
- if(numPagesToShrink > memory->numPages - || memory->numPages - numPagesToShrink < memory->type.size.min) - { return -1; } - memory->numPages -= numPagesToShrink; - - // Decommit the pages that were shrunk off the end of the memory. - Platform::decommitVirtualPages( - memory->baseAddress + (memory->numPages << IR::numBytesPerPageLog2), - numPagesToShrink << getPlatformPagesPerWebAssemblyPageLog2() - ); - } - return previousNumPages; - } - - U8* getMemoryBaseAddress(MemoryInstance* memory) - { - return memory->baseAddress; - } - - U8* getValidatedMemoryOffsetRange(MemoryInstance* memory,Uptr offset,Uptr numBytes) - { - // Validate that the range [offset..offset+numBytes) is contained by the memory's reserved pages. - U8* address = memory->baseAddress + offset; - if( !memory - || address < memory->reservedBaseAddress - || address + numBytes < address - || address + numBytes >= memory->reservedBaseAddress + (memory->reservedNumPlatformPages << Platform::getPageSizeLog2())) - { - causeException(Exception::Cause::accessViolation); - } - return address; - } - -} diff --git a/libraries/wasm-jit/Source/Runtime/ModuleInstance.cpp b/libraries/wasm-jit/Source/Runtime/ModuleInstance.cpp deleted file mode 100644 index 436439e4a99..00000000000 --- a/libraries/wasm-jit/Source/Runtime/ModuleInstance.cpp +++ /dev/null @@ -1,203 +0,0 @@ -#include "Inline/BasicTypes.h" -#include "Runtime.h" -#include "RuntimePrivate.h" -#include "IR/Module.h" - -#include - -namespace Runtime -{ - std::vector moduleInstances; - - Value evaluateInitializer(ModuleInstance* moduleInstance,InitializerExpression expression) - { - switch(expression.type) - { - case InitializerExpression::Type::i32_const: return expression.i32; - case InitializerExpression::Type::i64_const: return expression.i64; - case InitializerExpression::Type::f32_const: return expression.f32; - case InitializerExpression::Type::f64_const: return expression.f64; - case InitializerExpression::Type::get_global: - { - // Find the import 
this refers to. - errorUnless(expression.globalIndex < moduleInstance->globals.size()); - GlobalInstance* globalInstance = moduleInstance->globals[expression.globalIndex]; - return Runtime::Value(globalInstance->type.valueType,globalInstance->value); - } - default: Errors::unreachable(); - }; - } - - MemoryInstance* MemoryInstance::theMemoryInstance = nullptr; - - ModuleInstance* instantiateModule(const IR::Module& module,ImportBindings&& imports) - { - ModuleInstance* moduleInstance = new ModuleInstance( - std::move(imports.functions), - std::move(imports.tables), - std::move(imports.memories), - std::move(imports.globals) - ); - - // Get disassembly names for the module's objects. - DisassemblyNames disassemblyNames; - IR::getDisassemblyNames(module,disassemblyNames); - - // Check the type of the ModuleInstance's imports. - errorUnless(moduleInstance->functions.size() == module.functions.imports.size()); - for(Uptr importIndex = 0;importIndex < module.functions.imports.size();++importIndex) - { - errorUnless(isA(moduleInstance->functions[importIndex],module.types[module.functions.imports[importIndex].type.index])); - } - errorUnless(moduleInstance->tables.size() == module.tables.imports.size()); - for(Uptr importIndex = 0;importIndex < module.tables.imports.size();++importIndex) - { - errorUnless(isA(moduleInstance->tables[importIndex],module.tables.imports[importIndex].type)); - } - errorUnless(moduleInstance->memories.size() == module.memories.imports.size()); - for(Uptr importIndex = 0;importIndex < module.memories.imports.size();++importIndex) - { - errorUnless(isA(moduleInstance->memories[importIndex],module.memories.imports[importIndex].type)); - } - errorUnless(moduleInstance->globals.size() == module.globals.imports.size()); - for(Uptr importIndex = 0;importIndex < module.globals.imports.size();++importIndex) - { - errorUnless(isA(moduleInstance->globals[importIndex],module.globals.imports[importIndex].type)); - } - - // Instantiate the module's memory 
and table definitions. - for(const TableDef& tableDef : module.tables.defs) - { - auto table = createTable(tableDef.type); - if(!table) { causeException(Exception::Cause::outOfMemory); } - moduleInstance->tables.push_back(table); - } - for(const MemoryDef& memoryDef : module.memories.defs) - { - if(!MemoryInstance::theMemoryInstance) { - MemoryInstance::theMemoryInstance = createMemory(memoryDef.type); - if(!MemoryInstance::theMemoryInstance) { causeException(Exception::Cause::outOfMemory); } - } - moduleInstance->memories.push_back(MemoryInstance::theMemoryInstance); - } - - // Find the default memory and table for the module. - if(moduleInstance->memories.size() != 0) - { - WAVM_ASSERT_THROW(moduleInstance->memories.size() == 1); - moduleInstance->defaultMemory = moduleInstance->memories[0]; - } - if(moduleInstance->tables.size() != 0) - { - WAVM_ASSERT_THROW(moduleInstance->tables.size() == 1); - moduleInstance->defaultTable = moduleInstance->tables[0]; - } - - // If any memory or table segment doesn't fit, throw an exception before mutating any memory/table. - for(auto& tableSegment : module.tableSegments) - { - TableInstance* table = moduleInstance->tables[tableSegment.tableIndex]; - const Value baseOffsetValue = evaluateInitializer(moduleInstance,tableSegment.baseOffset); - errorUnless(baseOffsetValue.type == ValueType::i32); - const U32 baseOffset = baseOffsetValue.i32; - if(baseOffset > table->elements.size() - || table->elements.size() - baseOffset < tableSegment.indices.size()) - { causeException(Exception::Cause::invalidSegmentOffset); } - } - - //Previously, the module instantiation would write in to the memoryInstance here. Don't do that - //since the memoryInstance is shared across all moduleInstances and we could be compiling - //a new instance while another instance is running - - // Instantiate the module's global definitions. 
- for(const GlobalDef& globalDef : module.globals.defs) - { - const Value initialValue = evaluateInitializer(moduleInstance,globalDef.initializer); - errorUnless(initialValue.type == globalDef.type.valueType); - moduleInstance->globals.push_back(new GlobalInstance(globalDef.type,initialValue)); - } - - // Create the FunctionInstance objects for the module's function definitions. - for(Uptr functionDefIndex = 0;functionDefIndex < module.functions.defs.size();++functionDefIndex) - { - const Uptr functionIndex = moduleInstance->functions.size(); - const DisassemblyNames::Function& functionNames = disassemblyNames.functions[functionIndex]; - std::string debugName = functionNames.name; - if(!debugName.size()) { debugName = ""; } - auto functionInstance = new FunctionInstance(moduleInstance,module.types[module.functions.defs[functionDefIndex].type.index],nullptr,debugName.c_str()); - moduleInstance->functionDefs.push_back(functionInstance); - moduleInstance->functions.push_back(functionInstance); - } - - // Generate machine code for the module. - LLVMJIT::instantiateModule(module,moduleInstance); - - // Set up the instance's exports. - for(const Export& exportIt : module.exports) - { - ObjectInstance* exportedObject = nullptr; - switch(exportIt.kind) - { - case ObjectKind::function: exportedObject = moduleInstance->functions[exportIt.index]; break; - case ObjectKind::table: exportedObject = moduleInstance->tables[exportIt.index]; break; - case ObjectKind::memory: exportedObject = moduleInstance->memories[exportIt.index]; break; - case ObjectKind::global: exportedObject = moduleInstance->globals[exportIt.index]; break; - default: Errors::unreachable(); - } - moduleInstance->exportMap[exportIt.name] = exportedObject; - } - - // Copy the module's table segments into the module's default table. 
- for(const TableSegment& tableSegment : module.tableSegments) - { - TableInstance* table = moduleInstance->tables[tableSegment.tableIndex]; - - const Value baseOffsetValue = evaluateInitializer(moduleInstance,tableSegment.baseOffset); - errorUnless(baseOffsetValue.type == ValueType::i32); - const U32 baseOffset = baseOffsetValue.i32; - WAVM_ASSERT_THROW(baseOffset + tableSegment.indices.size() <= table->elements.size()); - - for(Uptr index = 0;index < tableSegment.indices.size();++index) - { - const Uptr functionIndex = tableSegment.indices[index]; - WAVM_ASSERT_THROW(functionIndex < moduleInstance->functions.size()); - setTableElement(table,baseOffset + index,moduleInstance->functions[functionIndex]); - } - } - - // Call the module's start function. - if(module.startFunctionIndex != UINTPTR_MAX) - { - WAVM_ASSERT_THROW(moduleInstance->functions[module.startFunctionIndex]->type == IR::FunctionType::get()); - moduleInstance->startFunctionIndex = module.startFunctionIndex; - } - - moduleInstances.push_back(moduleInstance); - return moduleInstance; - } - - ModuleInstance::~ModuleInstance() - { - delete jitModule; - } - - MemoryInstance* getDefaultMemory(ModuleInstance* moduleInstance) { return moduleInstance->defaultMemory; } - uint64_t getDefaultMemorySize(ModuleInstance* moduleInstance) { return moduleInstance->defaultMemory->numPages << IR::numBytesPerPageLog2; } - TableInstance* getDefaultTable(ModuleInstance* moduleInstance) { return moduleInstance->defaultTable; } - - void runInstanceStartFunc(ModuleInstance* moduleInstance) { - if(moduleInstance->startFunctionIndex != UINTPTR_MAX) - invokeFunction(moduleInstance->functions[moduleInstance->startFunctionIndex],{}); - } - - void resetGlobalInstances(ModuleInstance* moduleInstance) { - for(GlobalInstance*& gi : moduleInstance->globals) - memcpy(&gi->value, &gi->initialValue, sizeof(gi->value)); - } - - ObjectInstance* getInstanceExport(ModuleInstance* moduleInstance,const std::string& name) - { - auto mapIt = 
moduleInstance->exportMap.find(name); - return mapIt == moduleInstance->exportMap.end() ? nullptr : mapIt->second; - } -} diff --git a/libraries/wasm-jit/Source/Runtime/ObjectGC.cpp b/libraries/wasm-jit/Source/Runtime/ObjectGC.cpp index 7c099e61659..5c815fdd8a3 100644 --- a/libraries/wasm-jit/Source/Runtime/ObjectGC.cpp +++ b/libraries/wasm-jit/Source/Runtime/ObjectGC.cpp @@ -40,9 +40,6 @@ namespace Runtime std::set referencedObjects; std::vector pendingScanObjects; - // Gather GC roots from running WASM threads. - getThreadGCRoots(rootObjectReferences); - // Initialize the referencedObjects set from the rootObjectReferences and intrinsic objects. for(auto object : rootObjectReferences) { diff --git a/libraries/wasm-jit/Source/Runtime/Runtime.cpp b/libraries/wasm-jit/Source/Runtime/Runtime.cpp deleted file mode 100644 index 7508a6caf87..00000000000 --- a/libraries/wasm-jit/Source/Runtime/Runtime.cpp +++ /dev/null @@ -1,157 +0,0 @@ -#include "Inline/BasicTypes.h" -#include "Platform/Platform.h" -#include "Logging/Logging.h" -#include "Runtime.h" -#include "RuntimePrivate.h" - -#include - -namespace Runtime -{ - void init() - { - LLVMJIT::init(); - initWAVMIntrinsics(); - } - - // Returns a vector of strings, each element describing a frame of the call stack. - // If the frame is a JITed function, use the JIT's information about the function - // to describe it, otherwise fallback to whatever platform-specific symbol resolution - // is available. 
- std::vector describeCallStack(const Platform::CallStack& callStack) - { - std::vector frameDescriptions; - for(auto frame : callStack.stackFrames) - { - std::string frameDescription; - if( LLVMJIT::describeInstructionPointer(frame.ip,frameDescription) - || Platform::describeInstructionPointer(frame.ip,frameDescription)) - { - frameDescriptions.push_back(frameDescription); - } - else { frameDescriptions.push_back(""); } - } - return frameDescriptions; - } - - [[noreturn]] void causeException(Exception::Cause cause) - { - auto callStack = Platform::captureCallStack(); - throw Exception {cause,describeCallStack(callStack)}; - } - - bool isA(ObjectInstance* object,const ObjectType& type) - { - if(type.kind != object->kind) { return false; } - - switch(type.kind) - { - case ObjectKind::function: return asFunctionType(type) == asFunction(object)->type; - case ObjectKind::global: return asGlobalType(type) == asGlobal(object)->type; - case ObjectKind::table: return isSubset(asTableType(type),asTable(object)->type); - case ObjectKind::memory: return isSubset(asMemoryType(type),asMemory(object)->type); - default: Errors::unreachable(); - } - } - - [[noreturn]] void handleHardwareTrap(Platform::HardwareTrapType trapType,Platform::CallStack&& trapCallStack,Uptr trapOperand) - { - std::cerr << "handle hardware trap\n"; - std::vector callStackDescription = describeCallStack(trapCallStack); - - switch(trapType) - { - case Platform::HardwareTrapType::accessViolation: - { - // If the access violation occured in a Table's reserved pages, treat it as an undefined table element runtime error. - if(isAddressOwnedByTable(reinterpret_cast(trapOperand))) { throw Exception { Exception::Cause::undefinedTableElement, callStackDescription }; } - // If the access violation occured in a Memory's reserved pages, treat it as an access violation runtime error. 
- else if(isAddressOwnedByMemory(reinterpret_cast(trapOperand))) { throw Exception { Exception::Cause::accessViolation, callStackDescription }; } - else - { - // If the access violation occured outside of a Table or Memory, treat it as a bug (possibly a security hole) - // rather than a runtime error in the WebAssembly code. - Log::printf(Log::Category::error,"Access violation outside of table or memory reserved addresses. Call stack:\n"); - for(auto calledFunction : callStackDescription) { Log::printf(Log::Category::error," %s\n",calledFunction.c_str()); } - Errors::fatalf("unsandboxed access violation"); - } - } - case Platform::HardwareTrapType::stackOverflow: throw Exception { Exception::Cause::stackOverflow, callStackDescription }; - case Platform::HardwareTrapType::intDivideByZeroOrOverflow: throw Exception { Exception::Cause::integerDivideByZeroOrIntegerOverflow, callStackDescription }; - default: Errors::unreachable(); - }; - } - - - Result invokeFunction(FunctionInstance* function,const std::vector& parameters) - { - const FunctionType* functionType = function->type; - - // Check that the parameter types match the function, and copy them into a memory block that stores each as a 64-bit value. - if(parameters.size() != functionType->parameters.size()) - { - throw Exception {Exception::Cause::invokeSignatureMismatch}; - } - - U64* thunkMemory = (U64*)alloca((functionType->parameters.size() + getArity(functionType->ret)) * sizeof(U64)); - for(Uptr parameterIndex = 0;parameterIndex < functionType->parameters.size();++parameterIndex) - { - if(functionType->parameters[parameterIndex] != parameters[parameterIndex].type) - { - throw Exception {Exception::Cause::invokeSignatureMismatch}; - } - - thunkMemory[parameterIndex] = parameters[parameterIndex].i64; - } - - // Get the invoke thunk for this function type. 
- LLVMJIT::InvokeFunctionPointer invokeFunctionPointer = LLVMJIT::getInvokeThunk(functionType); - - // Catch platform-specific runtime exceptions and turn them into Runtime::Values. - Result result; - Platform::HardwareTrapType trapType; - Platform::CallStack trapCallStack; - Uptr trapOperand; - trapType = Platform::catchHardwareTraps(trapCallStack,trapOperand, - [&] - { - // Call the invoke thunk. - (*invokeFunctionPointer)(function->nativeFunction,thunkMemory); - - // Read the return value out of the thunk memory block. - if(functionType->ret != ResultType::none) - { - result.type = functionType->ret; - result.i64 = thunkMemory[functionType->parameters.size()]; - } - }); - - // If there was no hardware trap, just return the result. - if(trapType == Platform::HardwareTrapType::none) { return result; } - else { handleHardwareTrap(trapType,std::move(trapCallStack),trapOperand); } - } - - const FunctionType* getFunctionType(FunctionInstance* function) - { - return function->type; - } - - GlobalInstance* createGlobal(GlobalType type,Value initialValue) - { - return new GlobalInstance(type,initialValue); - } - - Value getGlobalValue(GlobalInstance* global) - { - return Value(global->type.valueType,global->value); - } - - Value setGlobalValue(GlobalInstance* global,Value newValue) - { - WAVM_ASSERT_THROW(newValue.type == global->type.valueType); - WAVM_ASSERT_THROW(global->type.isMutable); - const Value previousValue = Value(global->type.valueType,global->value); - global->value = newValue; - return previousValue; - } -} diff --git a/libraries/wasm-jit/Source/Runtime/Table.cpp b/libraries/wasm-jit/Source/Runtime/Table.cpp deleted file mode 100644 index f871d48e505..00000000000 --- a/libraries/wasm-jit/Source/Runtime/Table.cpp +++ /dev/null @@ -1,136 +0,0 @@ -#include "Inline/BasicTypes.h" -#include "Runtime.h" -#include "Platform/Platform.h" -#include "RuntimePrivate.h" -#include - -namespace Runtime -{ - // Global lists of tables; used to query whether an address is 
reserved by one of them. - std::vector tables; - - static Uptr getNumPlatformPages(Uptr numBytes) - { - return (numBytes + (Uptr(1)<> Platform::getPageSizeLog2(); - } - - TableInstance* createTable(TableType type) - { - TableInstance* table = new TableInstance(type); - - const Uptr tableMaxBytes = sizeof(TableInstance::FunctionElement)*eosio::chain::wasm_constraints::maximum_table_elements; - - const Uptr alignmentBytes = 1U << Platform::getPageSizeLog2(); - table->baseAddress = (TableInstance::FunctionElement*)allocateVirtualPagesAligned(tableMaxBytes,alignmentBytes,table->reservedBaseAddress,table->reservedNumPlatformPages); - table->endOffset = tableMaxBytes; - if(!table->baseAddress) { delete table; return nullptr; } - - // Grow the table to the type's minimum size. - WAVM_ASSERT_THROW(type.size.min <= UINTPTR_MAX); - if(growTable(table,Uptr(type.size.min)) == -1) { delete table; return nullptr; } - - // Add the table to the global array. - tables.push_back(table); - return table; - } - - TableInstance::~TableInstance() - { - // Decommit all pages. - if(elements.size() > 0) { Platform::decommitVirtualPages((U8*)baseAddress,getNumPlatformPages(elements.size() * sizeof(TableInstance::FunctionElement))); } - - // Free the virtual address space. - if(reservedNumPlatformPages > 0) { Platform::freeVirtualPages((U8*)reservedBaseAddress,reservedNumPlatformPages); } - reservedBaseAddress = nullptr; - reservedNumPlatformPages = 0; - baseAddress = nullptr; - - // Remove the table from the global array. - for(Uptr tableIndex = 0;tableIndex < tables.size();++tableIndex) - { - if(tables[tableIndex] == this) { tables.erase(tables.begin() + tableIndex); break; } - } - } - - bool isAddressOwnedByTable(U8* address) - { - // Iterate over all tables and check if the address is within the reserved address space for each. 
- for(auto table : tables) - { - U8* startAddress = (U8*)table->reservedBaseAddress; - U8* endAddress = ((U8*)table->reservedBaseAddress) + (table->reservedNumPlatformPages << Platform::getPageSizeLog2()); - if(address >= startAddress && address < endAddress) { return true; } - } - return false; - } - - ObjectInstance* setTableElement(TableInstance* table,Uptr index,ObjectInstance* newValue) - { - // Write the new table element to both the table's elements array and its indirect function call data. - WAVM_ASSERT_THROW(index < table->elements.size()); - FunctionInstance* functionInstance = asFunction(newValue); - WAVM_ASSERT_THROW(functionInstance->nativeFunction); - table->baseAddress[index].type = functionInstance->type; - table->baseAddress[index].value = functionInstance->nativeFunction; - auto oldValue = table->elements[index]; - table->elements[index] = newValue; - return oldValue; - } - - Uptr getTableNumElements(TableInstance* table) - { - return table->elements.size(); - } - - Iptr growTable(TableInstance* table,Uptr numNewElements) - { - const Uptr previousNumElements = table->elements.size(); - if(numNewElements > 0) - { - // If the number of elements to grow would cause the table's size to exceed its maximum, return -1. - if(numNewElements > table->type.size.max || table->elements.size() > table->type.size.max - numNewElements) { return -1; } - - // Try to commit pages for the new elements, and return -1 if the commit fails. 
- const Uptr previousNumPlatformPages = getNumPlatformPages(table->elements.size() * sizeof(TableInstance::FunctionElement)); - const Uptr newNumPlatformPages = getNumPlatformPages((table->elements.size()+numNewElements) * sizeof(TableInstance::FunctionElement)); - if(newNumPlatformPages != previousNumPlatformPages - && !Platform::commitVirtualPages( - (U8*)table->baseAddress + (previousNumPlatformPages << Platform::getPageSizeLog2()), - newNumPlatformPages - previousNumPlatformPages - )) - { - return -1; - } - - // Also grow the table's elements array. - table->elements.insert(table->elements.end(),numNewElements,nullptr); - } - return previousNumElements; - } - - Iptr shrinkTable(TableInstance* table,Uptr numElementsToShrink) - { - const Uptr previousNumElements = table->elements.size(); - if(numElementsToShrink > 0) - { - // If the number of elements to shrink would cause the tables's size to drop below its minimum, return -1. - if(numElementsToShrink > table->elements.size() - || table->elements.size() - numElementsToShrink < table->type.size.min) { return -1; } - - // Shrink the table's elements array. - table->elements.resize(table->elements.size() - numElementsToShrink); - - // Decommit the pages that were shrunk off the end of the table's indirect function call data. 
- const Uptr previousNumPlatformPages = getNumPlatformPages(previousNumElements * sizeof(TableInstance::FunctionElement)); - const Uptr newNumPlatformPages = getNumPlatformPages(table->elements.size() * sizeof(TableInstance::FunctionElement)); - if(newNumPlatformPages != previousNumPlatformPages) - { - Platform::decommitVirtualPages( - (U8*)table->baseAddress + (newNumPlatformPages << Platform::getPageSizeLog2()), - (previousNumPlatformPages - newNumPlatformPages) << Platform::getPageSizeLog2() - ); - } - } - return previousNumElements; - } -} diff --git a/libraries/wasm-jit/Source/Runtime/Threads.cpp b/libraries/wasm-jit/Source/Runtime/Threads.cpp deleted file mode 100644 index 551f92a4ac6..00000000000 --- a/libraries/wasm-jit/Source/Runtime/Threads.cpp +++ /dev/null @@ -1,374 +0,0 @@ -#include "Inline/BasicTypes.h" -#include "Logging/Logging.h" -#include "Intrinsics.h" -#include "RuntimePrivate.h" - -#include -#include -#include -#include -#include - -#if ENABLE_THREADING_PROTOTYPE -// Keeps track of the entry and error functions used by a running WebAssembly-spawned thread. -// Used to find garbage collection roots. -struct Thread -{ - Runtime::FunctionInstance* entryFunction; - Runtime::FunctionInstance* errorFunction; -}; - -// Holds a list of threads (in the form of events that will wake them) that -// are waiting on a specific address. -struct WaitList -{ - Platform::Mutex* mutex; - std::vector wakeEvents; - std::atomic numReferences; - - WaitList(): mutex(Platform::createMutex()), numReferences(1) {} - ~WaitList() { destroyMutex(mutex); } -}; - -// An event that is reused within a thread when it waits on a WaitList. -THREAD_LOCAL Platform::Event* threadWakeEvent = nullptr; - -// A map from address to a list of threads waiting on that address. -static Platform::Mutex* addressToWaitListMapMutex = Platform::createMutex(); -static std::map addressToWaitListMap; - -// A global list of running threads created by WebAssembly code. 
-static Platform::Mutex* threadsMutex = Platform::createMutex(); -static std::vector threads; - -// Opens the wait list for a given address. -// Increases the wait list's reference count, and returns a pointer to it. -// Note that it does not lock the wait list mutex. -// A call to openWaitList should be followed by a call to closeWaitList to avoid leaks. -static WaitList* openWaitList(Uptr address) -{ - Platform::Lock mapLock(addressToWaitListMapMutex); - auto mapIt = addressToWaitListMap.find(address); - if(mapIt != addressToWaitListMap.end()) - { - ++mapIt->second->numReferences; - return mapIt->second; - } - else - { - WaitList* waitList = new WaitList(); - addressToWaitListMap.emplace(address,waitList); - return waitList; - } -} - -// Closes a wait list, deleting it and removing it from the global map if it was the last reference. -static void closeWaitList(Uptr address,WaitList* waitList) -{ - if(--waitList->numReferences == 0) - { - Platform::Lock mapLock(addressToWaitListMapMutex); - if(!waitList->numReferences) - { - WAVM_ASSERT_THROW(!waitList->wakeEvents.size()); - delete waitList; - addressToWaitListMap.erase(address); - } - } -} - -// Loads a value from memory with seq_cst memory order. -// The caller must ensure that the pointer is naturally aligned. -template -static Value atomicLoad(const Value* valuePointer) -{ - static_assert(sizeof(std::atomic) == sizeof(Value),"relying on non-standard behavior"); - std::atomic* valuePointerAtomic = (std::atomic*)valuePointer; - return valuePointerAtomic->load(); -} - -// Stores a value to memory with seq_cst memory order. -// The caller must ensure that the pointer is naturally aligned. 
-template -static void atomicStore(Value* valuePointer,Value newValue) -{ - static_assert(sizeof(std::atomic) == sizeof(Value),"relying on non-standard behavior"); - std::atomic* valuePointerAtomic = (std::atomic*)valuePointer; - valuePointerAtomic->store(newValue); -} - -// Decodes a floating-point timeout value relative to startTime. -U64 getEndTimeFromTimeout(U64 startTime,F64 timeout) -{ - const F64 timeoutMicroseconds = timeout * 1000.0; - U64 endTime = UINT64_MAX; - if(!std::isnan(timeoutMicroseconds) && std::isfinite(timeoutMicroseconds)) - { - if(timeoutMicroseconds <= 0.0) - { - endTime = startTime; - } - else if(timeoutMicroseconds <= F64(UINT64_MAX - 1)) - { - endTime = startTime + U64(timeoutMicroseconds); - errorUnless(endTime >= startTime); - } - } - return endTime; -} - -template -static U32 waitOnAddress(Value* valuePointer,Value expectedValue,F64 timeout) -{ - const U64 endTime = getEndTimeFromTimeout(Platform::getMonotonicClock(),timeout); - - // Open the wait list for this address. - const Uptr address = reinterpret_cast(valuePointer); - WaitList* waitList = openWaitList(address); - - // Lock the wait list, and check that *valuePointer is still what the caller expected it to be. - lockMutex(waitList->mutex); - if(atomicLoad(valuePointer) != expectedValue) - { - // If *valuePointer wasn't the expected value, unlock the wait list and return. - unlockMutex(waitList->mutex); - closeWaitList(address,waitList); - return 1; - } - else - { - // If the thread hasn't yet created a wake event, do so. - if(!threadWakeEvent) { threadWakeEvent = Platform::createEvent(); } - - // Add the wake event to the wait list, and unlock the wait list. - waitList->wakeEvents.push_back(threadWakeEvent); - unlockMutex(waitList->mutex); - } - - // Wait for the thread's wake event to be signaled. 
- bool timedOut = false; - if(!Platform::waitForEvent(threadWakeEvent,endTime)) - { - // If the wait timed out, lock the wait list and check if the thread's wake event is still in the wait list. - Platform::Lock waitListLock(waitList->mutex); - auto wakeEventIt = std::find(waitList->wakeEvents.begin(),waitList->wakeEvents.end(),threadWakeEvent); - if(wakeEventIt != waitList->wakeEvents.end()) - { - // If the event was still on the wait list, remove it, and return the "timed out" result. - waitList->wakeEvents.erase(wakeEventIt); - timedOut = true; - } - else - { - // In between the wait timing out and locking the wait list, some other thread tried to wake this thread. - // The event will now be signaled, so use an immediately expiring wait on it to reset it. - errorUnless(Platform::waitForEvent(threadWakeEvent,Platform::getMonotonicClock())); - } - } - - closeWaitList(address,waitList); - return timedOut ? 2 : 0; -} - -static U32 wakeAddress(Uptr address,U32 numToWake) -{ - if(numToWake == 0) { return 0; } - - // Open the wait list for this address. - WaitList* waitList = openWaitList(address); - { - Platform::Lock waitListLock(waitList->mutex); - - // Determine how many threads to wake. - // numToWake==UINT32_MAX means wake all waiting threads. - Uptr actualNumToWake = numToWake; - if(numToWake == UINT32_MAX || numToWake > waitList->wakeEvents.size()) - { - actualNumToWake = waitList->wakeEvents.size(); - } - - // Signal the events corresponding to the oldest waiting threads. - for(Uptr wakeIndex = 0;wakeIndex < actualNumToWake;++wakeIndex) - { - signalEvent(waitList->wakeEvents[wakeIndex]); - } - - // Remove the events from the wait list. 
- waitList->wakeEvents.erase(waitList->wakeEvents.begin(),waitList->wakeEvents.begin() + actualNumToWake); - } - closeWaitList(address,waitList); - - return numToWake; -} - -namespace Runtime -{ - DEFINE_INTRINSIC_FUNCTION1(wavmIntrinsics,misalignedAtomicTrap,misalignedAtomicTrap,none,i32,address) - { - causeException(Exception::Cause::misalignedAtomicMemoryAccess); - } - - DEFINE_INTRINSIC_FUNCTION1(wavmIntrinsics,isLockFree,isLockFree,i32,i32,numBytes) - { - // Assume we're running on X86. - switch(numBytes) - { - case 1: case 2: case 4: case 8: return true; - default: return false; - }; - } - - DEFINE_INTRINSIC_FUNCTION3(wavmIntrinsics,wake,wake,i32,i32,addressOffset,i32,numToWake,i64,memoryInstanceBits) - { - MemoryInstance* memoryInstance = reinterpret_cast(memoryInstanceBits); - - // Validate that the address is within the memory's bounds and 4-byte aligned. - if(U32(addressOffset) > memoryInstance->endOffset) { causeException(Exception::Cause::accessViolation); } - if(addressOffset & 3) { causeException(Exception::Cause::misalignedAtomicMemoryAccess); } - - const Uptr address = reinterpret_cast(getMemoryBaseAddress(memoryInstance)) + addressOffset; - return wakeAddress(address,numToWake); - } - - DEFINE_INTRINSIC_FUNCTION4(wavmIntrinsics,wait,wait,i32,i32,addressOffset,i32,expectedValue,f64,timeout,i64,memoryInstanceBits) - { - MemoryInstance* memoryInstance = reinterpret_cast(memoryInstanceBits); - - // Validate that the address is within the memory's bounds and naturally aligned. 
- if(U32(addressOffset) > memoryInstance->endOffset) { causeException(Exception::Cause::accessViolation); } - if(addressOffset & 3) { causeException(Exception::Cause::misalignedAtomicMemoryAccess); } - - I32* valuePointer = reinterpret_cast(getMemoryBaseAddress(memoryInstance) + addressOffset); - return waitOnAddress(valuePointer,expectedValue,timeout); - } - DEFINE_INTRINSIC_FUNCTION4(wavmIntrinsics,wait,wait,i32,i32,addressOffset,i64,expectedValue,f64,timeout,i64,memoryInstanceBits) - { - MemoryInstance* memoryInstance = reinterpret_cast(memoryInstanceBits); - - // Validate that the address is within the memory's bounds and naturally aligned. - if(U32(addressOffset) > memoryInstance->endOffset) { causeException(Exception::Cause::accessViolation); } - if(addressOffset & 7) { causeException(Exception::Cause::misalignedAtomicMemoryAccess); } - - I64* valuePointer = reinterpret_cast(getMemoryBaseAddress(memoryInstance) + addressOffset); - return waitOnAddress(valuePointer,expectedValue,timeout); - } - - FunctionInstance* getFunctionFromTable(TableInstance* table,const FunctionType* expectedType,U32 elementIndex) - { - // Validate that the index is valid. - if(elementIndex * sizeof(TableInstance::FunctionElement) >= table->endOffset) - { - causeException(Runtime::Exception::Cause::undefinedTableElement); - } - // Validate that the indexed function's type matches the expected type. 
- const FunctionType* actualSignature = table->baseAddress[elementIndex].type; - if(actualSignature != expectedType) - { - causeException(Runtime::Exception::Cause::indirectCallSignatureMismatch); - } - return asFunction(table->elements[elementIndex]); - } - - void callAndTurnHardwareTrapsIntoRuntimeExceptions(void (*function)(I32),I32 argument) - { - Platform::CallStack trapCallStack; - Uptr trapOperand; - const Platform::HardwareTrapType trapType = Platform::catchHardwareTraps( - trapCallStack,trapOperand, - [function,argument]{(*function)(argument);} - ); - if(trapType != Platform::HardwareTrapType::none) - { - handleHardwareTrap(trapType,std::move(trapCallStack),trapOperand); - } - } - - static void threadFunc(Thread* thread,I32 argument) - { - try - { - // Call the thread entry function. - callAndTurnHardwareTrapsIntoRuntimeExceptions((void(*)(I32))thread->entryFunction->nativeFunction,argument); - } - catch(Runtime::Exception exception) - { - // Log that a runtime exception was handled by a thread error function. - Log::printf(Log::Category::error,"Runtime exception in thread: %s\n",describeExceptionCause(exception.cause)); - for(auto calledFunction : exception.callStack) { Log::printf(Log::Category::error," %s\n",calledFunction.c_str()); } - Log::printf(Log::Category::error,"Passing exception on to thread error handler\n"); - - try - { - // Call the thread error function. - callAndTurnHardwareTrapsIntoRuntimeExceptions((void(*)(I32))thread->errorFunction->nativeFunction,argument); - } - catch(Runtime::Exception secondException) - { - // Log that the thread error function caused a runtime exception, and exit with a fatal error. 
- Log::printf(Log::Category::error,"Runtime exception in thread error handler: %s\n",describeExceptionCause(secondException.cause)); - for(auto calledFunction : secondException.callStack) { Log::printf(Log::Category::error," %s\n",calledFunction.c_str()); } - Errors::fatalf("double fault"); - } - } - - // Destroy the thread wake event before exiting the thread. - if(threadWakeEvent) - { - Platform::destroyEvent(threadWakeEvent); - } - - // Remove the thread from the global list. - { - Platform::Lock threadsLock(threadsMutex); - auto threadIt = std::find(threads.begin(),threads.end(),thread); - threads.erase(threadIt); - } - - // Delete the thread object. - delete thread; - } - - DEFINE_INTRINSIC_FUNCTION4(wavmIntrinsics,launchThread,launchThread,none, - i32,entryFunctionIndex, - i32,argument, - i32,errorFunctionIndex, - i64,functionTablePointer) - { - TableInstance* defaultTable = reinterpret_cast(functionTablePointer); - const FunctionType* functionType = FunctionType::get(ResultType::none,{ValueType::i32}); - - // Create a thread object that will expose its entry and error functions to the garbage collector as roots. - Thread* thread = new Thread(); - thread->entryFunction = getFunctionFromTable(defaultTable,functionType,entryFunctionIndex); - thread->errorFunction = getFunctionFromTable(defaultTable,functionType,errorFunctionIndex); - { - Platform::Lock threadsLock(threadsMutex); - threads.push_back(thread); - } - - // Use a std::thread to spawn the thread. - std::thread stdThread([thread,argument]() - { - threadFunc(thread,argument); - }); - - // Detach the std::thread from the underlying thread. 
- stdThread.detach(); - } - - void getThreadGCRoots(std::vector& outGCRoots) - { - Platform::Lock threadsLock(threadsMutex); - for(auto thread : threads) - { - outGCRoots.push_back(thread->entryFunction); - outGCRoots.push_back(thread->errorFunction); - } - } -} -#else -namespace Runtime -{ - void getThreadGCRoots(std::vector& outGCRoots) {} -} -#endif \ No newline at end of file diff --git a/pipeline.jsonc b/pipeline.jsonc index c526684f7ab..40772347d41 100644 --- a/pipeline.jsonc +++ b/pipeline.jsonc @@ -14,7 +14,7 @@ { "environment": { - "BUILDER_TAG": "v1.8.0" + "BUILDER_TAG": "v2.0.0" } }, "eosio-sync-tests": @@ -22,7 +22,8 @@ "environment": { "SKIP_PRE_V180": "true", - "SKIP_V180": "false" + "SKIP_V180": "true", + "SKIP_V200": "false" } }, "eosio-replay-tests": @@ -30,7 +31,8 @@ "environment": { "SKIP_PRE_V180": "true", - "SKIP_V180": "false" + "SKIP_V180": "true", + "SKIP_V200": "false" } }, "eosio-resume-from-state": @@ -38,10 +40,10 @@ "test": [ { - "tag": "v1.8.6" + "tag": "v2.0.0-rc2" }, { - "tag": "v1.8.7" + "tag": "v2.0.0-rc3" } ] } diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 3a591eedbb5..334d560baae 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include diff --git a/plugins/chain_api_plugin/include/eosio/chain_api_plugin/chain_api_plugin.hpp b/plugins/chain_api_plugin/include/eosio/chain_api_plugin/chain_api_plugin.hpp index 9a854cf0b56..d2064aef4e7 100644 --- a/plugins/chain_api_plugin/include/eosio/chain_api_plugin/chain_api_plugin.hpp +++ b/plugins/chain_api_plugin/include/eosio/chain_api_plugin/chain_api_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp 
b/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp index cdec12008ef..44ef1860c60 100644 --- a/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp +++ b/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -43,13 +39,13 @@ namespace eosio { namespace chain { namespace plugin_interface { namespace incoming { namespace channels { using block = channel_decl; - using transaction = channel_decl; + using transaction = channel_decl; } namespace methods { // synchronously push a block/trx to a single provider using block_sync = method_decl; - using transaction_async = method_decl), first_provider_policy>; + using transaction_async = method_decl), first_provider_policy>; } } diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 90c09b787f9..a854095fe3f 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include @@ -14,10 +10,13 @@ #include #include #include +#include #include #include +#include + #include #include #include @@ -27,6 +26,13 @@ #include #include +// reflect chainbase::environment for --print-build-info option +FC_REFLECT_ENUM( chainbase::environment::os_t, + (OS_LINUX)(OS_MACOS)(OS_WINDOWS)(OS_OTHER) ) +FC_REFLECT_ENUM( chainbase::environment::arch_t, + (ARCH_X86_64)(ARCH_ARM)(ARCH_RISCV)(ARCH_OTHER) ) +FC_REFLECT(chainbase::environment, (debug)(os)(arch)(boost_version)(compiler) ) + namespace eosio { //declare operator<< and validate funciton for read_mode in the same namespace as read_mode itself @@ -117,26 +123,6 @@ using fc::flat_map; using boost::signals2::scoped_connection; -//using txn_msg_rate_limits = controller::txn_msg_rate_limits; - -#define CATCH_AND_CALL(NEXT)\ - catch ( const fc::exception& err ) {\ - 
NEXT(err.dynamic_copy_exception());\ - } catch ( const std::exception& e ) {\ - fc::exception fce( \ - FC_LOG_MESSAGE( warn, "rethrow ${what}: ", ("what",e.what())),\ - fc::std_exception_code,\ - BOOST_CORE_TYPEID(e).name(),\ - e.what() ) ;\ - NEXT(fce.dynamic_copy_exception());\ - } catch( ... ) {\ - fc::unhandled_exception e(\ - FC_LOG_MESSAGE(warn, "rethrow"),\ - std::current_exception());\ - NEXT(e.dynamic_copy_exception());\ - } - - class chain_plugin_impl { public: chain_plugin_impl() @@ -159,7 +145,7 @@ class chain_plugin_impl { fc::optional block_logger; fc::optional chain_config; fc::optional chain; - fc::optional chain_id; + fc::optional genesis; //txn_msg_rate_limits rate_limits; fc::optional wasm_runtime; fc::microseconds abi_serializer_max_time_ms; @@ -212,7 +198,15 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip ("protocol-features-dir", bpo::value()->default_value("protocol_features"), "the location of the protocol_features directory (absolute path or relative to application config dir)") ("checkpoint", bpo::value>()->composing(), "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.") - ("wasm-runtime", bpo::value()->value_name("wavm/wabt"), "Override default WASM runtime") + ("wasm-runtime", bpo::value()->value_name("runtime")->notifier([](const auto& vm){ +#ifndef EOSIO_EOS_VM_OC_DEVELOPER + //throwing an exception here (like EOS_ASSERT) is just gobbled up with a "Failed to initialize" error :( + if(vm == wasm_interface::vm_type::eos_vm_oc) { + elog("EOS VM OC is a tier-up compiler and works in conjunction with the configured base WASM runtime. 
Enable EOS VM OC via 'eos-vm-oc-enable' option"); + EOS_ASSERT(false, plugin_exception, ""); + } +#endif + }), "Override default WASM runtime") ("abi-serializer-max-time-ms", bpo::value()->default_value(config::default_abi_serializer_max_time_ms), "Override default maximum ABI serialization time allowed in ms") ("chain-state-db-size-mb", bpo::value()->default_value(config::default_state_size / (1024 * 1024)), "Maximum size (in MiB) of the chain state database") @@ -252,6 +246,8 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip "In \"light\" mode all incoming blocks headers will be fully validated; transactions in those validated blocks will be trusted \n") ("disable-ram-billing-notify-checks", bpo::bool_switch()->default_value(false), "Disable the check which subjectively fails a transaction if a contract bills more RAM to another account within the context of a notification handler (i.e. when the receiver is not the code of the action).") + ("maximum-variable-signature-length", bpo::value()->default_value(16384u), + "Subjectively limit the maximum length of variable components in a variable legnth signature to this size in bytes") ("trusted-producer", bpo::value>()->composing(), "Indicate a producer whose blocks headers signed by it will be fully validated, but transactions in those validated blocks will be trusted.") ("database-map-mode", bpo::value()->default_value(chainbase::pinnable_mapped_file::map_mode::mapped), "Database map mode (\"mapped\", \"heap\", or \"locked\").\n" @@ -266,6 +262,17 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip #ifdef __linux__ ("database-hugepage-path", bpo::value>()->composing(), "Optional path for database hugepages when in \"locked\" mode (may specify multiple times)") #endif + +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + ("eos-vm-oc-cache-size-mb", bpo::value()->default_value(eosvmoc::config().cache_size / (1024u*1024u)), "Maximum size (in MiB) of the EOS VM OC 
code cache") + ("eos-vm-oc-compile-threads", bpo::value()->default_value(1u)->notifier([](const auto t) { + if(t == 0) { + elog("eos-vm-oc-compile-threads must be set to a non-zero value"); + EOS_ASSERT(false, plugin_exception, ""); + } + }), "Number of threads to use for EOS VM OC tier-up") + ("eos-vm-oc-enable", bpo::bool_switch(), "Enable EOS VM OC tier-up runtime") +#endif ; // TODO: rate limiting @@ -285,6 +292,10 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip "extract genesis_state from blocks.log as JSON, print to console, and exit") ("extract-genesis-json", bpo::value(), "extract genesis_state from blocks.log as JSON, write into specified file, and exit") + ("print-build-info", bpo::bool_switch()->default_value(false), + "print build environment information to console as JSON and exit") + ("extract-build-info", bpo::value(), + "extract build environment information as JSON, write into specified file, and exit") ("fix-reversible-blocks", bpo::bool_switch()->default_value(false), "recovers reversible block database if that database is in a bad state") ("force-all-checks", bpo::bool_switch()->default_value(false), @@ -308,10 +319,12 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip } -#define LOAD_VALUE_SET(options, name, container) \ -if( options.count(name) ) { \ - const std::vector& ops = options[name].as>(); \ - std::copy(ops.begin(), ops.end(), std::inserter(container, container.end())); \ +#define LOAD_VALUE_SET(options, op_name, container) \ +if( options.count(op_name) ) { \ + const std::vector& ops = options[op_name].as>(); \ + for( const auto& v : ops ) { \ + container.emplace( eosio::chain::name( v ) ); \ + } \ } fc::time_point calculate_genesis_timestamp( string tstr ) { @@ -552,6 +565,27 @@ protocol_feature_set initialize_protocol_features( const fc::path& p, bool popul return pfs; } +void +chain_plugin::do_hard_replay(const variables_map& options) { + ilog( "Hard replay 
requested: deleting state database" ); + clear_directory_contents( my->chain_config->state_dir ); + auto backup_dir = block_log::repair_log( my->blocks_dir, options.at( "truncate-at-block" ).as()); + if( fc::exists( backup_dir / config::reversible_blocks_dir_name ) || + options.at( "fix-reversible-blocks" ).as()) { + // Do not try to recover reversible blocks if the directory does not exist, unless the option was explicitly provided. + if( !recover_reversible_blocks( backup_dir / config::reversible_blocks_dir_name, + my->chain_config->reversible_cache_size, + my->chain_config->blocks_dir / config::reversible_blocks_dir_name, + options.at( "truncate-at-block" ).as())) { + ilog( "Reversible blocks database was not corrupted. Copying from backup to blocks directory." ); + fc::copy( backup_dir / config::reversible_blocks_dir_name, + my->chain_config->blocks_dir / config::reversible_blocks_dir_name ); + fc::copy( backup_dir / config::reversible_blocks_dir_name / "shared_memory.bin", + my->chain_config->blocks_dir / config::reversible_blocks_dir_name / "shared_memory.bin" ); + } + } +} + void chain_plugin::plugin_initialize(const variables_map& options) { ilog("initializing chain plugin"); @@ -566,6 +600,28 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->chain_config = controller::config(); + if( options.at( "print-build-info" ).as() || options.count( "extract-build-info") ) { + if( options.at( "print-build-info" ).as() ) { + ilog( "Build environment JSON:\n${e}", ("e", json::to_pretty_string( chainbase::environment() )) ); + } + if( options.count( "extract-build-info") ) { + auto p = options.at( "extract-build-info" ).as(); + + if( p.is_relative()) { + p = bfs::current_path() / p; + } + + EOS_ASSERT( fc::json::save_to_file( chainbase::environment(), p, true ), misc_exception, + "Error occurred while writing build info JSON to '${path}'", + ("path", p.generic_string()) + ); + + ilog( "Saved build info JSON to '${path}'", ("path", 
p.generic_string()) ); + } + + EOS_THROW( node_management_success, "reported build environment information" ); + } + LOAD_VALUE_SET( options, "sender-bypass-whiteblacklist", my->chain_config->sender_bypass_whiteblacklist ); LOAD_VALUE_SET( options, "actor-whitelist", my->chain_config->actor_whitelist ); LOAD_VALUE_SET( options, "actor-blacklist", my->chain_config->actor_blacklist ); @@ -582,7 +638,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { EOS_ASSERT( pos != std::string::npos, plugin_config_exception, "Invalid entry in action-blacklist: '${a}'", ("a", a)); account_name code( a.substr( 0, pos )); action_name act( a.substr( pos + 2 )); - list.emplace( code.value, act.value ); + list.emplace( code, act ); } } @@ -673,19 +729,26 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->chain_config->disable_replay_opts = options.at( "disable-replay-opts" ).as(); my->chain_config->contracts_console = options.at( "contracts-console" ).as(); my->chain_config->allow_ram_billing_in_notify = options.at( "disable-ram-billing-notify-checks" ).as(); + my->chain_config->maximum_variable_signature_length = options.at( "maximum-variable-signature-length" ).as(); if( options.count( "extract-genesis-json" ) || options.at( "print-genesis-json" ).as()) { - genesis_state gs; + fc::optional gs; if( fc::exists( my->blocks_dir / "blocks.log" )) { gs = block_log::extract_genesis_state( my->blocks_dir ); + EOS_ASSERT( gs, + plugin_config_exception, + "Block log at '${path}' does not contain a genesis state, it only has the chain-id.", + ("path", (my->blocks_dir / "blocks.log").generic_string()) + ); } else { wlog( "No blocks.log found at '${p}'. 
Using default genesis state.", ("p", (my->blocks_dir / "blocks.log").generic_string())); + gs.emplace(); } if( options.at( "print-genesis-json" ).as()) { - ilog( "Genesis JSON:\n${genesis}", ("genesis", json::to_pretty_string( gs ))); + ilog( "Genesis JSON:\n${genesis}", ("genesis", json::to_pretty_string( *gs ))); } if( options.count( "extract-genesis-json" )) { @@ -695,7 +758,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { p = bfs::current_path() / p; } - EOS_ASSERT( fc::json::save_to_file( gs, p, true ), + EOS_ASSERT( fc::json::save_to_file( *gs, p, true ), misc_exception, "Error occurred while writing genesis JSON to '${path}'", ("path", p.generic_string()) @@ -727,25 +790,9 @@ void chain_plugin::plugin_initialize(const variables_map& options) { if( options.at( "truncate-at-block" ).as() > 0 ) wlog( "The --truncate-at-block option does not make sense when deleting all blocks." ); clear_directory_contents( my->chain_config->state_dir ); - fc::remove_all( my->blocks_dir ); + clear_directory_contents( my->blocks_dir ); } else if( options.at( "hard-replay-blockchain" ).as()) { - ilog( "Hard replay requested: deleting state database" ); - clear_directory_contents( my->chain_config->state_dir ); - auto backup_dir = block_log::repair_log( my->blocks_dir, options.at( "truncate-at-block" ).as()); - if( fc::exists( backup_dir / config::reversible_blocks_dir_name ) || - options.at( "fix-reversible-blocks" ).as()) { - // Do not try to recover reversible blocks if the directory does not exist, unless the option was explicitly provided. - if( !recover_reversible_blocks( backup_dir / config::reversible_blocks_dir_name, - my->chain_config->reversible_cache_size, - my->chain_config->blocks_dir / config::reversible_blocks_dir_name, - options.at( "truncate-at-block" ).as())) { - ilog( "Reversible blocks database was not corrupted. Copying from backup to blocks directory." 
); - fc::copy( backup_dir / config::reversible_blocks_dir_name, - my->chain_config->blocks_dir / config::reversible_blocks_dir_name ); - fc::copy( backup_dir / config::reversible_blocks_dir_name / "shared_memory.bin", - my->chain_config->blocks_dir / config::reversible_blocks_dir_name / "shared_memory.bin" ); - } - } + do_hard_replay(options); } else if( options.at( "replay-blockchain" ).as()) { ilog( "Replay requested: deleting state database" ); if( options.at( "truncate-at-block" ).as() > 0 ) @@ -784,63 +831,85 @@ void chain_plugin::plugin_initialize(const variables_map& options) { wlog("The --import-reversible-blocks option should be used by itself."); } + fc::optional chain_id; if (options.count( "snapshot" )) { my->snapshot_path = options.at( "snapshot" ).as(); EOS_ASSERT( fc::exists(*my->snapshot_path), plugin_config_exception, "Cannot load snapshot, ${name} does not exist", ("name", my->snapshot_path->generic_string()) ); // recover genesis information from the snapshot + // used for validation code below auto infile = std::ifstream(my->snapshot_path->generic_string(), (std::ios::in | std::ios::binary)); - auto reader = std::make_shared(infile); - reader->validate(); - reader->read_section([this]( auto §ion ){ - section.read_row(my->chain_config->genesis); - }); + istream_snapshot_reader reader(infile); + reader.validate(); + chain_id = controller::extract_chain_id(reader); infile.close(); EOS_ASSERT( options.count( "genesis-timestamp" ) == 0, plugin_config_exception, "--snapshot is incompatible with --genesis-timestamp as the snapshot contains genesis information"); + EOS_ASSERT( options.count( "genesis-json" ) == 0, + plugin_config_exception, + "--snapshot is incompatible with --genesis-json as the snapshot contains genesis information"); - if( options.count( "genesis-json" )) { - auto genesis_path = options.at( "genesis-json" ).as(); - if( genesis_path.is_relative() ) { - genesis_path = bfs::current_path() / genesis_path; - } - EOS_ASSERT( 
fc::is_regular_file( genesis_path ), - plugin_config_exception, - "Specified genesis file '${genesis}' does not exist.", - ("genesis", genesis_path.generic_string())); - auto genesis_file = fc::json::from_file( genesis_path ).as(); - EOS_ASSERT( my->chain_config->genesis == genesis_file, plugin_config_exception, - "Genesis state provided via command line arguments does not match the existing genesis state in the snapshot. " - "It is not necessary to provide a genesis state argument when loading a snapshot." - ); - } auto shared_mem_path = my->chain_config->state_dir / "shared_memory.bin"; - EOS_ASSERT( !fc::exists(shared_mem_path), + EOS_ASSERT( !fc::is_regular_file(shared_mem_path), plugin_config_exception, "Snapshot can only be used to initialize an empty database." ); if( fc::is_regular_file( my->blocks_dir / "blocks.log" )) { - auto log_genesis = block_log::extract_genesis_state(my->blocks_dir); - EOS_ASSERT( log_genesis.compute_chain_id() == my->chain_config->genesis.compute_chain_id(), - plugin_config_exception, - "Genesis information in blocks.log does not match genesis information in the snapshot"); + auto block_log_genesis = block_log::extract_genesis_state(my->blocks_dir); + if( block_log_genesis ) { + const auto& block_log_chain_id = block_log_genesis->compute_chain_id(); + EOS_ASSERT( *chain_id == block_log_chain_id, + plugin_config_exception, + "snapshot chain ID (${snapshot_chain_id}) does not match the chain ID from the genesis state in the block log (${block_log_chain_id})", + ("snapshot_chain_id", *chain_id) + ("block_log_chain_id", block_log_chain_id) + ); + } else { + const auto& block_log_chain_id = block_log::extract_chain_id(my->blocks_dir); + EOS_ASSERT( *chain_id == block_log_chain_id, + plugin_config_exception, + "snapshot chain ID (${snapshot_chain_id}) does not match the chain ID (${block_log_chain_id}) in the block log", + ("snapshot_chain_id", *chain_id) + ("block_log_chain_id", block_log_chain_id) + ); + } } } else { - bfs::path 
genesis_file; - bool genesis_timestamp_specified = false; - fc::optional existing_genesis; - if( fc::exists( my->blocks_dir / "blocks.log" ) ) { - my->chain_config->genesis = block_log::extract_genesis_state( my->blocks_dir ); - existing_genesis = my->chain_config->genesis; + chain_id = controller::extract_chain_id_from_db( my->chain_config->state_dir ); + + fc::optional block_log_genesis; + fc::optional block_log_chain_id; + + if( fc::is_regular_file( my->blocks_dir / "blocks.log" ) ) { + block_log_genesis = block_log::extract_genesis_state( my->blocks_dir ); + if( block_log_genesis ) { + block_log_chain_id = block_log_genesis->compute_chain_id(); + } else { + block_log_chain_id = block_log::extract_chain_id( my->blocks_dir ); + } + + if( chain_id ) { + EOS_ASSERT( *block_log_chain_id == *chain_id, block_log_exception, + "Chain ID in blocks.log (${block_log_chain_id}) does not match the existing " + " chain ID in state (${state_chain_id}).", + ("block_log_chain_id", *block_log_chain_id) + ("state_chain_id", *chain_id) + ); + } else if( block_log_genesis ) { + ilog( "Starting fresh blockchain state using genesis state extracted from blocks.log." ); + my->genesis = block_log_genesis; + // Delay setting chain_id until later so that the code handling genesis-json below can know + // that chain_id still only represents a chain ID extracted from the state (assuming it exists). 
+ } } - if( options.count( "genesis-json" )) { - genesis_file = options.at( "genesis-json" ).as(); + if( options.count( "genesis-json" ) ) { + bfs::path genesis_file = options.at( "genesis-json" ).as(); if( genesis_file.is_relative()) { genesis_file = bfs::current_path() / genesis_file; } @@ -850,33 +919,72 @@ void chain_plugin::plugin_initialize(const variables_map& options) { "Specified genesis file '${genesis}' does not exist.", ("genesis", genesis_file.generic_string())); - my->chain_config->genesis = fc::json::from_file( genesis_file ).as(); - } + genesis_state provided_genesis = fc::json::from_file( genesis_file ).as(); - if( options.count( "genesis-timestamp" ) ) { - my->chain_config->genesis.initial_timestamp = calculate_genesis_timestamp( options.at( "genesis-timestamp" ).as() ); - genesis_timestamp_specified = true; - } + if( options.count( "genesis-timestamp" ) ) { + provided_genesis.initial_timestamp = calculate_genesis_timestamp( options.at( "genesis-timestamp" ).as() ); - if( !existing_genesis ) { - if( !genesis_file.empty() ) { - if( genesis_timestamp_specified ) { - ilog( "Using genesis state provided in '${genesis}' but with adjusted genesis timestamp", - ("genesis", genesis_file.generic_string()) ); + ilog( "Using genesis state provided in '${genesis}' but with adjusted genesis timestamp", + ("genesis", genesis_file.generic_string()) ); + } else { + ilog( "Using genesis state provided in '${genesis}'", ("genesis", genesis_file.generic_string())); + } + + if( block_log_genesis ) { + EOS_ASSERT( *block_log_genesis == provided_genesis, plugin_config_exception, + "Genesis state, provided via command line arguments, does not match the existing genesis state" + " in blocks.log. It is not necessary to provide genesis state arguments when a full blocks.log " + "file already exists." 
+ ); + } else { + const auto& provided_genesis_chain_id = provided_genesis.compute_chain_id(); + if( chain_id ) { + EOS_ASSERT( provided_genesis_chain_id == *chain_id, plugin_config_exception, + "Genesis state, provided via command line arguments, has a chain ID (${provided_genesis_chain_id}) " + "that does not match the existing chain ID in the database state (${state_chain_id}). " + "It is not necessary to provide genesis state arguments when an initialized database state already exists.", + ("provided_genesis_chain_id", provided_genesis_chain_id) + ("state_chain_id", *chain_id) + ); } else { - ilog( "Using genesis state provided in '${genesis}'", ("genesis", genesis_file.generic_string())); + if( block_log_chain_id ) { + EOS_ASSERT( provided_genesis_chain_id == *block_log_chain_id, plugin_config_exception, + "Genesis state, provided via command line arguments, has a chain ID (${provided_genesis_chain_id}) " + "that does not match the existing chain ID in blocks.log (${block_log_chain_id}).", + ("provided_genesis_chain_id", provided_genesis_chain_id) + ("block_log_chain_id", *block_log_chain_id) + ); + } + + chain_id = provided_genesis_chain_id; + + ilog( "Starting fresh blockchain state using provided genesis state." ); + my->genesis = std::move(provided_genesis); } - wlog( "Starting up fresh blockchain with provided genesis state." ); - } else if( genesis_timestamp_specified ) { - wlog( "Starting up fresh blockchain with default genesis state but with adjusted genesis timestamp." ); - } else { - wlog( "Starting up fresh blockchain with default genesis state." ); } } else { - EOS_ASSERT( my->chain_config->genesis == *existing_genesis, plugin_config_exception, - "Genesis state provided via command line arguments does not match the existing genesis state in blocks.log. " - "It is not necessary to provide genesis state arguments when a blocks.log file already exists." 
- ); + EOS_ASSERT( options.count( "genesis-timestamp" ) == 0, + plugin_config_exception, + "--genesis-timestamp is only valid if also passed in with --genesis-json"); + } + + if( !chain_id ) { + if( my->genesis ) { + // Uninitialized state database and genesis state extracted from block log + chain_id = my->genesis->compute_chain_id(); + } else { + // Uninitialized state database and no genesis state provided + + EOS_ASSERT( !block_log_chain_id, plugin_config_exception, + "Genesis state is necessary to initialize fresh blockchain state but genesis state could not be " + "found in the blocks log. Please either load from snapshot or find a blocks log that starts " + "from genesis." + ); + + ilog( "Starting fresh blockchain state using default genesis state." ); + my->genesis.emplace(); + chain_id = my->genesis->compute_chain_id(); + } } } @@ -894,8 +1002,16 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->chain_config->db_hugepage_paths = options.at("database-hugepage-path").as>(); #endif - my->chain.emplace( *my->chain_config, std::move(pfs) ); - my->chain_id.emplace( my->chain->get_chain_id()); +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if( options.count("eos-vm-oc-cache-size-mb") ) + my->chain_config->eosvmoc_config.cache_size = options.at( "eos-vm-oc-cache-size-mb" ).as() * 1024u * 1024u; + if( options.count("eos-vm-oc-compile-threads") ) + my->chain_config->eosvmoc_config.threads = options.at("eos-vm-oc-compile-threads").as(); + if( options["eos-vm-oc-enable"].as() ) + my->chain_config->eosvmoc_tierup = true; +#endif + + my->chain.emplace( *my->chain_config, std::move(pfs), *chain_id ); // set up method providers my->get_block_by_number_provider = app().get_method().register_provider( @@ -968,6 +1084,8 @@ void chain_plugin::plugin_startup() auto reader = std::make_shared(infile); my->chain->startup(shutdown, reader); infile.close(); + } else if( my->genesis ) { + my->chain->startup(shutdown, *my->genesis); } else { 
my->chain->startup(shutdown); } @@ -982,8 +1100,13 @@ void chain_plugin::plugin_startup() ilog("starting chain in read/write mode"); } - ilog("Blockchain started; head block is #${num}, genesis timestamp is ${ts}", - ("num", my->chain->head_block_num())("ts", (std::string)my->chain_config->genesis.initial_timestamp)); + if (my->genesis) { + ilog("Blockchain started; head block is #${num}, genesis timestamp is ${ts}", + ("num", my->chain->head_block_num())("ts", (std::string)my->genesis->initial_timestamp)); + } + else { + ilog("Blockchain started; head block is #${num}", ("num", my->chain->head_block_num())); + } my->chain_config.reset(); } FC_CAPTURE_AND_RETHROW() } @@ -1014,12 +1137,8 @@ void chain_plugin::accept_block(const signed_block_ptr& block ) { my->incoming_block_sync_method(block); } -void chain_plugin::accept_transaction(const chain::packed_transaction& trx, next_function next) { - my->incoming_transaction_async_method(std::make_shared(std::make_shared(trx)), false, std::forward(next)); -} - -void chain_plugin::accept_transaction(const chain::transaction_metadata_ptr& trx, next_function next) { - my->incoming_transaction_async_method(trx, false, std::forward(next)); +void chain_plugin::accept_transaction(const chain::packed_transaction_ptr& trx, next_function next) { + my->incoming_transaction_async_method(trx, false, std::move(next)); } bool chain_plugin::block_is_on_preferred_chain(const block_id_type& block_id) { @@ -1046,7 +1165,7 @@ bool chain_plugin::recover_reversible_blocks( const fc::path& db_dir, uint32_t c } catch( ... ) { throw; } - // Reversible block database is dirty. So back it up (unless already moved) and then create a new one. + // Reversible block database is dirty (or incompatible). So back it up (unless already moved) and then create a new one. auto reversible_dir = fc::canonical( db_dir ); if( reversible_dir.filename().generic_string() == "." 
) { @@ -1077,7 +1196,16 @@ bool chain_plugin::recover_reversible_blocks( const fc::path& db_dir, uint32_t c ilog( "Reconstructing '${reversible_dir}' from backed up reversible directory", ("reversible_dir", reversible_dir) ); - chainbase::database old_reversible( backup_dir, database::read_only, 0, true ); + optional old_reversible; + + try { + old_reversible = chainbase::database( backup_dir, database::read_only, 0, true ); + } catch (const std::runtime_error &) { + // since we are allowing for dirty, it must be incompatible + ilog( "Did not recover any reversible blocks since reversible database incompatible"); + return true; + } + chainbase::database new_reversible( reversible_dir, database::read_write, cache_size ); std::fstream reversible_blocks; reversible_blocks.open( (reversible_dir.parent_path() / std::string("portable-reversible-blocks-").append( now ) ).generic_string().c_str(), @@ -1086,9 +1214,9 @@ bool chain_plugin::recover_reversible_blocks( const fc::path& db_dir, uint32_t c uint32_t num = 0; uint32_t start = 0; uint32_t end = 0; - old_reversible.add_index(); + old_reversible->add_index(); new_reversible.add_index(); - const auto& ubi = old_reversible.get_index(); + const auto& ubi = old_reversible->get_index(); auto itr = ubi.begin(); if( itr != ubi.end() ) { start = itr->blocknum; @@ -1231,8 +1359,7 @@ controller& chain_plugin::chain() { return *my->chain; } const controller& chain_plugin::chain() const { return *my->chain; } chain::chain_id_type chain_plugin::get_chain_id()const { - EOS_ASSERT( my->chain_id.valid(), chain_id_type_exception, "chain ID has not been initialized yet" ); - return *my->chain_id; + return my->chain->get_chain_id(); } fc::microseconds chain_plugin::get_abi_serializer_max_time() const { @@ -1254,6 +1381,7 @@ void chain_plugin::log_guard_exception(const chain::guard_exception&e ) { void chain_plugin::handle_guard_exception(const chain::guard_exception& e) { log_guard_exception(e); + elog("database chain::guard_exception, 
quiting..."); // log string searched for in: tests/nodeos_under_min_avail_ram.py // quit the app app().quit(); } @@ -1302,7 +1430,8 @@ read_only::get_info_results read_only::get_info(const read_only::get_info_params //__builtin_popcountll(db.get_dynamic_global_properties().recent_slots_filled) / 64.0, app().version_string(), db.fork_db_pending_head_block_num(), - db.fork_db_pending_head_block_id() + db.fork_db_pending_head_block_id(), + app().full_version_string() }; } @@ -1377,7 +1506,7 @@ read_only::get_activated_protocol_features( const read_only::get_activated_proto uint64_t read_only::get_table_index_name(const read_only::get_table_rows_params& p, bool& primary) { using boost::algorithm::starts_with; // see multi_index packing of index name - const uint64_t table = p.table; + const uint64_t table = p.table.to_uint64_t(); uint64_t index = table & 0xFFFFFFFFFFFFFFF0ULL; EOS_ASSERT( index == table, chain::contract_table_query_exception, "Unsupported table name: ${n}", ("n", p.table) ); @@ -1430,7 +1559,7 @@ uint64_t convert_to_type(const string& str, const string& desc) { auto trimmed_str = str; boost::trim(trimmed_str); name s(trimmed_str); - return s.value; + return s.to_uint64_t(); } catch( ... 
) { } if (str.find(',') != string::npos) { // fix #6274 only match formats like 4,EOS @@ -1462,6 +1591,43 @@ double convert_to_type(const string& str, const string& desc) { return val; } +template +string convert_to_string(const Type& source, const string& key_type, const string& encode_type, const string& desc) { + try { + return fc::variant(source).as(); + } FC_RETHROW_EXCEPTIONS(warn, "Could not convert ${desc} from '${source}' to string.", ("desc", desc)("source",source) ) +} + +template<> +string convert_to_string(const chain::key256_t& source, const string& key_type, const string& encode_type, const string& desc) { + try { + if (key_type == chain_apis::sha256 || (key_type == chain_apis::i256 && encode_type == chain_apis::hex)) { + auto byte_array = fixed_bytes<32>(source).extract_as_byte_array(); + fc::sha256 val(reinterpret_cast(byte_array.data()), byte_array.size()); + return std::string(val); + } else if (key_type == chain_apis::i256) { + auto byte_array = fixed_bytes<32>(source).extract_as_byte_array(); + fc::sha256 val(reinterpret_cast(byte_array.data()), byte_array.size()); + return std::string("0x") + std::string(val); + } else if (key_type == chain_apis::ripemd160) { + auto byte_array = fixed_bytes<20>(source).extract_as_byte_array(); + fc::ripemd160 val; + memcpy(val._hash, byte_array.data(), byte_array.size() ); + return std::string(val); + } + EOS_ASSERT( false, chain_type_exception, "Incompatible key_type and encode_type for key256_t next_key" ); + + } FC_RETHROW_EXCEPTIONS(warn, "Could not convert ${desc} source '${source}' to string.", ("desc", desc)("source",source) ) +} + +template<> +string convert_to_string(const float128_t& source, const string& key_type, const string& encode_type, const string& desc) { + try { + float64_t f = f128_to_f64(source); + return fc::variant(f).as(); + } FC_RETHROW_EXCEPTIONS(warn, "Could not convert ${desc} from '${source}' to string.", ("desc", desc)("source",source) ) +} + abi_def get_abi( const controller& db, 
const name& account ) { const auto &d = db.db(); const account_object *code_accnt = d.find(account); @@ -1521,6 +1687,11 @@ read_only::get_table_rows_result read_only::get_table_rows( const read_only::get }); } else if (p.key_type == chain_apis::float128) { + if ( p.encode_type == chain_apis::hex) { + return get_table_rows_by_seckey(p, abi, [](uint128_t v)->float128_t{ + return *reinterpret_cast(&v); + }); + } return get_table_rows_by_seckey(p, abi, [](double v)->float128_t{ float64_t f = *(float64_t *)&v; float128_t f128; @@ -1546,18 +1717,18 @@ read_only::get_table_by_scope_result read_only::get_table_by_scope( const read_o const auto& d = db.db(); const auto& idx = d.get_index(); - auto lower_bound_lookup_tuple = std::make_tuple( p.code.value, std::numeric_limits::lowest(), p.table.value ); - auto upper_bound_lookup_tuple = std::make_tuple( p.code.value, std::numeric_limits::max(), - (p.table.empty() ? std::numeric_limits::max() : p.table.value) ); + auto lower_bound_lookup_tuple = std::make_tuple( p.code, name(std::numeric_limits::lowest()), p.table ); + auto upper_bound_lookup_tuple = std::make_tuple( p.code, name(std::numeric_limits::max()), + (p.table.empty() ? 
name(std::numeric_limits::max()) : p.table) ); if( p.lower_bound.size() ) { uint64_t scope = convert_to_type(p.lower_bound, "lower_bound scope"); - std::get<1>(lower_bound_lookup_tuple) = scope; + std::get<1>(lower_bound_lookup_tuple) = name(scope); } if( p.upper_bound.size() ) { uint64_t scope = convert_to_type(p.upper_bound, "upper_bound scope"); - std::get<1>(upper_bound_lookup_tuple) = scope; + std::get<1>(upper_bound_lookup_tuple) = name(scope); } if( upper_bound_lookup_tuple < lower_bound_lookup_tuple ) @@ -1574,7 +1745,7 @@ read_only::get_table_by_scope_result read_only::get_table_by_scope( const read_o ++count; } if( itr != end_itr ) { - result.more = string(itr->scope); + result.more = itr->scope.to_string(); } }; @@ -1592,7 +1763,7 @@ read_only::get_table_by_scope_result read_only::get_table_by_scope( const read_o vector read_only::get_currency_balance( const read_only::get_currency_balance_params& p )const { const abi_def abi = eosio::chain_apis::get_abi( db, p.code ); - (void)get_table_type( abi, "accounts" ); + (void)get_table_type( abi, name("accounts") ); vector results; walk_key_value_table(p.code, p.account, N(accounts), [&](const key_value_object& obj){ @@ -1619,11 +1790,11 @@ fc::variant read_only::get_currency_stats( const read_only::get_currency_stats_p fc::mutable_variant_object results; const abi_def abi = eosio::chain_apis::get_abi( db, p.code ); - (void)get_table_type( abi, "stat" ); + (void)get_table_type( abi, name("stat") ); uint64_t scope = ( eosio::chain::string_to_symbol( 0, boost::algorithm::to_upper_copy(p.symbol).c_str() ) >> 8 ); - walk_key_value_table(p.code, scope, N(stat), [&](const key_value_object& obj){ + walk_key_value_table(p.code, name(scope), N(stat), [&](const key_value_object& obj){ EOS_ASSERT( obj.value.size() >= sizeof(read_only::get_currency_stats_result), chain::asset_type_exception, "Invalid data on table"); fc::datastream ds(obj.value.data(), obj.value.size()); @@ -1648,7 +1819,7 @@ fc::variant get_global_row( 
const database& db, const abi_def& abi, const abi_se EOS_ASSERT(table_id, chain::contract_table_query_exception, "Missing table global"); const auto& kv_index = db.get_index(); - const auto it = kv_index.find(boost::make_tuple(table_id->id, N(global))); + const auto it = kv_index.find(boost::make_tuple(table_id->id, N(global).to_uint64_t())); EOS_ASSERT(it != kv_index.end(), chain::contract_table_query_exception, "Missing row in table global"); vector data; @@ -1669,7 +1840,7 @@ read_only::get_producers_result read_only::get_producers( const read_only::get_p const auto* const table_id = d.find( boost::make_tuple(config::system_account_name, config::system_account_name, N(producers))); const auto* const secondary_table_id = d.find( - boost::make_tuple(config::system_account_name, config::system_account_name, N(producers) | secondary_index_num)); + boost::make_tuple(config::system_account_name, config::system_account_name, name(N(producers).to_uint64_t() | secondary_index_num))); EOS_ASSERT(table_id && secondary_table_id, chain::contract_table_query_exception, "Missing producers table"); const auto& kv_index = d.get_index(); @@ -1682,13 +1853,13 @@ read_only::get_producers_result read_only::get_producers( const read_only::get_p vector data; auto it = [&]{ - if(lower.value == 0) + if(lower.to_uint64_t() == 0) return secondary_index_by_secondary.lower_bound( boost::make_tuple(secondary_table_id->id, to_softfloat64(std::numeric_limits::lowest()), 0)); else return secondary_index.project( secondary_index_by_primary.lower_bound( - boost::make_tuple(secondary_table_id->id, lower.value))); + boost::make_tuple(secondary_table_id->id, lower.to_uint64_t()))); }(); for( ; it != secondary_index_by_secondary.end() && it->t_id == secondary_table_id->id; ++it ) { @@ -1709,12 +1880,20 @@ read_only::get_producers_result read_only::get_producers( const read_only::get_p read_only::get_producers_result result; for (auto p : db.active_producers().producers) { - fc::variant row = 
fc::mutable_variant_object() + auto row = fc::mutable_variant_object() ("owner", p.producer_name) - ("producer_key", p.block_signing_key) + ("producer_authority", p.authority) ("url", "") ("total_votes", 0.0f); + // detect a legacy key and maintain API compatibility for those entries + if (p.authority.contains()) { + const auto& auth = p.authority.get(); + if (auth.keys.size() == 1 && auth.keys.back().weight == auth.threshold) { + row("producer_key", auth.keys.back().key); + } + } + result.rows.push_back(row); } @@ -1900,13 +2079,12 @@ void read_write::push_transaction(const read_write::push_transaction_params& par try { auto pretty_input = std::make_shared(); auto resolver = make_resolver(this, abi_serializer_max_time); - transaction_metadata_ptr ptrx; try { abi_serializer::from_variant(params, *pretty_input, resolver, abi_serializer_max_time); - ptrx = std::make_shared( pretty_input ); } EOS_RETHROW_EXCEPTIONS(chain::packed_transaction_type_exception, "Invalid packed transaction") - app().get_method()(ptrx, true, [this, next](const fc::static_variant& result) -> void{ + app().get_method()(pretty_input, true, + [this, next](const fc::static_variant& result) -> void { if (result.contains()) { next(result.get()); } else { @@ -2020,13 +2198,12 @@ void read_write::send_transaction(const read_write::send_transaction_params& par try { auto pretty_input = std::make_shared(); auto resolver = make_resolver(this, abi_serializer_max_time); - transaction_metadata_ptr ptrx; try { abi_serializer::from_variant(params, *pretty_input, resolver, abi_serializer_max_time); - ptrx = std::make_shared( pretty_input ); } EOS_RETHROW_EXCEPTIONS(chain::packed_transaction_type_exception, "Invalid packed transaction") - app().get_method()(ptrx, true, [this, next](const fc::static_variant& result) -> void{ + app().get_method()(pretty_input, true, + [this, next](const fc::static_variant& result) -> void { if (result.contains()) { next(result.get()); } else { @@ -2207,7 +2384,7 @@ 
read_only::get_account_results read_only::get_account( const get_account_params& t_id = d.find(boost::make_tuple( config::system_account_name, params.account_name, N(userres) )); if (t_id != nullptr) { const auto &idx = d.get_index(); - auto it = idx.find(boost::make_tuple( t_id->id, params.account_name )); + auto it = idx.find(boost::make_tuple( t_id->id, params.account_name.to_uint64_t() )); if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); @@ -2218,7 +2395,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& t_id = d.find(boost::make_tuple( config::system_account_name, params.account_name, N(delband) )); if (t_id != nullptr) { const auto &idx = d.get_index(); - auto it = idx.find(boost::make_tuple( t_id->id, params.account_name )); + auto it = idx.find(boost::make_tuple( t_id->id, params.account_name.to_uint64_t() )); if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); @@ -2229,7 +2406,7 @@ read_only::get_account_results read_only::get_account( const get_account_params& t_id = d.find(boost::make_tuple( config::system_account_name, params.account_name, N(refunds) )); if (t_id != nullptr) { const auto &idx = d.get_index(); - auto it = idx.find(boost::make_tuple( t_id->id, params.account_name )); + auto it = idx.find(boost::make_tuple( t_id->id, params.account_name.to_uint64_t() )); if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); @@ -2240,13 +2417,24 @@ read_only::get_account_results read_only::get_account( const get_account_params& t_id = d.find(boost::make_tuple( config::system_account_name, config::system_account_name, N(voters) )); if (t_id != nullptr) { const auto &idx = d.get_index(); - auto it = idx.find(boost::make_tuple( t_id->id, params.account_name )); + auto it = idx.find(boost::make_tuple( t_id->id, params.account_name.to_uint64_t() )); if ( it != idx.end() ) { vector data; copy_inline_row(*it, data); result.voter_info = abis.binary_to_variant( "voter_info", data, 
abi_serializer_max_time, shorten_abi_errors ); } } + + t_id = d.find(boost::make_tuple( config::system_account_name, config::system_account_name, N(rexbal) )); + if (t_id != nullptr) { + const auto &idx = d.get_index(); + auto it = idx.find(boost::make_tuple( t_id->id, params.account_name.to_uint64_t() )); + if( it != idx.end() ) { + vector data; + copy_inline_row(*it, data); + result.rex_info = abis.binary_to_variant( "rex_balance", data, abi_serializer_max_time, shorten_abi_errors ); + } + } } return result; } diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 544456afd6c..e00deb709e2 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -15,6 +11,7 @@ #include #include #include +#include #include #include @@ -69,6 +66,16 @@ uint64_t convert_to_type(const string& str, const string& desc); template<> double convert_to_type(const string& str, const string& desc); +template +string convert_to_string(const Type& source, const string& key_type, const string& encode_type, const string& desc); + +template<> +string convert_to_string(const chain::key256_t& source, const string& key_type, const string& encode_type, const string& desc); + +template<> +string convert_to_string(const float128_t& source, const string& key_type, const string& encode_type, const string& desc); + + class read_only { const controller& db; const fc::microseconds abi_serializer_max_time; @@ -106,6 +113,7 @@ class read_only { optional server_version_string; optional fork_db_head_block_num; optional fork_db_head_block_id; + optional server_full_version_string; }; get_info_results get_info(const get_info_params&) const; @@ -155,6 +163,7 @@ class read_only { fc::variant self_delegated_bandwidth; 
fc::variant refund_request; fc::variant voter_info; + fc::variant rex_info; }; struct get_account_params { @@ -296,13 +305,14 @@ class read_only { struct get_table_rows_result { vector rows; ///< one row per item, either encoded as hex String or JSON object bool more = false; ///< true if last element in data is not the end and sizeof data() < limit + string next_key; ///< fill lower_bound with this value to fetch more rows }; get_table_rows_result get_table_rows( const get_table_rows_params& params )const; struct get_table_by_scope_params { name code; // mandatory - name table = 0; // optional, act as filter + name table; // optional, act as filter string lower_bound; // lower bound of scope, optional string upper_bound; // upper bound of scope, optional uint32_t limit = 10; @@ -413,14 +423,14 @@ class read_only { read_only::get_table_rows_result result; const auto& d = db.db(); - uint64_t scope = convert_to_type(p.scope, "scope"); + name scope{ convert_to_type(p.scope, "scope") }; abi_serializer abis; abis.set_abi(abi, abi_serializer_max_time); bool primary = false; const uint64_t table_with_index = get_table_index_name(p, primary); const auto* t_id = d.find(boost::make_tuple(p.code, scope, p.table)); - const auto* index_t_id = d.find(boost::make_tuple(p.code, scope, table_with_index)); + const auto* index_t_id = d.find(boost::make_tuple(p.code, scope, name(table_with_index))); if( t_id != nullptr && index_t_id != nullptr ) { using secondary_key_type = std::result_of_t; static_assert( std::is_same::value, "Return type of conv does not match type of secondary key for IndexType" ); @@ -484,6 +494,7 @@ class read_only { } if( itr != end_itr ) { result.more = true; + result.next_key = convert_to_string(itr->secondary_key, p.key_type, p.encode_type, "next_key - next lower bound"); } }; @@ -507,7 +518,7 @@ class read_only { abi_serializer abis; abis.set_abi(abi, abi_serializer_max_time); - const auto* t_id = d.find(boost::make_tuple(p.code, scope, p.table)); + const 
auto* t_id = d.find(boost::make_tuple(p.code, name(scope), p.table)); if( t_id != nullptr ) { const auto& idx = d.get_index(); auto lower_bound_lookup_tuple = std::make_tuple( t_id->id, std::numeric_limits::lowest() ); @@ -516,7 +527,7 @@ class read_only { if( p.lower_bound.size() ) { if( p.key_type == "name" ) { name s(p.lower_bound); - std::get<1>(lower_bound_lookup_tuple) = s.value; + std::get<1>(lower_bound_lookup_tuple) = s.to_uint64_t(); } else { auto lv = convert_to_type( p.lower_bound, "lower_bound" ); std::get<1>(lower_bound_lookup_tuple) = lv; @@ -526,7 +537,7 @@ class read_only { if( p.upper_bound.size() ) { if( p.key_type == "name" ) { name s(p.upper_bound); - std::get<1>(upper_bound_lookup_tuple) = s.value; + std::get<1>(upper_bound_lookup_tuple) = s.to_uint64_t(); } else { auto uv = convert_to_type( p.upper_bound, "upper_bound" ); std::get<1>(upper_bound_lookup_tuple) = uv; @@ -558,6 +569,7 @@ class read_only { } if( itr != end_itr ) { result.more = true; + result.next_key = convert_to_string(itr->primary_key, p.key_type, p.encode_type, "next_key - next lower bound"); } }; @@ -628,13 +640,13 @@ class read_write { using index_type = chain::index256_index; static auto function() { return [](const input_type& v) { - chain::key256_t k; -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wstrict-aliasing" - k[0] = ((uint128_t *)&v._hash)[0]; //0-127 - k[1] = ((uint128_t *)&v._hash)[1]; //127-256 -#pragma GCC diagnostic pop - return k; + // The input is in big endian, i.e. f58262c8005bb64b8f99ec6083faf050c502d099d9929ae37ffed2fe1bb954fb + // fixed_bytes will convert the input to array of 2 uint128_t in little endian, i.e. 
50f0fa8360ec998f4bb65b00c86282f5 fb54b91bfed2fe7fe39a92d999d002c5 + // which is the format used by secondary index + uint8_t buffer[32]; + memcpy(buffer, v.data(), 32); + fixed_bytes<32> fb(buffer); + return chain::key256_t(fb.get_array()); }; } }; @@ -646,10 +658,13 @@ class read_write { using index_type = chain::index256_index; static auto function() { return [](const input_type& v) { - chain::key256_t k; - memset(k.data(), 0, sizeof(k)); - memcpy(k.data(), v._hash, sizeof(v._hash)); - return k; + // The input is in big endian, i.e. 83a83a3876c64c33f66f33c54f1869edef5b5d4a000000000000000000000000 + // fixed_bytes will convert the input to array of 2 uint128_t in little endian, i.e. ed69184fc5336ff6334cc676383aa883 0000000000000000000000004a5d5bef + // which is the format used by secondary index + uint8_t buffer[20]; + memcpy(buffer, v.data(), 20); + fixed_bytes<20> fb(buffer); + return chain::key256_t(fb.get_array()); }; } }; @@ -660,9 +675,14 @@ class read_write { using index_type = chain::index256_index; static auto function() { return [](const input_type v) { + // The input is in little endian of uint256_t, i.e. fb54b91bfed2fe7fe39a92d999d002c550f0fa8360ec998f4bb65b00c86282f5 + // the following will convert the input to array of 2 uint128_t in little endian, i.e. 
50f0fa8360ec998f4bb65b00c86282f5 fb54b91bfed2fe7fe39a92d999d002c5 + // which is the format used by secondary index chain::key256_t k; - k[0] = ((uint128_t *)&v)[0]; //0-127 - k[1] = ((uint128_t *)&v)[1]; //127-256 + uint8_t buffer[32]; + boost::multiprecision::export_bits(v, buffer, 8, false); + memcpy(&k[0], buffer + 16, 16); + memcpy(&k[1], buffer, 16); return k; }; } @@ -687,8 +707,7 @@ class chain_plugin : public plugin { chain_apis::read_write get_read_write_api() { return chain_apis::read_write(chain(), get_abi_serializer_max_time()); } void accept_block( const chain::signed_block_ptr& block ); - void accept_transaction(const chain::packed_transaction& trx, chain::plugin_interface::next_function next); - void accept_transaction(const chain::transaction_metadata_ptr& trx, chain::plugin_interface::next_function next); + void accept_transaction(const chain::packed_transaction_ptr& trx, chain::plugin_interface::next_function next); bool block_is_on_preferred_chain(const chain::block_id_type& block_id); @@ -716,6 +735,8 @@ class chain_plugin : public plugin { fc::microseconds get_abi_serializer_max_time() const; static void handle_guard_exception(const chain::guard_exception& e); + void do_hard_replay(const variables_map& options); + static void handle_db_exhaustion(); static void handle_bad_alloc(); private: @@ -729,7 +750,10 @@ class chain_plugin : public plugin { FC_REFLECT( eosio::chain_apis::permission, (perm_name)(parent)(required_auth) ) FC_REFLECT(eosio::chain_apis::empty, ) FC_REFLECT(eosio::chain_apis::read_only::get_info_results, -(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string)(fork_db_head_block_num)(fork_db_head_block_id) ) + (server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id) + 
(head_block_id)(head_block_time)(head_block_producer) + (virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit) + (server_version_string)(fork_db_head_block_num)(fork_db_head_block_id)(server_full_version_string) ) FC_REFLECT(eosio::chain_apis::read_only::get_activated_protocol_features_params, (lower_bound)(upper_bound)(limit)(search_by_block_num)(reverse) ) FC_REFLECT(eosio::chain_apis::read_only::get_activated_protocol_features_results, (activated_protocol_features)(more) ) FC_REFLECT(eosio::chain_apis::read_only::get_block_params, (block_num_or_id)) @@ -738,7 +762,7 @@ FC_REFLECT(eosio::chain_apis::read_only::get_block_header_state_params, (block_n FC_REFLECT( eosio::chain_apis::read_write::push_transaction_results, (transaction_id)(processed) ) FC_REFLECT( eosio::chain_apis::read_only::get_table_rows_params, (json)(code)(scope)(table)(table_key)(lower_bound)(upper_bound)(limit)(key_type)(index_position)(encode_type)(reverse)(show_payer) ) -FC_REFLECT( eosio::chain_apis::read_only::get_table_rows_result, (rows)(more) ); +FC_REFLECT( eosio::chain_apis::read_only::get_table_rows_result, (rows)(more)(next_key) ); FC_REFLECT( eosio::chain_apis::read_only::get_table_by_scope_params, (code)(table)(lower_bound)(upper_bound)(limit)(reverse) ) FC_REFLECT( eosio::chain_apis::read_only::get_table_by_scope_result_row, (code)(scope)(table)(payer)(count)); @@ -760,7 +784,7 @@ FC_REFLECT( eosio::chain_apis::read_only::get_scheduled_transactions_result, (tr FC_REFLECT( eosio::chain_apis::read_only::get_account_results, (account_name)(head_block_num)(head_block_time)(privileged)(last_code_update)(created) (core_liquid_balance)(ram_quota)(net_weight)(cpu_weight)(net_limit)(cpu_limit)(ram_usage)(permissions) - (total_resources)(self_delegated_bandwidth)(refund_request)(voter_info) ) + (total_resources)(self_delegated_bandwidth)(refund_request)(voter_info)(rex_info) ) // @swap code_hash FC_REFLECT( eosio::chain_apis::read_only::get_code_results, 
(account_name)(code_hash)(wast)(wasm)(abi) ) FC_REFLECT( eosio::chain_apis::read_only::get_code_hash_results, (account_name)(code_hash) ) diff --git a/plugins/db_size_api_plugin/db_size_api_plugin.cpp b/plugins/db_size_api_plugin/db_size_api_plugin.cpp index 8c6df9566fe..1f5cff033b8 100644 --- a/plugins/db_size_api_plugin/db_size_api_plugin.cpp +++ b/plugins/db_size_api_plugin/db_size_api_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/plugins/db_size_api_plugin/include/eosio/db_size_api_plugin/db_size_api_plugin.hpp b/plugins/db_size_api_plugin/include/eosio/db_size_api_plugin/db_size_api_plugin.hpp index 54a2d8ef63b..76495b23381 100644 --- a/plugins/db_size_api_plugin/include/eosio/db_size_api_plugin/db_size_api_plugin.hpp +++ b/plugins/db_size_api_plugin/include/eosio/db_size_api_plugin/db_size_api_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/plugins/eosio-make_new_plugin.sh b/plugins/eosio-make_new_plugin.sh index 241edbf73c9..77937ee47e0 100755 --- a/plugins/eosio-make_new_plugin.sh +++ b/plugins/eosio-make_new_plugin.sh @@ -9,13 +9,13 @@ fi pluginName=$1 echo Copying template... -cp -r template_plugin $pluginName +cp -R template_plugin $pluginName echo Renaming files/directories... mv $pluginName/include/eosio/template_plugin $pluginName/include/eosio/$pluginName for file in `find $pluginName -type f -name '*template_plugin*'`; do mv $file `sed s/template_plugin/$pluginName/g <<< $file`; done; echo Renaming in files... -find $pluginName -type f -exec sed -i "s/template_plugin/$pluginName/g" {} \; +for file in `find $pluginName -type f`; do sed -i.bak -e "s/template_plugin/$pluginName/g" "$file" && rm "$file.bak"; done echo "Done! $pluginName is ready. Don't forget to add it to CMakeLists.txt!" 
diff --git a/plugins/faucet_testnet_plugin/faucet_testnet_plugin.cpp b/plugins/faucet_testnet_plugin/faucet_testnet_plugin.cpp index 32db7146f6d..c4ddba45425 100644 --- a/plugins/faucet_testnet_plugin/faucet_testnet_plugin.cpp +++ b/plugins/faucet_testnet_plugin/faucet_testnet_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include diff --git a/plugins/faucet_testnet_plugin/include/eosio/faucet_testnet_plugin/faucet_testnet_plugin.hpp b/plugins/faucet_testnet_plugin/include/eosio/faucet_testnet_plugin/faucet_testnet_plugin.hpp index f59b1ef9bb3..237347df985 100644 --- a/plugins/faucet_testnet_plugin/include/eosio/faucet_testnet_plugin/faucet_testnet_plugin.hpp +++ b/plugins/faucet_testnet_plugin/include/eosio/faucet_testnet_plugin/faucet_testnet_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/plugins/history_api_plugin/history_api_plugin.cpp b/plugins/history_api_plugin/history_api_plugin.cpp index d76dd7fd44b..b5155c68f7e 100644 --- a/plugins/history_api_plugin/history_api_plugin.cpp +++ b/plugins/history_api_plugin/history_api_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include diff --git a/plugins/history_api_plugin/include/eosio/history_api_plugin/history_api_plugin.hpp b/plugins/history_api_plugin/include/eosio/history_api_plugin/history_api_plugin.hpp index 5e6dd936e24..557ac1b48b1 100644 --- a/plugins/history_api_plugin/include/eosio/history_api_plugin/history_api_plugin.hpp +++ b/plugins/history_api_plugin/include/eosio/history_api_plugin/history_api_plugin.hpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #pragma once #include #include diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp index 12cd3a5e731..4de5d99093e 100644 --- a/plugins/history_plugin/history_plugin.cpp +++ 
b/plugins/history_plugin/history_plugin.cpp @@ -148,14 +148,14 @@ namespace eosio { if (bypass_filter) { pass_on = true; } - if (filter_on.find({ act.receiver, 0, 0 }) != filter_on.end()) { + if (filter_on.find({ act.receiver, {}, {} }) != filter_on.end()) { pass_on = true; } - if (filter_on.find({ act.receiver, act.act.name, 0 }) != filter_on.end()) { + if (filter_on.find({ act.receiver, act.act.name, {} }) != filter_on.end()) { pass_on = true; } for (const auto& a : act.act.authorization) { - if (filter_on.find({ act.receiver, 0, a.actor }) != filter_on.end()) { + if (filter_on.find({ act.receiver, {}, a.actor }) != filter_on.end()) { pass_on = true; } if (filter_on.find({ act.receiver, act.act.name, a.actor }) != filter_on.end()) { @@ -165,14 +165,14 @@ namespace eosio { if (!pass_on) { return false; } - if (filter_out.find({ act.receiver, 0, 0 }) != filter_out.end()) { + if (filter_out.find({ act.receiver, {}, {} }) != filter_out.end()) { return false; } - if (filter_out.find({ act.receiver, act.act.name, 0 }) != filter_out.end()) { + if (filter_out.find({ act.receiver, act.act.name, {} }) != filter_out.end()) { return false; } for (const auto& a : act.act.authorization) { - if (filter_out.find({ act.receiver, 0, a.actor }) != filter_out.end()) { + if (filter_out.find({ act.receiver, {}, a.actor }) != filter_out.end()) { return false; } if (filter_out.find({ act.receiver, act.act.name, a.actor }) != filter_out.end()) { @@ -189,13 +189,13 @@ namespace eosio { result.insert( act.receiver ); for( const auto& a : act.act.authorization ) { if( bypass_filter || - filter_on.find({ act.receiver, 0, 0}) != filter_on.end() || - filter_on.find({ act.receiver, 0, a.actor}) != filter_on.end() || - filter_on.find({ act.receiver, act.act.name, 0}) != filter_on.end() || + filter_on.find({ act.receiver, {}, {}}) != filter_on.end() || + filter_on.find({ act.receiver, {}, a.actor}) != filter_on.end() || + filter_on.find({ act.receiver, act.act.name, {}}) != filter_on.end() || 
filter_on.find({ act.receiver, act.act.name, a.actor }) != filter_on.end() ) { - if ((filter_out.find({ act.receiver, 0, 0 }) == filter_out.end()) && - (filter_out.find({ act.receiver, 0, a.actor }) == filter_out.end()) && - (filter_out.find({ act.receiver, act.act.name, 0 }) == filter_out.end()) && + if ((filter_out.find({ act.receiver, {}, {} }) == filter_out.end()) && + (filter_out.find({ act.receiver, {}, a.actor }) == filter_out.end()) && + (filter_out.find({ act.receiver, act.act.name, {} }) == filter_out.end()) && (filter_out.find({ act.receiver, act.act.name, a.actor }) == filter_out.end())) { result.insert( a.actor ); } @@ -209,7 +209,7 @@ namespace eosio { chainbase::database& db = const_cast( chain.db() ); // Override read-only access to state DB (highly unrecommended practice!) const auto& idx = db.get_index(); - auto itr = idx.lower_bound( boost::make_tuple( name(n.value+1), 0 ) ); + auto itr = idx.lower_bound( boost::make_tuple( name(n.to_uint64_t()+1), 0 ) ); uint64_t asn = 0; if( itr != idx.begin() ) --itr; @@ -320,8 +320,8 @@ namespace eosio { std::vector v; boost::split( v, s, boost::is_any_of( ":" )); EOS_ASSERT( v.size() == 3, fc::invalid_arg_exception, "Invalid value ${s} for --filter-on", ("s", s)); - filter_entry fe{v[0], v[1], v[2]}; - EOS_ASSERT( fe.receiver.value, fc::invalid_arg_exception, + filter_entry fe{eosio::chain::name(v[0]), eosio::chain::name(v[1]), eosio::chain::name(v[2])}; + EOS_ASSERT( fe.receiver.to_uint64_t(), fc::invalid_arg_exception, "Invalid value ${s} for --filter-on", ("s", s)); my->filter_on.insert( fe ); } @@ -332,8 +332,8 @@ namespace eosio { std::vector v; boost::split( v, s, boost::is_any_of( ":" )); EOS_ASSERT( v.size() == 3, fc::invalid_arg_exception, "Invalid value ${s} for --filter-out", ("s", s)); - filter_entry fe{v[0], v[1], v[2]}; - EOS_ASSERT( fe.receiver.value, fc::invalid_arg_exception, + filter_entry fe{eosio::chain::name(v[0]), eosio::chain::name(v[1]), eosio::chain::name(v[2])}; + EOS_ASSERT( 
fe.receiver.to_uint64_t(), fc::invalid_arg_exception, "Invalid value ${s} for --filter-out", ("s", s)); my->filter_out.insert( fe ); } @@ -383,7 +383,7 @@ namespace eosio { auto n = params.account_name; idump((pos)); if( pos == -1 ) { - auto itr = idx.lower_bound( boost::make_tuple( name(n.value+1), 0 ) ); + auto itr = idx.lower_bound( boost::make_tuple( name(n.to_uint64_t()+1), 0 ) ); if( itr == idx.begin() ) { if( itr->account == n ) pos = itr->account_sequence_num+1; diff --git a/plugins/history_plugin/include/eosio/history_plugin/account_control_history_object.hpp b/plugins/history_plugin/include/eosio/history_plugin/account_control_history_object.hpp index 5005c279f5a..29529876440 100644 --- a/plugins/history_plugin/include/eosio/history_plugin/account_control_history_object.hpp +++ b/plugins/history_plugin/include/eosio/history_plugin/account_control_history_object.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp b/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp index 4cfb2588e46..0bbeda361be 100644 --- a/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp +++ b/plugins/history_plugin/include/eosio/history_plugin/history_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/plugins/history_plugin/include/eosio/history_plugin/public_key_history_object.hpp b/plugins/history_plugin/include/eosio/history_plugin/public_key_history_object.hpp index 2e8e37f0049..052f74c5a54 100644 --- a/plugins/history_plugin/include/eosio/history_plugin/public_key_history_object.hpp +++ b/plugins/history_plugin/include/eosio/history_plugin/public_key_history_object.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/plugins/http_client_plugin/http_client_plugin.cpp 
b/plugins/http_client_plugin/http_client_plugin.cpp index cb9d7c8e580..60811ff44ba 100644 --- a/plugins/http_client_plugin/http_client_plugin.cpp +++ b/plugins/http_client_plugin/http_client_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/plugins/http_client_plugin/include/eosio/http_client_plugin/http_client_plugin.hpp b/plugins/http_client_plugin/include/eosio/http_client_plugin/http_client_plugin.hpp index 95803bed1f8..0b6303dbc22 100644 --- a/plugins/http_client_plugin/include/eosio/http_client_plugin/http_client_plugin.hpp +++ b/plugins/http_client_plugin/include/eosio/http_client_plugin/http_client_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index ff7036f7d86..7a720fa7564 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -1,9 +1,7 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include +#ifdef BOOST_ASIO_HAS_LOCAL_SOCKETS #include +#endif #include #include @@ -28,6 +26,9 @@ #include #include +const fc::string logger_name("http_plugin"); +fc::logger logger; + namespace eosio { static appbase::abstract_plugin& _http_plugin = app().register_plugin(); @@ -93,6 +94,7 @@ namespace eosio { static const long timeout_open_handshake = 0; }; +#ifdef BOOST_ASIO_HAS_LOCAL_SOCKETS struct asio_local_with_stub_log : public websocketpp::config::asio { typedef asio_local_with_stub_log type; typedef asio base; @@ -124,10 +126,13 @@ namespace eosio { static const long timeout_open_handshake = 0; }; +#endif } using websocket_server_type = websocketpp::server>; +#ifdef BOOST_ASIO_HAS_LOCAL_SOCKETS using websocket_local_server_type = websocketpp::server; +#endif using websocket_server_tls_type = websocketpp::server>; using ssl_context_ptr = websocketpp::lib::shared_ptr; @@ -158,8 +163,10 @@ 
namespace eosio { websocket_server_tls_type https_server; +#ifdef BOOST_ASIO_HAS_LOCAL_SOCKETS optional unix_endpoint; websocket_local_server_type unix_server; +#endif bool validate_host; set valid_hosts; @@ -210,9 +217,9 @@ namespace eosio { "!DHE:!RSA:!AES128:!RC4:!DES:!3DES:!DSS:!SRP:!PSK:!EXP:!MD5:!LOW:!aNULL:!eNULL") != 1) EOS_THROW(chain::http_exception, "Failed to set HTTPS cipher list"); } catch (const fc::exception& e) { - elog("https server initialization error: ${w}", ("w", e.to_detail_string())); + fc_elog( logger, "https server initialization error: ${w}", ("w", e.to_detail_string()) ); } catch(std::exception& e) { - elog("https server initialization error: ${w}", ("w", e.what())); + fc_elog( logger, "https server initialization error: ${w}", ("w", e.what()) ); } return ctx; @@ -228,13 +235,13 @@ namespace eosio { throw; } catch (const fc::exception& e) { err += e.to_detail_string(); - elog( "${e}", ("e", err)); + fc_elog( logger, "${e}", ("e", err)); error_results results{websocketpp::http::status_code::internal_server_error, "Internal Service Error", error_results::error_info( e, verbose_http_errors )}; con->set_body( fc::json::to_string( results, deadline )); } catch (const std::exception& e) { err += e.what(); - elog( "${e}", ("e", err)); + fc_elog( logger, "${e}", ("e", err)); error_results results{websocketpp::http::status_code::internal_server_error, "Internal Service Error", error_results::error_info( fc::exception( FC_LOG_MESSAGE( error, e.what())), @@ -251,10 +258,10 @@ namespace eosio { } } catch (fc::timeout_exception& e) { con->set_body( R"xxx({"message": "Internal Server Error"})xxx" ); - elog( "Timeout exception ${te} attempting to handle exception: ${e}", ("te", e.to_detail_string())("e", err) ); + fc_elog( logger, "Timeout exception ${te} attempting to handle exception: ${e}", ("te", e.to_detail_string())("e", err) ); } catch (...) 
{ con->set_body( R"xxx({"message": "Internal Server Error"})xxx" ); - std::cerr << "Exception attempting to handle exception: " << err << std::endl; + fc_elog( logger, "Exception attempting to handle exception: ${e}", ("e", err) ); } } @@ -275,7 +282,7 @@ namespace eosio { template bool verify_max_bytes_in_flight( const T& con ) { if( bytes_in_flight > max_bytes_in_flight ) { - dlog( "503 - too many bytes in flight: ${bytes}", ("bytes", bytes_in_flight.load()) ); + fc_dlog( logger, "503 - too many bytes in flight: ${bytes}", ("bytes", bytes_in_flight.load()) ); error_results results{websocketpp::http::status_code::too_many_requests, "Busy", error_results::error_info()}; con->set_body( fc::json::to_string( results, fc::time_point::maximum() )); con->set_status( websocketpp::http::status_code::too_many_requests ); @@ -368,7 +375,7 @@ namespace eosio { } ); } else { - dlog( "404 - not found: ${ep}", ("ep", resource)); + fc_dlog( logger, "404 - not found: ${ep}", ("ep", resource) ); error_results results{websocketpp::http::status_code::not_found, "Not Found", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, "Unknown Endpoint" )), verbose_http_errors )}; con->set_body( fc::json::to_string( results, fc::time_point::now() + max_response_time )); @@ -391,11 +398,11 @@ namespace eosio { handle_http_request>(ws.get_con_from_hdl(hdl)); }); } catch ( const fc::exception& e ){ - elog( "http: ${e}", ("e",e.to_detail_string())); + fc_elog( logger, "http: ${e}", ("e", e.to_detail_string()) ); } catch ( const std::exception& e ){ - elog( "http: ${e}", ("e",e.what())); + fc_elog( logger, "http: ${e}", ("e", e.what()) ); } catch (...) 
{ - elog("error thrown from http io service"); + fc_elog( logger, "error thrown from http io service" ); } } @@ -406,10 +413,12 @@ namespace eosio { } }; +#ifdef BOOST_ASIO_HAS_LOCAL_SOCKETS template<> bool http_plugin_impl::allow_host(const detail::asio_local_with_stub_log::request_type& req, websocketpp::server::connection_ptr con) { return true; } +#endif http_plugin::http_plugin():my(new http_plugin_impl()){ app().register_config_type(); @@ -417,10 +426,16 @@ namespace eosio { http_plugin::~http_plugin(){} void http_plugin::set_program_options(options_description&, options_description& cfg) { +#ifdef BOOST_ASIO_HAS_LOCAL_SOCKETS if(current_http_plugin_defaults.default_unix_socket_path.length()) cfg.add_options() ("unix-socket-path", bpo::value()->default_value(current_http_plugin_defaults.default_unix_socket_path), "The filename (relative to data-dir) to create a unix socket for HTTP RPC; set blank to disable."); + else + cfg.add_options() + ("unix-socket-path", bpo::value(), + "The filename (relative to data-dir) to create a unix socket for HTTP RPC; set blank to disable."); +#endif if(current_http_plugin_defaults.default_http_port) cfg.add_options() @@ -448,26 +463,29 @@ namespace eosio { ("access-control-allow-origin", bpo::value()->notifier([this](const string& v) { my->access_control_allow_origin = v; - ilog("configured http with Access-Control-Allow-Origin: ${o}", ("o", my->access_control_allow_origin)); + fc_ilog( logger, "configured http with Access-Control-Allow-Origin: ${o}", + ("o", my->access_control_allow_origin) ); }), "Specify the Access-Control-Allow-Origin to be returned on each request.") ("access-control-allow-headers", bpo::value()->notifier([this](const string& v) { my->access_control_allow_headers = v; - ilog("configured http with Access-Control-Allow-Headers : ${o}", ("o", my->access_control_allow_headers)); + fc_ilog( logger, "configured http with Access-Control-Allow-Headers : ${o}", + ("o", my->access_control_allow_headers) ); }), 
"Specify the Access-Control-Allow-Headers to be returned on each request.") ("access-control-max-age", bpo::value()->notifier([this](const string& v) { my->access_control_max_age = v; - ilog("configured http with Access-Control-Max-Age : ${o}", ("o", my->access_control_max_age)); + fc_ilog( logger, "configured http with Access-Control-Max-Age : ${o}", + ("o", my->access_control_max_age) ); }), "Specify the Access-Control-Max-Age to be returned on each request.") ("access-control-allow-credentials", bpo::bool_switch()->notifier([this](bool v) { my->access_control_allow_credentials = v; - if (v) ilog("configured http with Access-Control-Allow-Credentials: true"); + if( v ) fc_ilog( logger, "configured http with Access-Control-Allow-Credentials: true" ); })->default_value(false), "Specify if Access-Control-Allow-Credentials: true should be returned on each request.") ("max-body-size", bpo::value()->default_value(1024*1024), @@ -515,12 +533,14 @@ namespace eosio { } } +#ifdef BOOST_ASIO_HAS_LOCAL_SOCKETS if( options.count( "unix-socket-path" ) && !options.at( "unix-socket-path" ).as().empty()) { boost::filesystem::path sock_path = options.at("unix-socket-path").as(); if (sock_path.is_relative()) sock_path = app().data_dir() / sock_path; my->unix_endpoint = asio::local::stream_protocol::endpoint(sock_path.string()); } +#endif if( options.count( "https-server-address" ) && options.at( "https-server-address" ).as().length()) { if( !options.count( "https-certificate-chain-file" ) || @@ -571,27 +591,30 @@ namespace eosio { void http_plugin::plugin_startup() { + handle_sighup(); // setup logging + my->thread_pool.emplace( "http", my->thread_pool_size ); if(my->listen_endpoint) { try { my->create_server_for_endpoint(*my->listen_endpoint, my->server); - ilog("start listening for http requests"); + fc_ilog( logger, "start listening for http requests" ); my->server.listen(*my->listen_endpoint); my->server.start_accept(); } catch ( const fc::exception& e ){ - elog( "http service 
failed to start: ${e}", ("e",e.to_detail_string())); + fc_elog( logger, "http service failed to start: ${e}", ("e", e.to_detail_string()) ); throw; } catch ( const std::exception& e ){ - elog( "http service failed to start: ${e}", ("e",e.what())); + fc_elog( logger, "http service failed to start: ${e}", ("e", e.what()) ); throw; } catch (...) { - elog("error thrown from http io service"); + fc_elog( logger, "error thrown from http io service" ); throw; } } +#ifdef BOOST_ASIO_HAS_LOCAL_SOCKETS if(my->unix_endpoint) { try { my->unix_server.clear_access_channels(websocketpp::log::alevel::all); @@ -603,16 +626,17 @@ namespace eosio { }); my->unix_server.start_accept(); } catch ( const fc::exception& e ){ - elog( "unix socket service failed to start: ${e}", ("e",e.to_detail_string())); + fc_elog( logger, "unix socket service (${path}) failed to start: ${e}", ("e", e.to_detail_string())("path",my->unix_endpoint->path()) ); throw; } catch ( const std::exception& e ){ - elog( "unix socket service failed to start: ${e}", ("e",e.what())); + fc_elog( logger, "unix socket service (${path}) failed to start: ${e}", ("e", e.what())("path",my->unix_endpoint->path()) ); throw; } catch (...) 
{ - elog("error thrown from unix socket io service"); + fc_elog( logger, "error thrown from unix socket (${path}) io service", ("path",my->unix_endpoint->path()) ); throw; } } +#endif if(my->https_listen_endpoint) { try { @@ -621,17 +645,17 @@ namespace eosio { return my->on_tls_init(hdl); }); - ilog("start listening for https requests"); + fc_ilog( logger, "start listening for https requests" ); my->https_server.listen(*my->https_listen_endpoint); my->https_server.start_accept(); } catch ( const fc::exception& e ){ - elog( "https service failed to start: ${e}", ("e",e.to_detail_string())); + fc_elog( logger, "https service failed to start: ${e}", ("e", e.to_detail_string()) ); throw; } catch ( const std::exception& e ){ - elog( "https service failed to start: ${e}", ("e",e.what())); + fc_elog( logger, "https service failed to start: ${e}", ("e", e.what()) ); throw; } catch (...) { - elog("error thrown from https io service"); + fc_elog( logger, "error thrown from https io service" ); throw; } } @@ -650,13 +674,19 @@ namespace eosio { }}); } + void http_plugin::handle_sighup() { + fc::logger::update( logger_name, logger ); + } + void http_plugin::plugin_shutdown() { if(my->server.is_listening()) my->server.stop_listening(); if(my->https_server.is_listening()) my->https_server.stop_listening(); +#ifdef BOOST_ASIO_HAS_LOCAL_SOCKETS if(my->unix_server.is_listening()) my->unix_server.stop_listening(); +#endif if( my->thread_pool ) { my->thread_pool->stop(); @@ -664,7 +694,7 @@ namespace eosio { } void http_plugin::add_handler(const string& url, const url_handler& handler) { - ilog( "add api url: ${c}", ("c",url) ); + fc_ilog( logger, "add api url: ${c}", ("c", url) ); my->url_handlers.insert(std::make_pair(url,handler)); } @@ -684,28 +714,28 @@ namespace eosio { } catch (fc::eof_exception& e) { error_results results{422, "Unprocessable Entity", error_results::error_info(e, verbose_http_errors)}; cb( 422, fc::variant( results )); - elog( "Unable to parse arguments to 
${api}.${call}", ("api", api_name)( "call", call_name )); - dlog("Bad arguments: ${args}", ("args", body)); + fc_elog( logger, "Unable to parse arguments to ${api}.${call}", ("api", api_name)( "call", call_name ) ); + fc_dlog( logger, "Bad arguments: ${args}", ("args", body) ); } catch (fc::exception& e) { error_results results{500, "Internal Service Error", error_results::error_info(e, verbose_http_errors)}; cb( 500, fc::variant( results )); if (e.code() != chain::greylist_net_usage_exceeded::code_value && e.code() != chain::greylist_cpu_usage_exceeded::code_value) { - elog( "FC Exception encountered while processing ${api}.${call}", - ("api", api_name)( "call", call_name )); - dlog( "Exception Details: ${e}", ("e", e.to_detail_string())); + fc_elog( logger, "FC Exception encountered while processing ${api}.${call}", + ("api", api_name)( "call", call_name ) ); + fc_dlog( logger, "Exception Details: ${e}", ("e", e.to_detail_string()) ); } } catch (std::exception& e) { error_results results{500, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, e.what())), verbose_http_errors)}; cb( 500, fc::variant( results )); - elog( "STD Exception encountered while processing ${api}.${call}", - ("api", api_name)( "call", call_name )); - dlog( "Exception Details: ${e}", ("e", e.what())); + fc_elog( logger, "STD Exception encountered while processing ${api}.${call}", + ("api", api_name)( "call", call_name ) ); + fc_dlog( logger, "Exception Details: ${e}", ("e", e.what()) ); } catch (...) { error_results results{500, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, "Unknown Exception" )), verbose_http_errors)}; cb( 500, fc::variant( results )); - elog( "Unknown Exception encountered while processing ${api}.${call}", - ("api", api_name)( "call", call_name )); + fc_elog( logger, "Unknown Exception encountered while processing ${api}.${call}", + ("api", api_name)( "call", call_name ) ); } } catch (...) 
{ std::cerr << "Exception attempting to handle exception for " << api_name << "." << call_name << std::endl; diff --git a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp index eaa132ce0e4..5c81279fe62 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -79,6 +75,7 @@ namespace eosio { void plugin_initialize(const variables_map& options); void plugin_startup(); void plugin_shutdown(); + void handle_sighup() override; void add_handler(const string& url, const url_handler&); void add_api(const api_description& api) { diff --git a/plugins/login_plugin/include/eosio/login_plugin/login_plugin.hpp b/plugins/login_plugin/include/eosio/login_plugin/login_plugin.hpp index 29c2660c6f3..ae8023a652f 100644 --- a/plugins/login_plugin/include/eosio/login_plugin/login_plugin.hpp +++ b/plugins/login_plugin/include/eosio/login_plugin/login_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/plugins/login_plugin/login_plugin.cpp b/plugins/login_plugin/login_plugin.cpp index 374a04b25dc..5d1903c0933 100644 --- a/plugins/login_plugin/login_plugin.cpp +++ b/plugins/login_plugin/login_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/plugins/mongo_db_plugin/CMakeLists.txt b/plugins/mongo_db_plugin/CMakeLists.txt index 29f45bd6808..3fd67671616 100644 --- a/plugins/mongo_db_plugin/CMakeLists.txt +++ b/plugins/mongo_db_plugin/CMakeLists.txt @@ -4,6 +4,9 @@ if(BUILD_MONGO_DB_PLUGIN) if (libmongoc-1.0_FOUND) + find_package(libbson-1.0 REQUIRED) + message(STATUS "Found bson headers: ${BSON_INCLUDE_DIRS}") + find_package(libbsoncxx-static REQUIRED) 
find_package(libmongocxx-static REQUIRED) find_package(libmongoc-static-1.0 REQUIRED) @@ -21,7 +24,7 @@ if(BUILD_MONGO_DB_PLUGIN) ${HEADERS} ) target_include_directories(mongo_db_plugin - PRIVATE ${LIBMONGOCXX_STATIC_INCLUDE_DIRS} ${LIBBSONCXX_STATIC_INCLUDE_DIRS} + PRIVATE ${LIBMONGOCXX_STATIC_INCLUDE_DIRS} ${LIBBSONCXX_STATIC_INCLUDE_DIRS} ${BSON_INCLUDE_DIRS} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/plugins/mongo_db_plugin/include/eosio/mongo_db_plugin/bson.hpp b/plugins/mongo_db_plugin/include/eosio/mongo_db_plugin/bson.hpp new file mode 100644 index 00000000000..984c5fd8b75 --- /dev/null +++ b/plugins/mongo_db_plugin/include/eosio/mongo_db_plugin/bson.hpp @@ -0,0 +1,246 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +namespace eosio { +void to_bson(const fc::variant_object& o, bsoncxx::builder::core& c); +void to_bson(const fc::variants& v, bsoncxx::builder::core& c); +void to_bson(const fc::variant& v, bsoncxx::builder::core& c); +bsoncxx::document::value to_bson(const fc::variant& v); + +void from_bson(const bsoncxx::document::view& view, fc::mutable_variant_object& o); +void from_bson(const bsoncxx::array::view& bson_array, fc::variants& a); +template void from_bson(const T& ele, fc::variant& v); +fc::variant from_bson(const bsoncxx::document::view& view); +} // namespace eosio + +namespace eosio { + +void to_bson(const fc::variant_object& o, bsoncxx::builder::core& c) +{ + auto itr = o.begin(); + while (itr != o.end()) { + c.key_owned(itr->key()); + to_bson(itr->value(), c); + ++itr; + } +} + +void to_bson(const fc::variants& a, bsoncxx::builder::core& c) +{ + auto itr = a.begin(); + while (itr != a.end()) { + to_bson(*itr, c); + ++itr; + } +} + +void to_bson(const fc::variant& v, bsoncxx::builder::core& c) +{ + switch (v.get_type()) { + case fc::variant::null_type: { + c.append(bsoncxx::types::b_null{}); + return; + } + case fc::variant::int64_type: + case 
fc::variant::uint64_type: { + c.append(v.as_int64()); + return; + } + case fc::variant::double_type: + c.append(v.as_double()); + return; + case fc::variant::bool_type: + c.append(v.as_bool()); + return; + case fc::variant::string_type: { + c.append(v.as_string()); + return; + } + case fc::variant::blob_type: { + bsoncxx::types::b_binary bin; + bin.sub_type = bsoncxx::binary_sub_type::k_binary; + bin.size = v.as_blob().data.size(); + bin.bytes = reinterpret_cast(&(*v.as_blob().data.begin())); + c.append(bin); + return; + } + case fc::variant::array_type: { + const fc::variants& a = v.get_array(); + bsoncxx::builder::core sub(true); + to_bson(a, sub); + c.append(sub.extract_array()); + return; + } + case fc::variant::object_type: { + const fc::variant_object& o = v.get_object(); + if (o.size() == 1) { + const auto value = o.begin()->value(); + if (o.begin()->key().compare("$oid") == 0) { + if (value.get_type() == fc::variant::string_type + && bson_oid_is_valid(value.as_string().data(), value.as_string().size())) { + bsoncxx::oid oid(value.as_string()); + c.append(oid); + break; + } + } + else if (o.begin()->key().compare("$date") == 0) { + if (value.get_type() == fc::variant::int64_type) { + bsoncxx::types::b_date date(std::chrono::milliseconds(value.as_int64())); + c.append(date); + break; + } + else if (value.get_type() == fc::variant::object_type) { + const fc::variant_object& obj = value.get_object(); + if (obj.size() == 1) { + auto number = obj.begin(); + if (number->key().compare("$numberLong") == 0) { + bsoncxx::types::b_date date(std::chrono::milliseconds(number->value().as_int64())); + c.append(date); + break; + } + } + } + } + else if (o.begin()->key().compare("$timestamp") == 0) { + if (value.get_type() == fc::variant::object_type) { + const fc::variant_object& obj = value.get_object(); + if (obj.size() == 2) { + auto t = obj.begin(); + auto i = t; + ++i; + if (t->key().compare("t") == 0 && i->key().compare("i") == 0) { + bsoncxx::types::b_timestamp ts; + 
ts.timestamp = static_cast(t->value().as_uint64()); + ts.increment = static_cast(i->value().as_uint64()); + c.append(ts); + break; + } + } + } + } + } + bsoncxx::builder::core sub(false); + to_bson(o, sub); + c.append(sub.extract_document()); + return; + } + default: + FC_THROW_EXCEPTION( + fc::invalid_arg_exception, + "Unsupported fc::variant type: " + std::to_string(v.get_type())); + } +} + +bsoncxx::document::value to_bson(const fc::variant& v) +{ + bsoncxx::builder::core doc(false); + if (v.get_type() == fc::variant::object_type) { + const fc::variant_object& o = v.get_object(); + to_bson(o, doc); + } + else if (v.get_type() != fc::variant::null_type) { + FC_THROW_EXCEPTION( + fc::invalid_arg_exception, + "Unsupported root fc::variant type: " + std::to_string(v.get_type())); + } + return doc.extract_document(); +} + +void from_bson(const bsoncxx::document::view& view, fc::mutable_variant_object& o) +{ + for (bsoncxx::document::element ele : view) { + fc::variant v; + from_bson(ele, v); + o(ele.key().data(), v); + } +} + +void from_bson(const bsoncxx::array::view& bson_array, fc::variants& a) +{ + a.reserve(std::distance(bson_array.cbegin(), bson_array.cend())); + for (bsoncxx::array::element ele : bson_array) { + fc::variant v; + from_bson(ele, v); + a.push_back(v); + } +} + +template +void from_bson(const T& ele, fc::variant& v) +{ + switch (ele.type()) { + case bsoncxx::type::k_double: + v = ele.get_double().value; + return; + case bsoncxx::type::k_utf8: + v = bsoncxx::string::to_string(ele.get_utf8().value); + return; + case bsoncxx::type::k_document: { + fc::mutable_variant_object o; + from_bson(ele.get_document().value, o); + v = o; + return; + } + case bsoncxx::type::k_array: { + bsoncxx::array::view sub_array{ele.get_array().value}; + fc::variants a; + from_bson(sub_array, a); + v = a; + return; + } + case bsoncxx::type::k_binary: { + fc::blob blob; + blob.data.resize(ele.get_binary().size); + std::copy(ele.get_binary().bytes, ele.get_binary().bytes + 
ele.get_binary().size, blob.data.begin()); + v = blob; + return; + } + case bsoncxx::type::k_undefined: + case bsoncxx::type::k_null: + v = fc::variant(); + return; + case bsoncxx::type::k_oid: + v = fc::variant_object("$oid", ele.get_oid().value.to_string()); + return; + case bsoncxx::type::k_bool: + v = ele.get_bool().value; + return; + case bsoncxx::type::k_date: + v = fc::variant_object("$date", ele.get_date().to_int64()); + return; + case bsoncxx::type::k_int32: + v = ele.get_int32().value; + return; + case bsoncxx::type::k_timestamp: + v = fc::variant_object("$timestamp", fc::mutable_variant_object("t", ele.get_timestamp().timestamp)("i", ele.get_timestamp().increment)); + return; + case bsoncxx::type::k_int64: + v = ele.get_int64().value; + return; + default: + FC_THROW_EXCEPTION( + fc::invalid_arg_exception, + "Unsupported BSON type: " + bsoncxx::to_string(ele.type())); + } +} + +fc::variant from_bson(const bsoncxx::document::view& view) +{ + fc::mutable_variant_object o; + from_bson(view, o); + return o; +} + +} // namespace eosio + diff --git a/plugins/mongo_db_plugin/include/eosio/mongo_db_plugin/mongo_db_plugin.hpp b/plugins/mongo_db_plugin/include/eosio/mongo_db_plugin/mongo_db_plugin.hpp index 3b636651056..8dfbf09bff1 100644 --- a/plugins/mongo_db_plugin/include/eosio/mongo_db_plugin/mongo_db_plugin.hpp +++ b/plugins/mongo_db_plugin/include/eosio/mongo_db_plugin/mongo_db_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index ffb07b81f4f..60cc44d47e3 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -1,8 +1,5 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include +#include #include #include #include @@ -22,9 +19,6 @@ #include #include -#include -#include -#include #include #include @@ -60,9 +54,9 @@ struct filter_entry { 
// receiver action actor bool match( const name& rr, const name& an, const name& ar ) const { - return (receiver.value == 0 || receiver == rr) && - (action.value == 0 || action == an) && - (actor.value == 0 || actor == ar); + return (receiver.to_uint64_t() == 0 || receiver == rr) && + (action.to_uint64_t() == 0 || action == an) && + (actor.to_uint64_t() == 0 || actor == ar); } }; @@ -231,7 +225,7 @@ bool mongo_db_plugin_impl::filter_include( const account_name& receiver, const a include = true; } else { auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&receiver, &act_name]( const auto& filter ) { - return filter.match( receiver, act_name, 0 ); + return filter.match( receiver, act_name, {} ); } ); if( itr != filter_on.cend() ) { include = true; @@ -252,7 +246,7 @@ bool mongo_db_plugin_impl::filter_include( const account_name& receiver, const a if( filter_out.empty() ) { return true; } auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&receiver, &act_name]( const auto& filter ) { - return filter.match( receiver, act_name, 0 ); + return filter.match( receiver, act_name, {} ); } ); if( itr != filter_out.cend() ) { return false; } @@ -615,7 +609,7 @@ optional mongo_db_plugin_impl::get_abi_serializer( account_name abi_def abi; if( view.find( "abi" ) != view.end()) { try { - abi = fc::json::from_string( bsoncxx::to_json( view["abi"].get_document())).as(); + abi = from_bson( view["abi"].get_document() ).as(); } catch (...) 
{ ilog( "Unable to convert account abi to abi_def for ${n}", ( "n", n )); return optional(); @@ -748,7 +742,7 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti using bsoncxx::builder::basic::make_array; namespace bbb = bsoncxx::builder::basic; - const signed_transaction& trx = t->packed_trx->get_signed_transaction(); + const signed_transaction& trx = t->packed_trx()->get_signed_transaction(); if( !filter_include( trx ) ) return; @@ -757,48 +751,45 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti auto now = std::chrono::duration_cast( std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()} ); - const auto& trx_id = t->id; + const auto& trx_id = t->id(); const auto trx_id_str = trx_id.str(); trans_doc.append( kvp( "trx_id", trx_id_str ) ); auto v = to_variant_with_abi( trx ); - string trx_json = fc::json::to_string( v, fc::time_point::maximum() ); try { - const auto& trx_value = bsoncxx::from_json( trx_json ); + const auto& trx_value = to_bson( v ); trans_doc.append( bsoncxx::builder::concatenate_doc{trx_value.view()} ); - } catch( bsoncxx::exception& ) { + } catch( bsoncxx::exception& e) { + elog( "Unable to convert transaction to BSON: ${e}", ("e", e.what()) ); try { - trx_json = fc::prune_invalid_utf8( trx_json ); - const auto& trx_value = bsoncxx::from_json( trx_json ); - trans_doc.append( bsoncxx::builder::concatenate_doc{trx_value.view()} ); - trans_doc.append( kvp( "non-utf8-purged", b_bool{true} ) ); - } catch( bsoncxx::exception& e ) { - elog( "Unable to convert transaction JSON to MongoDB JSON: ${e}", ("e", e.what()) ); - elog( " JSON: ${j}", ("j", trx_json) ); - } + elog( " JSON: ${j}", ("j", fc::json::to_string( v, fc::time_point::now() + fc::exception::format_time_limit )) ); + } catch(...) 
{} } - string signing_keys_json; - if( t->signing_keys_future.valid() ) { - signing_keys_json = fc::json::to_string( std::get<2>( t->signing_keys_future.get() ), fc::time_point::maximum() ); + fc::variant signing_keys; + const flat_set& keys = t->recovered_keys(); + if( !keys.empty() ) { + signing_keys = keys; } else { - flat_set keys; - trx.get_signature_keys( *chain_id, fc::time_point::maximum(), keys, false ); - if( !keys.empty() ) { - signing_keys_json = fc::json::to_string( keys, fc::time_point::maximum() ); + flat_set pub_keys; + trx.get_signature_keys( *chain_id, fc::time_point::maximum(), pub_keys, false ); + if( !pub_keys.empty() ) { + signing_keys = pub_keys; } } - if( !signing_keys_json.empty() ) { + if( signing_keys.get_type() == fc::variant::array_type && signing_keys.get_array().size() > 0) { try { - const auto& keys_value = bsoncxx::from_json( signing_keys_json ); - trans_doc.append( kvp( "signing_keys", keys_value ) ); + bsoncxx::builder::core keys_value(true); + to_bson( signing_keys.get_array(), keys_value ); + trans_doc.append( kvp( "signing_keys", keys_value.extract_array() ) ); } catch( bsoncxx::exception& e ) { - // should never fail, so don't attempt to remove invalid utf8 - elog( "Unable to convert signing keys JSON to MongoDB JSON: ${e}", ("e", e.what()) ); - elog( " JSON: ${j}", ("j", signing_keys_json) ); + elog( "Unable to convert signing keys to BSON: ${e}", ("e", e.what()) ); + try { + elog( " JSON: ${j}", ("j", fc::json::to_string( signing_keys, fc::time_point::now() + fc::exception::format_time_limit )) ); + } catch(...) 
{} } } @@ -844,20 +835,13 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces action_traces_doc.append( kvp( "_id", make_custom_oid() ) ); auto v = to_variant_with_abi( atrace ); - string json = fc::json::to_string( v, fc::time_point::maximum() ); try { - const auto& value = bsoncxx::from_json( json ); - action_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} ); - } catch( bsoncxx::exception& ) { + action_traces_doc.append( bsoncxx::builder::concatenate_doc{to_bson( v )} ); + } catch( bsoncxx::exception& e ) { + elog( "Unable to convert action trace to BSON: ${e}", ("e", e.what()) ); try { - json = fc::prune_invalid_utf8( json ); - const auto& value = bsoncxx::from_json( json ); - action_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} ); - action_traces_doc.append( kvp( "non-utf8-purged", b_bool{true} ) ); - } catch( bsoncxx::exception& e ) { - elog( "Unable to convert action trace JSON to MongoDB JSON: ${e}", ("e", e.what()) ); - elog( " JSON: ${j}", ("j", json) ); - } + elog( " JSON: ${j}", ("j", fc::json::to_string( v, fc::time_point::now() + fc::exception::format_time_limit )) ); + } catch(...) 
{} } if( t->receipt.valid() ) { action_traces_doc.append( kvp( "trx_status", std::string( t->receipt->status ) ) ); @@ -904,20 +888,13 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio if( store_transaction_traces && write_ttrace ) { try { auto v = to_variant_with_abi( *t ); - string json = fc::json::to_string( v, fc::time_point::maximum() ); try { - const auto& value = bsoncxx::from_json( json ); - trans_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} ); - } catch( bsoncxx::exception& ) { + trans_traces_doc.append( bsoncxx::builder::concatenate_doc{to_bson( v )} ); + } catch( bsoncxx::exception& e ) { + elog( "Unable to convert transaction to BSON: ${e}", ("e", e.what()) ); try { - json = fc::prune_invalid_utf8( json ); - const auto& value = bsoncxx::from_json( json ); - trans_traces_doc.append( bsoncxx::builder::concatenate_doc{value.view()} ); - trans_traces_doc.append( kvp( "non-utf8-purged", b_bool{true} ) ); - } catch( bsoncxx::exception& e ) { - elog( "Unable to convert transaction JSON to MongoDB JSON: ${e}", ("e", e.what()) ); - elog( " JSON: ${j}", ("j", json) ); - } + elog( " JSON: ${j}", ("j", fc::json::to_string( v, fc::time_point::now() + fc::exception::format_time_limit )) ); + } catch(...) {} } trans_traces_doc.append( kvp( "createdAt", b_date{now} ) ); @@ -926,7 +903,7 @@ void mongo_db_plugin_impl::_process_applied_transaction( const chain::transactio EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert trans ${id}", ("id", t->id) ); } } catch( ... ) { - handle_mongo_exception( "trans_traces insert: " + json, __LINE__ ); + handle_mongo_exception( "trans_traces insert: " + t->id.str(), __LINE__ ); } } catch( ... 
) { handle_mongo_exception( "trans_traces serialization: " + t->id.str(), __LINE__ ); @@ -969,24 +946,17 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr auto block_state_doc = bsoncxx::builder::basic::document{}; block_state_doc.append( kvp( "block_num", b_int32{static_cast(block_num)} ), kvp( "block_id", block_id_str ), - kvp( "validated", b_bool{bs->validated} ) ); + kvp( "validated", b_bool{true} ) ); const chain::block_header_state& bhs = *bs; - auto json = fc::json::to_string( bhs, fc::time_point::maximum() ); try { - const auto& value = bsoncxx::from_json( json ); - block_state_doc.append( kvp( "block_header_state", value ) ); - } catch( bsoncxx::exception& ) { + block_state_doc.append( kvp( "block_header_state", to_bson( fc::variant(bhs) ) ) ); + } catch( bsoncxx::exception& e ) { + elog( "Unable to convert block_header_state to BSON: ${e}", ("e", e.what()) ); try { - json = fc::prune_invalid_utf8( json ); - const auto& value = bsoncxx::from_json( json ); - block_state_doc.append( kvp( "block_header_state", value ) ); - block_state_doc.append( kvp( "non-utf8-purged", b_bool{true} ) ); - } catch( bsoncxx::exception& e ) { - elog( "Unable to convert block_header_state JSON to MongoDB JSON: ${e}", ("e", e.what()) ); - elog( " JSON: ${j}", ("j", json) ); - } + elog( " JSON: ${j}", ("j", fc::json::to_string( bhs, fc::time_point::now() + fc::exception::format_time_limit )) ); + } catch(...) {} } block_state_doc.append( kvp( "createdAt", b_date{now} ) ); @@ -1003,7 +973,7 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr } } } catch( ... 
) { - handle_mongo_exception( "block_states insert: " + json, __LINE__ ); + handle_mongo_exception( "block_states insert: " + block_id_str, __LINE__ ); } } @@ -1013,20 +983,13 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr kvp( "block_id", block_id_str ) ); auto v = to_variant_with_abi( *bs->block ); - auto json = fc::json::to_string( v, fc::time_point::maximum() ); try { - const auto& value = bsoncxx::from_json( json ); - block_doc.append( kvp( "block", value ) ); - } catch( bsoncxx::exception& ) { + block_doc.append( kvp( "block", to_bson( v ) ) ); + } catch( bsoncxx::exception& e ) { + elog( "Unable to convert block to BSON: ${e}", ("e", e.what()) ); try { - json = fc::prune_invalid_utf8( json ); - const auto& value = bsoncxx::from_json( json ); - block_doc.append( kvp( "block", value ) ); - block_doc.append( kvp( "non-utf8-purged", b_bool{true} ) ); - } catch( bsoncxx::exception& e ) { - elog( "Unable to convert block JSON to MongoDB JSON: ${e}", ("e", e.what()) ); - elog( " JSON: ${j}", ("j", json) ); - } + elog( " JSON: ${j}", ("j", fc::json::to_string( v, fc::time_point::now() + fc::exception::format_time_limit )) ); + } catch(...) {} } block_doc.append( kvp( "createdAt", b_date{now} ) ); @@ -1043,7 +1006,7 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr } } } catch( ... 
) { - handle_mongo_exception( "blocks insert: " + json, __LINE__ ); + handle_mongo_exception( "blocks insert: " + block_id_str, __LINE__ ); } } } @@ -1071,7 +1034,6 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ } auto update_doc = make_document( kvp( "$set", make_document( kvp( "irreversible", b_bool{true} ), - kvp( "validated", b_bool{bs->validated} ), kvp( "updatedAt", b_date{now} ) ) ) ); _blocks.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); @@ -1086,7 +1048,6 @@ void mongo_db_plugin_impl::_process_irreversible_block(const chain::block_state_ } auto update_doc = make_document( kvp( "$set", make_document( kvp( "irreversible", b_bool{true} ), - kvp( "validated", b_bool{bs->validated} ), kvp( "updatedAt", b_date{now} ) ) ) ); _block_states.update_one( make_document( kvp( "_id", ir_block->view()["_id"].get_oid() ) ), update_doc.view() ); @@ -1321,11 +1282,11 @@ void mongo_db_plugin_impl::update_account(const chain::action& act) } if( account ) { abi_def abi_def = fc::raw::unpack( setabi.abi ); - const string json_str = fc::json::to_string( abi_def, fc::time_point::maximum() ); + auto v = fc::variant( abi_def ); - try{ + try { auto update_from = make_document( - kvp( "$set", make_document( kvp( "abi", bsoncxx::from_json( json_str )), + kvp( "$set", make_document( kvp( "abi", to_bson( v )), kvp( "updatedAt", b_date{now} )))); try { @@ -1337,8 +1298,10 @@ void mongo_db_plugin_impl::update_account(const chain::action& act) handle_mongo_exception( "account update", __LINE__ ); } } catch( bsoncxx::exception& e ) { - elog( "Unable to convert abi JSON to MongoDB JSON: ${e}", ("e", e.what())); - elog( " JSON: ${j}", ("j", json_str)); + elog( "Unable to convert abi JSON to BSON: ${e}", ("e", e.what())); + try { + elog( " JSON: ${j}", ("j", fc::json::to_string( v, fc::time_point::now() + fc::exception::format_time_limit )) ); + } catch(...) 
{} } } } @@ -1439,7 +1402,7 @@ void mongo_db_plugin_impl::init() { auto& mongo_conn = *client; auto accounts = mongo_conn[db_name][accounts_col]; - if( accounts.count( make_document()) == 0 ) { + if( accounts.estimated_document_count() == 0 ) { auto now = std::chrono::duration_cast( std::chrono::microseconds{fc::time_point::now().time_since_epoch().count()} ); @@ -1644,7 +1607,7 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) std::vector v; boost::split( v, s, boost::is_any_of( ":" )); EOS_ASSERT( v.size() == 3, fc::invalid_arg_exception, "Invalid value ${s} for --mongodb-filter-on", ("s", s)); - filter_entry fe{v[0], v[1], v[2]}; + filter_entry fe{eosio::chain::name(v[0]), eosio::chain::name(v[1]), eosio::chain::name(v[2])}; my->filter_on.insert( fe ); } } else { @@ -1656,7 +1619,7 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) std::vector v; boost::split( v, s, boost::is_any_of( ":" )); EOS_ASSERT( v.size() == 3, fc::invalid_arg_exception, "Invalid value ${s} for --mongodb-filter-out", ("s", s)); - filter_entry fe{v[0], v[1], v[2]}; + filter_entry fe{eosio::chain::name(v[0]), eosio::chain::name(v[1]), eosio::chain::name(v[2])}; my->filter_out.insert( fe ); } } @@ -1725,3 +1688,4 @@ void mongo_db_plugin::plugin_shutdown() } } // namespace eosio + diff --git a/plugins/net_api_plugin/include/eosio/net_api_plugin/net_api_plugin.hpp b/plugins/net_api_plugin/include/eosio/net_api_plugin/net_api_plugin.hpp index 3c7e1e232ac..25e4a539278 100644 --- a/plugins/net_api_plugin/include/eosio/net_api_plugin/net_api_plugin.hpp +++ b/plugins/net_api_plugin/include/eosio/net_api_plugin/net_api_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/plugins/net_api_plugin/net_api_plugin.cpp b/plugins/net_api_plugin/net_api_plugin.cpp index 315ea2816e9..17d6af921d5 100644 --- a/plugins/net_api_plugin/net_api_plugin.cpp +++ b/plugins/net_api_plugin/net_api_plugin.cpp @@ 
-1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp index 735dc120725..b7b3516e18f 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -31,14 +27,11 @@ namespace eosio { void plugin_startup(); void plugin_shutdown(); - void broadcast_block(const chain::signed_block &sb); - string connect( const string& endpoint ); string disconnect( const string& endpoint ); optional status( const string& endpoint )const; vector connections()const; - size_t num_peers() const; private: std::shared_ptr my; }; diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp index 7170c1abd20..a806486b50a 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include @@ -26,7 +22,7 @@ namespace eosio { chain_id_type chain_id; ///< used to identify chain fc::sha256 node_id; ///< used to identify peers and prevent self-connect chain::public_key_type key; ///< authentication key; may be a producer or peer key, or empty - tstamp time; + tstamp time{0}; fc::sha256 token; ///< digest of time to prove we own the private key of the key above chain::signature_type sig; ///< signature for the digest string p2p_address; @@ -36,7 +32,7 @@ namespace eosio { block_id_type head_id; string os; string agent; - int16_t generation; + int16_t generation = 0; }; @@ -68,7 +64,7 @@ namespace eosio { case validation : return "invalid block"; case authentication : return "authentication failure"; case 
fatal_other : return "some other failure"; - case benign_other : return "some other non-fatal condition"; + case benign_other : return "some other non-fatal condition, possibly unknown block"; default : return "some crazy reason"; } } diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index ce096d6d431..d8c475dbc39 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include @@ -28,7 +24,10 @@ #include #include -using namespace eosio::chain::plugin_interface::compat; +#include +#include + +using namespace eosio::chain::plugin_interface; namespace eosio { static appbase::abstract_plugin& _net_plugin = app().register_plugin(); @@ -47,20 +46,24 @@ namespace eosio { class connection; - class sync_manager; - class dispatch_manager; - using connection_ptr = std::shared_ptr; using connection_wptr = std::weak_ptr; - using socket_ptr = std::shared_ptr; using io_work_t = boost::asio::executor_work_guard; + template + void verify_strand_in_this_thread(const Strand& strand, const char* func, int line) { + if( !strand.running_in_this_thread() ) { + elog( "wrong strand: ${f} : line ${n}, exiting", ("f", func)("n", line) ); + app().quit(); + } + } + struct node_transaction_state { transaction_id_type id; - time_point_sec expires; /// time after which this may be purged. - uint32_t block_num = 0; /// block transaction was included in - std::shared_ptr> serialized_txn; /// the received raw bundle + time_point_sec expires; /// time after which this may be purged. 
+ uint32_t block_num = 0; /// block transaction was included in + uint32_t connection_id = 0; }; struct by_expiry; @@ -70,145 +73,233 @@ namespace eosio { node_transaction_state, indexed_by< ordered_unique< - tag< by_id >, - member < node_transaction_state, - transaction_id_type, - &node_transaction_state::id >, - sha256_less >, + tag, + composite_key< node_transaction_state, + member, + member + >, + composite_key_compare< sha256_less, std::less > + >, ordered_non_unique< tag< by_expiry >, - member< node_transaction_state, - fc::time_point_sec, - &node_transaction_state::expires > - >, + member< node_transaction_state, fc::time_point_sec, &node_transaction_state::expires > >, ordered_non_unique< tag, - member< node_transaction_state, - uint32_t, - &node_transaction_state::block_num > > + member< node_transaction_state, uint32_t, &node_transaction_state::block_num > > > > node_transaction_index; - class net_plugin_impl { + struct peer_block_state { + block_id_type id; + uint32_t block_num = 0; + uint32_t connection_id = 0; + bool have_block = false; // true if we have received the block, false if only received id notification + }; + + struct by_block_id; + + typedef multi_index_container< + eosio::peer_block_state, + indexed_by< + ordered_unique< tag, + composite_key< peer_block_state, + member, + member + >, + composite_key_compare< std::less, sha256_less > + >, + ordered_non_unique< tag, + composite_key< peer_block_state, + member, + member + >, + composite_key_compare< sha256_less, std::greater > + >, + ordered_non_unique< tag, member > + > + > peer_block_state_index; + + + struct update_block_num { + uint32_t new_bnum; + update_block_num(uint32_t bnum) : new_bnum(bnum) {} + void operator() (node_transaction_state& nts) { + nts.block_num = new_bnum; + } + }; + + class sync_manager { + private: + enum stages { + lib_catchup, + head_catchup, + in_sync + }; + + mutable std::mutex sync_mtx; + uint32_t sync_known_lib_num; + uint32_t sync_last_requested_num; + 
uint32_t sync_next_expected_num; + uint32_t sync_req_span; + connection_ptr sync_source; + std::atomic sync_state; + + private: + constexpr static auto stage_str( stages s ); + void set_state( stages s ); + bool is_sync_required( uint32_t fork_head_block_num ); + void request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn = connection_ptr() ); + void start_sync( const connection_ptr& c, uint32_t target ); + bool verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); + + public: + explicit sync_manager( uint32_t span ); + static void send_handshakes(); + bool syncing_with_peer() const { return sync_state == lib_catchup; } + void sync_reset_lib_num( const connection_ptr& conn ); + void sync_reassign_fetch( const connection_ptr& c, go_away_reason reason ); + void rejected_block( const connection_ptr& c, uint32_t blk_num ); + void sync_recv_block( const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied ); + void sync_update_expected( const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied ); + void recv_handshake( const connection_ptr& c, const handshake_message& msg ); + void sync_recv_notice( const connection_ptr& c, const notice_message& msg ); + }; + + class dispatch_manager { + mutable std::mutex blk_state_mtx; + peer_block_state_index blk_state; + mutable std::mutex local_txns_mtx; + node_transaction_index local_txns; + + public: + boost::asio::io_context::strand strand; + + explicit dispatch_manager(boost::asio::io_context& io_context) + : strand( io_context ) {} + + void bcast_transaction(const packed_transaction& trx); + void rejected_transaction(const packed_transaction_ptr& trx, uint32_t head_blk_num); + void bcast_block(const block_state_ptr& bs); + void bcast_notice( const block_id_type& id ); + void rejected_block(const block_id_type& id); + + void recv_block(const connection_ptr& conn, const block_id_type& msg, uint32_t bnum); + void 
expire_blocks( uint32_t bnum ); + void recv_notice(const connection_ptr& conn, const notice_message& msg, bool generated); + + void retry_fetch(const connection_ptr& conn); + + bool add_peer_block( const block_id_type& blkid, uint32_t connection_id ); + bool add_peer_block_id( const block_id_type& blkid, uint32_t connection_id ); + bool peer_has_block(const block_id_type& blkid, uint32_t connection_id) const; + bool have_block(const block_id_type& blkid) const; + size_t num_entries( uint32_t connection_id ) const; + + bool add_peer_txn( const node_transaction_state& nts ); + void update_txns_block_num( const signed_block_ptr& sb ); + void update_txns_block_num( const transaction_id_type& id, uint32_t blk_num ); + bool peer_has_txn( const transaction_id_type& tid, uint32_t connection_id ) const; + bool have_txn( const transaction_id_type& tid ) const; + void expire_txns( uint32_t lib_num ); + }; + + class net_plugin_impl : public std::enable_shared_from_this { public: unique_ptr acceptor; - tcp::endpoint listen_endpoint; - string p2p_address; - string p2p_server_address; - uint32_t max_client_count = 0; - uint32_t max_nodes_per_host = 1; - uint32_t num_clients = 0; - - vector supplied_peers; - vector allowed_peers; ///< peer keys allowed to connect - std::map private_keys; ///< overlapping with producer keys, also authenticating non-producing nodes + std::atomic current_connection_id{0}; + unique_ptr< sync_manager > sync_master; + unique_ptr< dispatch_manager > dispatcher; + + /** + * Thread safe, only updated in plugin initialize + * @{ + */ + string p2p_address; + string p2p_server_address; + + vector supplied_peers; + vector allowed_peers; ///< peer keys allowed to connect + std::map private_keys; ///< overlapping with producer keys, also authenticating non-producing nodes enum possible_connections : char { None = 0, Producers = 1 << 0, Specified = 1 << 1, Any = 1 << 2 }; - possible_connections allowed_connections{None}; - - connection_ptr find_connection(const 
string& host)const; - - std::set< connection_ptr > connections; - bool done = false; - unique_ptr< sync_manager > sync_master; - unique_ptr< dispatch_manager > dispatcher; + possible_connections allowed_connections{None}; - unique_ptr connector_check; - unique_ptr transaction_check; - unique_ptr keepalive_timer; boost::asio::steady_timer::duration connector_period; boost::asio::steady_timer::duration txn_exp_period; boost::asio::steady_timer::duration resp_expected_period; boost::asio::steady_timer::duration keepalive_interval{std::chrono::seconds{32}}; - int max_cleanup_time_ms = 0; - const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; ///< Peer clock may be no more than 1 second skewed from our clock, including network latency. + int max_cleanup_time_ms = 0; + uint32_t max_client_count = 0; + uint32_t max_nodes_per_host = 1; - bool network_version_match = false; - chain_id_type chain_id; - fc::sha256 node_id; + /// Peer clock may be no more than 1 second skewed from our clock, including network latency. 
+ const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; - string user_agent_name; - chain_plugin* chain_plug = nullptr; - producer_plugin* producer_plug = nullptr; - int started_sessions = 0; + chain_id_type chain_id; + fc::sha256 node_id; + string user_agent_name; - node_transaction_index local_txns; + eosio::db_read_mode db_read_mode = eosio::db_read_mode::SPECULATIVE; + chain_plugin* chain_plug = nullptr; + producer_plugin* producer_plug = nullptr; + bool use_socket_read_watermark = false; + /** @} */ - bool use_socket_read_watermark = false; + mutable std::shared_mutex connections_mtx; + std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection - channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + std::mutex connector_check_timer_mtx; + unique_ptr connector_check_timer; + int connector_checks_in_flight{0}; - uint16_t thread_pool_size = 1; - optional thread_pool; + std::mutex expire_timer_mtx; + unique_ptr expire_timer; - void connect( const connection_ptr& c ); - void connect( const connection_ptr& c, const std::shared_ptr& resolver, tcp::resolver::results_type endpoints ); - bool start_session(const connection_ptr& c); - void start_listen_loop(); - void start_read_message(const connection_ptr& c); + std::mutex keepalive_timer_mtx; + unique_ptr keepalive_timer; - /** \brief Process the next message from the pending message buffer - * - * Process the next message from the pending_message_buffer. - * message_length is the already determined length of the data - * part of the message that will handle the message. - * Returns true is successful. Returns false if an error was - * encountered unpacking or processing the message. 
- */ - bool process_next_message(const connection_ptr& conn, uint32_t message_length); + std::atomic in_shutdown{false}; - void close(const connection_ptr& c); - size_t count_open_sockets() const; + compat::channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + channels::irreversible_block::channel_type::handle incoming_irreversible_block_subscription; - template - void send_transaction_to_all( const std::shared_ptr>& send_buffer, VerifierFunc verify ); + uint16_t thread_pool_size = 2; + optional thread_pool; - void accepted_block(const block_state_ptr&); - void transaction_ack(const std::pair&); + private: + mutable std::mutex chain_info_mtx; // protects chain_* + uint32_t chain_lib_num{0}; + uint32_t chain_head_blk_num{0}; + uint32_t chain_fork_head_blk_num{0}; + block_id_type chain_lib_id; + block_id_type chain_head_blk_id; + block_id_type chain_fork_head_blk_id; - bool is_valid( const handshake_message &msg); + public: + void update_chain_info(); + // lib_num, head_block_num, fork_head_blk_num, lib_id, head_blk_id, fork_head_blk_id + std::tuple get_chain_info() const; - void handle_message(const connection_ptr& c, const handshake_message& msg); - void handle_message(const connection_ptr& c, const chain_size_message& msg); - void handle_message(const connection_ptr& c, const go_away_message& msg ); - /** \name Peer Timestamps - * Time message handling - * @{ - */ - /** \brief Process time_message - * - * Calculate offset, delay and dispersion. Note carefully the - * implied processing. The first-order difference is done - * directly in 64-bit arithmetic, then the result is converted - * to floating double. All further processing is in - * floating-double arithmetic with rounding done by the hardware. - * This is necessary in order to avoid overflow and preserve precision. 
- */ - void handle_message(const connection_ptr& c, const time_message& msg); - /** @} */ - void handle_message(const connection_ptr& c, const notice_message& msg); - void handle_message(const connection_ptr& c, const request_message& msg); - void handle_message(const connection_ptr& c, const sync_request_message& msg); - void handle_message(const connection_ptr& c, const signed_block& msg) = delete; // signed_block_ptr overload used instead - void handle_message(const connection_ptr& c, const signed_block_ptr& msg); - void handle_message(const connection_ptr& c, const packed_transaction& msg) = delete; // packed_transaction_ptr overload used instead - void handle_message(const connection_ptr& c, const packed_transaction_ptr& msg); + void start_listen_loop(); + + void on_accepted_block( const block_state_ptr& bs ); + void transaction_ack(const std::pair&); + void on_irreversible_block( const block_state_ptr& blk ); void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); - void start_txn_timer(); + void start_expire_timer(); void start_monitors(); - void expire_txns(); - void expire_local_txns(); - void connection_monitor(std::weak_ptr from_connection); + void expire(); + void connection_monitor(std::weak_ptr from_connection, bool reschedule); /** \name Peer Timestamps * Time message handling * @{ @@ -241,7 +332,9 @@ namespace eosio { */ chain::signature_type sign_compact(const chain::public_key_type& signer, const fc::sha256& digest) const; - uint16_t to_protocol_version(uint16_t v); + constexpr uint16_t to_protocol_version(uint16_t v); + + connection_ptr find_connection(const string& host)const; // must call with held mutex }; const fc::string logger_name("net_plugin_impl"); @@ -289,6 +382,9 @@ namespace eosio { constexpr auto def_send_buffer_size = 1024*1024*def_send_buffer_size_mb; constexpr auto def_max_write_queue_size = def_send_buffer_size*10; constexpr auto def_max_trx_in_progress_size = 100*1024*1024; // 100 MB + 
constexpr auto def_max_consecutive_rejected_blocks = 3; // num of rejected blocks before disconnect + constexpr auto def_max_consecutive_immediate_connection_close = 9; // back off if client keeps closing + constexpr auto def_max_peer_block_ids_per_connection = 100*1024; // if we reach this many then the connection is spaming us, disconnect constexpr auto def_max_clients = 25; // 0 for unlimited clients constexpr auto def_max_nodes_per_host = 1; constexpr auto def_conn_retry_wait = 30; @@ -320,56 +416,9 @@ namespace eosio { */ constexpr uint16_t proto_base = 0; constexpr uint16_t proto_explicit_sync = 1; + constexpr uint16_t block_id_notify = 2; - constexpr uint16_t net_version = proto_explicit_sync; - - struct transaction_state { - transaction_id_type id; - uint32_t block_num = 0; ///< the block number the transaction was included in - time_point_sec expires; - }; - - typedef multi_index_container< - transaction_state, - indexed_by< - ordered_unique< tag, member, sha256_less >, - ordered_non_unique< tag< by_expiry >, member< transaction_state,fc::time_point_sec,&transaction_state::expires >>, - ordered_non_unique< - tag, - member< transaction_state, - uint32_t, - &transaction_state::block_num > > - > - - > transaction_state_index; - - /** - * - */ - struct peer_block_state { - block_id_type id; - uint32_t block_num; - }; - - typedef multi_index_container< - eosio::peer_block_state, - indexed_by< - ordered_unique< tag, member, sha256_less >, - ordered_unique< tag, member > - > - > peer_block_state_index; - - - struct update_block_num { - uint32_t new_bnum; - update_block_num(uint32_t bnum) : new_bnum(bnum) {} - void operator() (node_transaction_state& nts) { - nts.block_num = new_bnum; - } - void operator() (transaction_state& ts) { - ts.block_num = new_bnum; - } - }; + constexpr uint16_t net_version = block_id_notify; /** * Index by start_block_num @@ -385,36 +434,44 @@ namespace eosio { time_point start_time; ///< time request made or received }; - struct 
handshake_initializer { - static void populate(handshake_message &hello); - }; - + // thread safe class queued_buffer : boost::noncopyable { public: void clear_write_queue() { + std::lock_guard g( _mtx ); _write_queue.clear(); _sync_write_queue.clear(); _write_queue_size = 0; } void clear_out_queue() { + std::lock_guard g( _mtx ); while ( _out_queue.size() > 0 ) { _out_queue.pop_front(); } } - uint32_t write_queue_size() const { return _write_queue_size; } + uint32_t write_queue_size() const { + std::lock_guard g( _mtx ); + return _write_queue_size; + } - bool is_out_queue_empty() const { return _out_queue.empty(); } + bool is_out_queue_empty() const { + std::lock_guard g( _mtx ); + return _out_queue.empty(); + } bool ready_to_send() const { + std::lock_guard g( _mtx ); // if out_queue is not empty then async_write is in progress return ((!_sync_write_queue.empty() || !_write_queue.empty()) && _out_queue.empty()); } + // @param callback must not callback into queued_buffer bool add_write_queue( const std::shared_ptr>& buff, std::function callback, bool to_sync_queue ) { + std::lock_guard g( _mtx ); if( to_sync_queue ) { _sync_write_queue.push_back( {buff, callback} ); } else { @@ -428,6 +485,7 @@ namespace eosio { } void fill_out_buffer( std::vector& bufs ) { + std::lock_guard g( _mtx ); if( _sync_write_queue.size() > 0 ) { // always send msgs from sync_write_queue first fill_out_buffer( bufs, _sync_write_queue ); } else { // postpone real_time write_queue if sync queue is not empty @@ -437,6 +495,7 @@ namespace eosio { } void out_callback( boost::system::error_code ec, std::size_t w ) { + std::lock_guard g( _mtx ); for( auto& m : _out_queue ) { m.callback( ec, w ); } @@ -461,7 +520,8 @@ namespace eosio { std::function callback; }; - uint32_t _write_queue_size = 0; + mutable std::mutex _mtx; + uint32_t _write_queue_size{0}; deque _write_queue; deque _sync_write_queue; // sync_write_queue will be sent first deque _out_queue; @@ -472,47 +532,74 @@ namespace eosio { 
class connection : public std::enable_shared_from_this { public: explicit connection( string endpoint ); + connection(); - explicit connection( socket_ptr s ); - ~connection(); - void initialize(); + ~connection() {} + + bool start_session(); + + bool socket_is_open() const { return socket_open.load(); } // thread safe, atomic + const string& peer_address() const { return peer_addr; } // thread safe, const + + void set_connection_type( const string& peer_addr ); + bool is_transactions_only_connection()const { return connection_type == transactions_only; } + bool is_blocks_only_connection()const { return connection_type == blocks_only; } + + private: + static const string unknown; + + void update_endpoints(); - peer_block_state_index blk_state; - transaction_state_index trx_state; optional peer_requested; // this peer is requesting info from us - boost::asio::io_context& server_ioc; + + std::atomic socket_open{false}; + + const string peer_addr; + enum connection_types : char { + both, + transactions_only, + blocks_only + }; + + std::atomic connection_type{both}; + + public: boost::asio::io_context::strand strand; - socket_ptr socket; + std::shared_ptr socket; // only accessed through strand after construction fc::message_buffer<1024*1024> pending_message_buffer; - fc::optional outstanding_read_bytes; - + std::atomic outstanding_read_bytes{0}; // accessed only from strand threads queued_buffer buffer_queue; - uint32_t trx_in_progress_size = 0; - fc::sha256 node_id; - handshake_message last_handshake_recv; - handshake_message last_handshake_sent; + std::atomic trx_in_progress_size{0}; + const uint32_t connection_id; int16_t sent_handshake_count = 0; - bool connecting = false; - bool syncing = false; - uint16_t protocol_version = 0; - string peer_addr; - unique_ptr response_expected; - go_away_reason no_retry = no_reason; - block_id_type fork_head; - uint32_t fork_head_num = 0; - optional last_req; - - connection_status get_status()const { - connection_status stat; - 
stat.peer = peer_addr; - stat.connecting = connecting; - stat.syncing = syncing; - stat.last_handshake = last_handshake_recv; - return stat; - } + std::atomic connecting{true}; + std::atomic syncing{false}; + uint16_t protocol_version = 0; + uint16_t consecutive_rejected_blocks = 0; + std::atomic consecutive_immediate_connection_close = 0; + + std::mutex response_expected_timer_mtx; + boost::asio::steady_timer response_expected_timer; + + std::atomic no_retry{no_reason}; + + mutable std::mutex conn_mtx; //< mtx for last_req .. local_endpoint_port + optional last_req; + handshake_message last_handshake_recv; + handshake_message last_handshake_sent; + block_id_type fork_head; + uint32_t fork_head_num{0}; + fc::time_point last_close; + fc::sha256 conn_node_id; + string remote_endpoint_ip; + string remote_endpoint_port; + string local_endpoint_ip; + string local_endpoint_port; + + connection_status get_status()const; /** \name Peer Timestamps * Time message handling @@ -523,18 +610,34 @@ namespace eosio { tstamp rec{0}; //!< receive timestamp tstamp dst{0}; //!< destination timestamp tstamp xmt{0}; //!< transmit timestamp - - // Computed data - double offset{0}; //!< peer offset - - static const size_t ts_buffer_size{32}; - char ts[ts_buffer_size]; //!< working buffer for making human readable timestamps /** @} */ bool connected(); bool current(); - void reset(); - void close(); + + /// @param reconnect true if we should try and reconnect immediately after close + /// @param shutdown true only if plugin is shutting down + void close( bool reconnect = true, bool shutdown = false ); + private: + static void _close( connection* self, bool reconnect, bool shutdown ); // for easy capture + public: + + bool populate_handshake( handshake_message& hello ); + + bool resolve_and_connect(); + void connect( const std::shared_ptr& resolver, tcp::resolver::results_type endpoints ); + void start_read_message(); + + /** \brief Process the next message from the pending message buffer + 
* + * Process the next message from the pending_message_buffer. + * message_length is the already determined length of the data + * part of the message that will handle the message. + * Returns true is successful. Returns false if an error was + * encountered unpacking or processing the message. + */ + bool process_next_message(uint32_t message_length); + void send_handshake(); /** \name Peer Timestamps @@ -553,25 +656,22 @@ namespace eosio { * packet is placed on the send queue. Calls the kernel time of * day routine and converts to a (at least) 64 bit integer. */ - tstamp get_time() - { + static tstamp get_time() { return std::chrono::system_clock::now().time_since_epoch().count(); } /** @} */ const string peer_name(); - void txn_send_pending(const vector& ids); - void txn_send(const vector& txn_lis); - void blk_send_branch( const block_id_type& msg_head_id ); + void blk_send_branch_impl( uint32_t msg_head_num, uint32_t lib_num, uint32_t head_num ); void blk_send(const block_id_type& blkid); void stop_send(); - void enqueue( const net_message &msg, bool trigger_send = true ); - void enqueue_block( const signed_block_ptr& sb, bool trigger_send = true, bool to_sync_queue = false); + void enqueue( const net_message &msg ); + void enqueue_block( const signed_block_ptr& sb, bool to_sync_queue = false); void enqueue_buffer( const std::shared_ptr>& send_buffer, - bool trigger_send, int priority, go_away_reason close_after_send, + go_away_reason close_after_send, bool to_sync_queue = false); void cancel_sync(go_away_reason); void flush_queues(); @@ -585,290 +685,324 @@ namespace eosio { void fetch_timeout(boost::system::error_code ec); void queue_write(const std::shared_ptr>& buff, - bool trigger_send, - int priority, std::function callback, bool to_sync_queue = false); - void do_queue_write(int priority); + void do_queue_write(); - bool add_peer_block(const peer_block_state& pbs); - bool peer_has_block(const block_id_type& blkid); + static bool is_valid( const 
handshake_message& msg ); - fc::optional _logger_variant; - const fc::variant_object& get_logger_variant() { - if (!_logger_variant) { - boost::system::error_code ec; - auto rep = socket->remote_endpoint(ec); - string ip = ec ? "" : rep.address().to_string(); - string port = ec ? "" : std::to_string(rep.port()); - - auto lep = socket->local_endpoint(ec); - string lip = ec ? "" : lep.address().to_string(); - string lport = ec ? "" : std::to_string(lep.port()); - - _logger_variant.emplace(fc::mutable_variant_object() - ("_name", peer_name()) - ("_id", node_id) - ("_sid", ((string)node_id).substr(0, 7)) - ("_ip", ip) - ("_port", port) - ("_lip", lip) - ("_lport", lport) - ); - } - return *_logger_variant; + void handle_message( const handshake_message& msg ); + void handle_message( const chain_size_message& msg ); + void handle_message( const go_away_message& msg ); + /** \name Peer Timestamps + * Time message handling + * @{ + */ + /** \brief Process time_message + * + * Calculate offset, delay and dispersion. Note carefully the + * implied processing. The first-order difference is done + * directly in 64-bit arithmetic, then the result is converted + * to floating double. All further processing is in + * floating-double arithmetic with rounding done by the hardware. + * This is necessary in order to avoid overflow and preserve precision. 
+ */ + void handle_message( const time_message& msg ); + /** @} */ + void handle_message( const notice_message& msg ); + void handle_message( const request_message& msg ); + void handle_message( const sync_request_message& msg ); + void handle_message( const signed_block& msg ) = delete; // signed_block_ptr overload used instead + void handle_message( const block_id_type& id, signed_block_ptr msg ); + void handle_message( const packed_transaction& msg ) = delete; // packed_transaction_ptr overload used instead + void handle_message( packed_transaction_ptr msg ); + + void process_signed_block( const block_id_type& id, signed_block_ptr msg ); + + fc::variant_object get_logger_variant() { + fc::mutable_variant_object mvo; + mvo( "_name", peer_name()); + std::lock_guard g_conn( conn_mtx ); + mvo( "_id", conn_node_id ) + ( "_sid", conn_node_id.str().substr( 0, 7 ) ) + ( "_ip", remote_endpoint_ip ) + ( "_port", remote_endpoint_port ) + ( "_lip", local_endpoint_ip ) + ( "_lport", local_endpoint_port ); + return mvo; } }; + const string connection::unknown = ""; + + // called from connection strand struct msg_handler : public fc::visitor { - net_plugin_impl &impl; connection_ptr c; - msg_handler( net_plugin_impl &imp, const connection_ptr& conn) : impl(imp), c(conn) {} + explicit msg_handler( const connection_ptr& conn) : c(conn) {} - void operator()( const signed_block& msg ) const { - EOS_ASSERT( false, plugin_config_exception, "operator()(signed_block&&) should be called" ); - } - void operator()( signed_block& msg ) const { - EOS_ASSERT( false, plugin_config_exception, "operator()(signed_block&&) should be called" ); - } - void operator()( const packed_transaction& msg ) const { - EOS_ASSERT( false, plugin_config_exception, "operator()(packed_transaction&&) should be called" ); - } - void operator()( packed_transaction& msg ) const { - EOS_ASSERT( false, plugin_config_exception, "operator()(packed_transaction&&) should be called" ); + template + void operator()( const 
T& ) const { + EOS_ASSERT( false, plugin_config_exception, "Not implemented, call handle_message directly instead" ); } - void operator()( signed_block&& msg ) const { - impl.handle_message( c, std::make_shared( std::move( msg ) ) ); - } - void operator()( packed_transaction&& msg ) const { - impl.handle_message( c, std::make_shared( std::move( msg ) ) ); + void operator()( const handshake_message& msg ) const { + // continue call to handle_message on connection strand + fc_dlog( logger, "handle handshake_message" ); + c->handle_message( msg ); } - template - void operator()( T&& msg ) const - { - impl.handle_message( c, std::forward(msg) ); + void operator()( const chain_size_message& msg ) const { + // continue call to handle_message on connection strand + fc_dlog( logger, "handle chain_size_message" ); + c->handle_message( msg ); } - }; - class sync_manager { - private: - enum stages { - lib_catchup, - head_catchup, - in_sync - }; + void operator()( const go_away_message& msg ) const { + // continue call to handle_message on connection strand + fc_dlog( logger, "handle go_away_message" ); + c->handle_message( msg ); + } - uint32_t sync_known_lib_num; - uint32_t sync_last_requested_num; - uint32_t sync_next_expected_num; - uint32_t sync_req_span; - connection_ptr source; - stages state; + void operator()( const time_message& msg ) const { + // continue call to handle_message on connection strand + fc_dlog( logger, "handle time_message" ); + c->handle_message( msg ); + } - chain_plugin* chain_plug = nullptr; + void operator()( const notice_message& msg ) const { + // continue call to handle_message on connection strand + fc_dlog( logger, "handle notice_message" ); + c->handle_message( msg ); + } - constexpr static auto stage_str(stages s); + void operator()( const request_message& msg ) const { + // continue call to handle_message on connection strand + fc_dlog( logger, "handle request_message" ); + c->handle_message( msg ); + } - public: - explicit 
sync_manager(uint32_t span); - void set_state(stages s); - bool sync_required(); - void send_handshakes(); - bool syncing_with_peer() const { return state == lib_catchup; } - bool is_active(const connection_ptr& conn); - void reset_lib_num(const connection_ptr& conn); - void request_next_chunk(const connection_ptr& conn = connection_ptr()); - void start_sync(const connection_ptr& c, uint32_t target); - void reassign_fetch(const connection_ptr& c, go_away_reason reason); - bool verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id); - void rejected_block(const connection_ptr& c, uint32_t blk_num); - void recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied); - void sync_update_expected(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied); - void recv_handshake(const connection_ptr& c, const handshake_message& msg); - void recv_notice(const connection_ptr& c, const notice_message& msg); + void operator()( const sync_request_message& msg ) const { + // continue call to handle_message on connection strand + fc_dlog( logger, "handle sync_request_message" ); + c->handle_message( msg ); + } }; - class dispatch_manager { - public: - std::multimap received_blocks; - std::multimap received_transactions; - - void bcast_transaction(const transaction_metadata_ptr& trx); - void rejected_transaction(const transaction_id_type& msg); - void bcast_block(const block_state_ptr& bs); - void rejected_block(const block_id_type& id); - - void recv_block(const connection_ptr& conn, const block_id_type& msg, uint32_t bnum); - void expire_blocks( uint32_t bnum ); - void recv_transaction(const connection_ptr& conn, const transaction_id_type& id); - void recv_notice(const connection_ptr& conn, const notice_message& msg, bool generated); + template + void for_each_connection( Function f ) { + std::shared_lock g( my_impl->connections_mtx ); + for( auto& c : my_impl->connections ) { + if( 
!f( c ) ) return; + } + } - void retry_fetch(const connection_ptr& conn); - }; + template + void for_each_block_connection( Function f ) { + std::shared_lock g( my_impl->connections_mtx ); + for( auto& c : my_impl->connections ) { + if( c->is_transactions_only_connection() ) continue; + if( !f( c ) ) return; + } + } //--------------------------------------------------------------------------- connection::connection( string endpoint ) - : blk_state(), - trx_state(), - peer_requested(), - server_ioc( my_impl->thread_pool->get_executor() ), - strand( app().get_io_service() ), - socket( std::make_shared( my_impl->thread_pool->get_executor() ) ), - node_id(), + : peer_addr( endpoint ), + strand( my_impl->thread_pool->get_executor() ), + socket( new tcp::socket( my_impl->thread_pool->get_executor() ) ), + connection_id( ++my_impl->current_connection_id ), + response_expected_timer( my_impl->thread_pool->get_executor() ), last_handshake_recv(), - last_handshake_sent(), - sent_handshake_count(0), - connecting(false), - syncing(false), - protocol_version(0), - peer_addr(endpoint), - response_expected(), - no_retry(no_reason), - fork_head(), - fork_head_num(0), - last_req() + last_handshake_sent() { - fc_ilog( logger, "created connection to ${n}", ("n", endpoint) ); - initialize(); - } - - connection::connection( socket_ptr s ) - : blk_state(), - trx_state(), - peer_requested(), - server_ioc( my_impl->thread_pool->get_executor() ), - strand( app().get_io_service() ), - socket( s ), - node_id(), + fc_ilog( logger, "creating connection to ${n}", ("n", endpoint) ); + } + + connection::connection() + : peer_addr(), + strand( my_impl->thread_pool->get_executor() ), + socket( new tcp::socket( my_impl->thread_pool->get_executor() ) ), + connection_id( ++my_impl->current_connection_id ), + response_expected_timer( my_impl->thread_pool->get_executor() ), last_handshake_recv(), - last_handshake_sent(), - sent_handshake_count(0), - connecting(true), - syncing(false), - 
protocol_version(0), - peer_addr(), - response_expected(), - no_retry(no_reason), - fork_head(), - fork_head_num(0), - last_req() + last_handshake_sent() { fc_ilog( logger, "accepted network connection" ); - initialize(); } - connection::~connection() {} + void connection::update_endpoints() { + boost::system::error_code ec; + boost::system::error_code ec2; + auto rep = socket->remote_endpoint(ec); + auto lep = socket->local_endpoint(ec2); + std::lock_guard g_conn( conn_mtx ); + remote_endpoint_ip = ec ? unknown : rep.address().to_string(); + remote_endpoint_port = ec ? unknown : std::to_string(rep.port()); + local_endpoint_ip = ec2 ? unknown : lep.address().to_string(); + local_endpoint_port = ec2 ? unknown : std::to_string(lep.port()); + } + + void connection::set_connection_type( const string& peer_add ) { + // host:port:[|] + string::size_type colon = peer_add.find(':'); + string::size_type colon2 = peer_add.find(':', colon + 1); + string::size_type end = colon2 == string::npos + ? string::npos : peer_add.find_first_of( " :+=.,<>!$%^&(*)|-#@\t", colon2 + 1 ); // future proof by including most symbols without using regex + string host = peer_add.substr( 0, colon ); + string port = peer_add.substr( colon + 1, colon2 == string::npos ? string::npos : colon2 - (colon + 1)); + string type = colon2 == string::npos ? "" : end == string::npos ? 
+ peer_add.substr( colon2 + 1 ) : peer_add.substr( colon2 + 1, end - (colon2 + 1) ); + + if( type.empty() ) { + fc_ilog( logger, "Setting connection type for: ${peer} to both transactions and blocks", ("peer", peer_add) ); + connection_type = both; + } else if( type == "trx" ) { + fc_ilog( logger, "Setting connection type for: ${peer} to transactions only", ("peer", peer_add) ); + connection_type = transactions_only; + } else if( type == "blk" ) { + fc_ilog( logger, "Setting connection type for: ${peer} to blocks only", ("peer", peer_add) ); + connection_type = blocks_only; + } else { + fc_wlog( logger, "Unknown connection type: ${t}", ("t", type) ); + } + } + + connection_status connection::get_status()const { + connection_status stat; + stat.peer = peer_addr; + stat.connecting = connecting; + stat.syncing = syncing; + std::lock_guard g( conn_mtx ); + stat.last_handshake = last_handshake_recv; + return stat; + } + + bool connection::start_session() { + verify_strand_in_this_thread( strand, __func__, __LINE__ ); - void connection::initialize() { - auto *rnd = node_id.data(); - rnd[0] = 0; - response_expected.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); + update_endpoints(); + boost::asio::ip::tcp::no_delay nodelay( true ); + boost::system::error_code ec; + socket->set_option( nodelay, ec ); + if( ec ) { + fc_elog( logger, "connection failed (set_option) ${peer}: ${e1}", ("peer", peer_name())( "e1", ec.message() ) ); + close(); + return false; + } else { + fc_dlog( logger, "connected to ${peer}", ("peer", peer_name()) ); + socket_open = true; + start_read_message(); + return true; + } } bool connection::connected() { - return (socket && socket->is_open() && !connecting); + return socket_is_open() && !connecting; } bool connection::current() { return (connected() && !syncing); } - void connection::reset() { - peer_requested.reset(); - blk_state.clear(); - trx_state.clear(); - } - void connection::flush_queues() { 
buffer_queue.clear_write_queue(); } - void connection::close() { - if(socket) { - boost::system::error_code ec; - socket->close( ec ); - socket.reset( new tcp::socket( my_impl->thread_pool->get_executor() ) ); - } - else { - fc_wlog( logger, "no socket to close!" ); - } - flush_queues(); - connecting = false; - syncing = false; - if( last_req ) { - my_impl->dispatcher->retry_fetch(shared_from_this()); - } - reset(); - sent_handshake_count = 0; - node_id = fc::sha256(); - last_handshake_recv = handshake_message(); - last_handshake_sent = handshake_message(); - my_impl->sync_master->reset_lib_num(shared_from_this()); - fc_ilog(logger, "closing ${a}, ${p}", ("a",peer_addr)("p",peer_name())); - fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); - cancel_wait(); + void connection::close( bool reconnect, bool shutdown ) { + strand.post( [self = shared_from_this(), reconnect, shutdown]() { + connection::_close( self.get(), reconnect, shutdown ); + }); } - void connection::txn_send_pending(const vector& ids) { - const std::set known_ids(ids.cbegin(), ids.cend()); - my_impl->expire_local_txns(); - for(auto tx = my_impl->local_txns.begin(); tx != my_impl->local_txns.end(); ++tx ){ - const bool found = known_ids.find( tx->id ) != known_ids.cend(); - if( !found ) { - queue_write( tx->serialized_txn, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); - } + void connection::_close( connection* self, bool reconnect, bool shutdown ) { + self->socket_open = false; + boost::system::error_code ec; + if( self->socket->is_open() ) { + self->socket->shutdown( tcp::socket::shutdown_both, ec ); + self->socket->close( ec ); + } + self->socket.reset( new tcp::socket( my_impl->thread_pool->get_executor() ) ); + self->flush_queues(); + self->connecting = false; + self->syncing = false; + self->consecutive_rejected_blocks = 0; + ++self->consecutive_immediate_connection_close; + bool has_last_req = false; + { + std::lock_guard g_conn( self->conn_mtx ); + 
has_last_req = !!self->last_req; + self->last_handshake_recv = handshake_message(); + self->last_handshake_sent = handshake_message(); + self->last_close = fc::time_point::now(); + self->conn_node_id = fc::sha256(); } - } + if( has_last_req && !shutdown ) { + my_impl->dispatcher->retry_fetch( self->shared_from_this() ); + } + self->peer_requested.reset(); + self->sent_handshake_count = 0; + if( !shutdown) my_impl->sync_master->sync_reset_lib_num( self->shared_from_this() ); + fc_ilog( logger, "closing '${a}', ${p}", ("a", self->peer_address())("p", self->peer_name()) ); + fc_dlog( logger, "canceling wait on ${p}", ("p", self->peer_name()) ); // peer_name(), do not hold conn_mtx + self->cancel_wait(); - void connection::txn_send(const vector& ids) { - for(const auto& t : ids) { - auto tx = my_impl->local_txns.get().find(t); - if( tx != my_impl->local_txns.end() ) { - queue_write( tx->serialized_txn, true, priority::low, []( boost::system::error_code ec, std::size_t ) {} ); - } + if( reconnect && !shutdown ) { + my_impl->start_conn_timer( std::chrono::milliseconds( 100 ), connection_wptr() ); } } void connection::blk_send_branch( const block_id_type& msg_head_id ) { - controller& cc = my_impl->chain_plug->chain(); - uint32_t head_num = cc.fork_db_pending_head_block_num(); - notice_message note; - note.known_blocks.mode = normal; - note.known_blocks.pending = 0; + uint32_t head_num = 0; + std::tie( std::ignore, std::ignore, head_num, + std::ignore, std::ignore, std::ignore ) = my_impl->get_chain_info(); + fc_dlog(logger, "head_num = ${h}",("h",head_num)); if(head_num == 0) { + notice_message note; + note.known_blocks.mode = normal; + note.known_blocks.pending = 0; enqueue(note); return; } - if (last_handshake_recv.generation >= 1) { + std::unique_lock g_conn( conn_mtx ); + if( last_handshake_recv.generation >= 1 ) { fc_dlog( logger, "maybe truncating branch at = ${h}:${id}", ("h", block_header::num_from_id(last_handshake_recv.head_id))("id", 
last_handshake_recv.head_id) ); } - const auto lib_id = last_handshake_recv.last_irreversible_block_id; - - auto msg_head_num = block_header::num_from_id(msg_head_id); - bool on_fork = msg_head_num == 0; - try { - on_fork = on_fork || cc.get_block_id_for_num( msg_head_num ) != msg_head_id; - } catch( ... ) { - on_fork = true; - } - if( on_fork ) msg_head_num = 0; + block_id_type lib_id = last_handshake_recv.last_irreversible_block_id; + g_conn.unlock(); const auto lib_num = block_header::num_from_id(lib_id); + if( lib_num == 0 ) return; // if last_irreversible_block_id is null (we have not received handshake or reset) + + app().post( priority::medium, [chain_plug = my_impl->chain_plug, c = shared_from_this(), + lib_num, head_num, msg_head_id]() { + auto msg_head_num = block_header::num_from_id(msg_head_id); + bool on_fork = msg_head_num == 0; + try { + const controller& cc = chain_plug->chain(); + on_fork = on_fork || cc.get_block_id_for_num( msg_head_num ) != msg_head_id; + } catch( ... ) { + on_fork = true; + } + if( on_fork ) msg_head_num = 0; + // if peer on fork, start at their last lib, otherwise we can start at msg_head+1 + c->strand.post( [c, msg_head_num, lib_num, head_num]() { + c->blk_send_branch_impl( msg_head_num, lib_num, head_num ); + } ); + } ); + } + void connection::blk_send_branch_impl( uint32_t msg_head_num, uint32_t lib_num, uint32_t head_num ) { if( !peer_requested ) { auto last = msg_head_num != 0 ? msg_head_num : lib_num; peer_requested = peer_sync_state( last+1, head_num, last ); } else { auto last = msg_head_num != 0 ? 
msg_head_num : std::min( peer_requested->last, lib_num ); - uint32_t end = std::max( peer_requested->end_block, head_num ); + uint32_t end = std::max( peer_requested->end_block, head_num ); peer_requested = peer_sync_state( last+1, end, last ); } - if( peer_requested->start_block <= peer_requested->end_block ) { fc_ilog( logger, "enqueue ${s} - ${e} to ${p}", ("s", peer_requested->start_block)("e", peer_requested->end_block)("p", peer_name()) ); enqueue_sync_block(); @@ -876,32 +1010,34 @@ namespace eosio { fc_ilog( logger, "nothing to enqueue ${p} to ${p}", ("p", peer_name()) ); peer_requested.reset(); } - - // still want to send transactions along during blk branch sync - syncing = false; } - void connection::blk_send(const block_id_type& blkid) { - controller &cc = my_impl->chain_plug->chain(); - try { - signed_block_ptr b = cc.fetch_block_by_id(blkid); - if(b) { - fc_dlog(logger,"found block for id at num ${n}",("n",b->block_num())); - add_peer_block({blkid, block_header::num_from_id(blkid)}); - enqueue_block( b ); - } else { - fc_ilog( logger, "fetch block by id returned null, id ${id} for ${p}", - ("id",blkid)("p",peer_name()) ); + void connection::blk_send( const block_id_type& blkid ) { + connection_wptr weak = shared_from_this(); + app().post( priority::medium, [blkid, weak{std::move(weak)}]() { + connection_ptr c = weak.lock(); + if( !c ) return; + try { + controller& cc = my_impl->chain_plug->chain(); + signed_block_ptr b = cc.fetch_block_by_id( blkid ); + if( b ) { + fc_dlog( logger, "found block for id at num ${n}", ("n", b->block_num()) ); + my_impl->dispatcher->add_peer_block( blkid, c->connection_id ); + c->strand.post( [c, b{std::move(b)}]() { + c->enqueue_block( b ); + } ); + } else { + fc_ilog( logger, "fetch block by id returned null, id ${id} for ${p}", + ("id", blkid)( "p", c->peer_address() ) ); + } + } catch( const assert_exception& ex ) { + fc_elog( logger, "caught assert on fetch_block_by_id, ${ex}, id ${id} for ${p}", + ("ex", 
ex.to_string())( "id", blkid )( "p", c->peer_address() ) ); + } catch( ... ) { + fc_elog( logger, "caught other exception fetching block id ${id} for ${p}", + ("id", blkid)( "p", c->peer_address() ) ); } - } - catch (const assert_exception &ex) { - fc_elog( logger, "caught assert on fetch_block_by_id, ${ex}, id ${id} for ${p}", - ("ex",ex.to_string())("id",blkid)("p",peer_name()) ); - } - catch (...) { - fc_elog( logger, "caught other exception fetching block id ${id} for ${p}", - ("id",blkid)("p",peer_name()) ); - } + }); } void connection::stop_send() { @@ -909,15 +1045,21 @@ namespace eosio { } void connection::send_handshake() { - handshake_initializer::populate(last_handshake_sent); - static_assert( std::is_same_v, "INT16_MAX based on int16_t" ); - if( sent_handshake_count == INT16_MAX ) sent_handshake_count = 1; // do not wrap - last_handshake_sent.generation = ++sent_handshake_count; - fc_ilog( logger, "Sending handshake generation ${g} to ${ep}, lib ${lib}, head ${head}, id ${id}", - ("g", last_handshake_sent.generation)("ep", peer_name()) - ("lib", last_handshake_sent.last_irreversible_block_num) - ("head", last_handshake_sent.head_num)("id", last_handshake_sent.head_id.str().substr(8,16)) ); - enqueue(last_handshake_sent); + strand.dispatch( [c = shared_from_this()]() { + std::unique_lock g_conn( c->conn_mtx ); + if( c->populate_handshake( c->last_handshake_sent ) ) { + static_assert( std::is_same_vsent_handshake_count ), int16_t>, "INT16_MAX based on int16_t" ); + if( c->sent_handshake_count == INT16_MAX ) c->sent_handshake_count = 1; // do not wrap + c->last_handshake_sent.generation = ++c->sent_handshake_count; + auto last_handshake_sent = c->last_handshake_sent; + g_conn.unlock(); + fc_ilog( logger, "Sending handshake generation ${g} to ${ep}, lib ${lib}, head ${head}, id ${id}", + ("g", last_handshake_sent.generation)( "ep", c->peer_name()) + ("lib", last_handshake_sent.last_irreversible_block_num) + ("head", last_handshake_sent.head_num)("id", 
last_handshake_sent.head_id.str().substr(8,16)) ); + c->enqueue( last_handshake_sent ); + } + }); } void connection::send_time() { @@ -938,80 +1080,65 @@ namespace eosio { } void connection::queue_write(const std::shared_ptr>& buff, - bool trigger_send, - int priority, std::function callback, bool to_sync_queue) { if( !buffer_queue.add_write_queue( buff, callback, to_sync_queue )) { fc_wlog( logger, "write_queue full ${s} bytes, giving up on connection ${p}", ("s", buffer_queue.write_queue_size())("p", peer_name()) ); - my_impl->close( shared_from_this() ); + close(); return; } - if( buffer_queue.is_out_queue_empty() && trigger_send) { - do_queue_write( priority ); - } + do_queue_write(); } - void connection::do_queue_write(int priority) { + void connection::do_queue_write() { if( !buffer_queue.ready_to_send() ) return; - connection_wptr c(shared_from_this()); - if(!socket->is_open()) { - fc_elog(logger,"socket not open to ${p}",("p",peer_name())); - my_impl->close(c.lock()); - return; - } + connection_ptr c(shared_from_this()); + std::vector bufs; buffer_queue.fill_out_buffer( bufs ); - boost::asio::async_write(*socket, bufs, - boost::asio::bind_executor(strand, [c, socket=socket, priority]( boost::system::error_code ec, std::size_t w ) { - app().post(priority, [c, priority, ec, w]() { + strand.dispatch( [c{std::move(c)}, bufs{std::move(bufs)}]() { + boost::asio::async_write( *c->socket, bufs, + boost::asio::bind_executor( c->strand, [c, socket=c->socket]( boost::system::error_code ec, std::size_t w ) { try { - auto conn = c.lock(); - if(!conn) + c->buffer_queue.clear_out_queue(); + // May have closed connection and cleared buffer_queue + if( !c->socket_is_open() || socket != c->socket ) { + fc_ilog( logger, "async write socket ${r} before callback: ${p}", + ("r", c->socket_is_open() ? "changed" : "closed")("p", c->peer_name()) ); return; + } - conn->buffer_queue.out_callback( ec, w ); - - if(ec) { - string pname = conn ? 
conn->peer_name() : "no connection name"; - if( ec.value() != boost::asio::error::eof) { - fc_elog( logger, "Error sending to peer ${p}: ${i}", ("p",pname)("i", ec.message()) ); - } - else { - fc_wlog( logger, "connection closure detected on write to ${p}",("p",pname) ); + if( ec ) { + if( ec.value() != boost::asio::error::eof ) { + fc_elog( logger, "Error sending to peer ${p}: ${i}", ("p", c->peer_name())( "i", ec.message() ) ); + } else { + fc_wlog( logger, "connection closure detected on write to ${p}", ("p", c->peer_name()) ); } - my_impl->close(conn); + c->close(); return; } - conn->buffer_queue.clear_out_queue(); - conn->enqueue_sync_block(); - conn->do_queue_write( priority ); - } - catch(const std::exception &ex) { - auto conn = c.lock(); - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger,"Exception in do_queue_write to ${p} ${s}", ("p",pname)("s",ex.what()) ); - } - catch(const fc::exception &ex) { - auto conn = c.lock(); - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger,"Exception in do_queue_write to ${p} ${s}", ("p",pname)("s",ex.to_string()) ); - } - catch(...) { - auto conn = c.lock(); - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger,"Exception in do_queue_write to ${p}", ("p",pname) ); + + c->buffer_queue.out_callback( ec, w ); + + c->enqueue_sync_block(); + c->do_queue_write(); + } catch( const std::exception& ex ) { + fc_elog( logger, "Exception in do_queue_write to ${p} ${s}", ("p", c->peer_name())( "s", ex.what() ) ); + } catch( const fc::exception& ex ) { + fc_elog( logger, "Exception in do_queue_write to ${p} ${s}", ("p", c->peer_name())( "s", ex.to_string() ) ); + } catch( ... 
) { + fc_elog( logger, "Exception in do_queue_write to ${p}", ("p", c->peer_name()) ); } - }); - })); + })); + }); } void connection::cancel_sync(go_away_reason reason) { - fc_dlog(logger,"cancel sync reason = ${m}, write queue size ${o} bytes peer ${p}", - ("m",reason_str(reason)) ("o", buffer_queue.write_queue_size())("p", peer_name())); + fc_dlog( logger, "cancel sync reason = ${m}, write queue size ${o} bytes peer ${p}", + ("m", reason_str( reason ))( "o", buffer_queue.write_queue_size() )( "p", peer_address() ) ); cancel_wait(); flush_queues(); switch (reason) { @@ -1022,34 +1149,45 @@ namespace eosio { break; } default: - fc_ilog(logger, "sending empty request but not calling sync wait on ${p}", ("p",peer_name())); + fc_ilog(logger, "sending empty request but not calling sync wait on ${p}", ("p",peer_address())); enqueue( ( sync_request_message ) {0,0} ); } } bool connection::enqueue_sync_block() { - if (!peer_requested) + if( !peer_requested ) { return false; + } else { + fc_dlog( logger, "enqueue sync block ${num}", ("num", peer_requested->last + 1) ); + } uint32_t num = ++peer_requested->last; - bool trigger_send = num == peer_requested->start_block; if(num == peer_requested->end_block) { peer_requested.reset(); fc_ilog( logger, "completing enqueue_sync_block ${num} to ${p}", ("num", num)("p", peer_name()) ); } - try { + connection_wptr weak = shared_from_this(); + app().post( priority::medium, [num, weak{std::move(weak)}]() { + connection_ptr c = weak.lock(); + if( !c ) return; controller& cc = my_impl->chain_plug->chain(); - signed_block_ptr sb = cc.fetch_block_by_number(num); - if(sb) { - enqueue_block( sb, trigger_send, true); - return true; + signed_block_ptr sb = cc.fetch_block_by_number( num ); + if( sb ) { + c->strand.post( [c, sb{std::move(sb)}]() { + c->enqueue_block( sb, true ); + }); + } else { + c->strand.post( [c, num]() { + peer_ilog( c, "enqueue sync, unable to fetch block ${num}", ("num", num) ); + c->send_handshake(); + }); } - } catch ( 
... ) { - fc_wlog( logger, "write loop exception" ); - } - return false; + }); + + return true; } - void connection::enqueue( const net_message& m, bool trigger_send ) { + void connection::enqueue( const net_message& m ) { + verify_strand_in_this_thread( strand, __func__, __LINE__ ); go_away_reason close_after_send = no_reason; if (m.contains()) { close_after_send = m.get().reason; @@ -1067,7 +1205,7 @@ namespace eosio { ds.write( header, header_size ); fc::raw::pack( ds, m ); - enqueue_buffer( send_buffer, trigger_send, priority::low, close_after_send ); + enqueue_buffer( send_buffer, close_after_send ); } template< typename T> @@ -1093,6 +1231,7 @@ namespace eosio { static std::shared_ptr> create_send_buffer( const signed_block_ptr& sb ) { // this implementation is to avoid copy of signed_block to net_message // matches which of net_message for signed_block + fc_dlog( logger, "sending block ${bn}", ("bn", sb->block_num()) ); return create_send_buffer( signed_block_which, *sb ); } @@ -1102,131 +1241,101 @@ namespace eosio { return create_send_buffer( packed_transaction_which, trx ); } - void connection::enqueue_block( const signed_block_ptr& sb, bool trigger_send, bool to_sync_queue) { - enqueue_buffer( create_send_buffer( sb ), trigger_send, priority::low, no_reason, to_sync_queue); + void connection::enqueue_block( const signed_block_ptr& sb, bool to_sync_queue) { + fc_dlog( logger, "enqueue block ${num}", ("num", sb->block_num()) ); + verify_strand_in_this_thread( strand, __func__, __LINE__ ); + enqueue_buffer( create_send_buffer( sb ), no_reason, to_sync_queue); } void connection::enqueue_buffer( const std::shared_ptr>& send_buffer, - bool trigger_send, int priority, go_away_reason close_after_send, + go_away_reason close_after_send, bool to_sync_queue) { - connection_wptr weak_this = shared_from_this(); - queue_write(send_buffer,trigger_send, priority, - [weak_this, close_after_send](boost::system::error_code ec, std::size_t ) { - connection_ptr conn = 
weak_this.lock(); - if (conn) { + connection_ptr self = shared_from_this(); + queue_write(send_buffer, + [conn{std::move(self)}, close_after_send](boost::system::error_code ec, std::size_t ) { + if (ec) return; if (close_after_send != no_reason) { - fc_elog( logger, "sent a go away message: ${r}, closing connection to ${p}", + fc_ilog( logger, "sent a go away message: ${r}, closing connection to ${p}", ("r", reason_str(close_after_send))("p", conn->peer_name()) ); - my_impl->close(conn); + conn->close(); return; } - } else { - fc_wlog(logger, "connection expired before enqueued net_message called callback!"); - } }, to_sync_queue); } + // thread safe void connection::cancel_wait() { - if (response_expected) - response_expected->cancel(); + std::lock_guard g( response_expected_timer_mtx ); + response_expected_timer.cancel(); } + // thread safe void connection::sync_wait() { - response_expected->expires_from_now( my_impl->resp_expected_period); - connection_wptr c(shared_from_this()); - response_expected->async_wait( [c]( boost::system::error_code ec ) { - app().post(priority::low, [c, ec]() { - connection_ptr conn = c.lock(); - if (!conn) { - // connection was destroyed before this lambda was delivered - return; - } - - conn->sync_timeout(ec); - }); - } ); + connection_ptr c(shared_from_this()); + std::lock_guard g( response_expected_timer_mtx ); + response_expected_timer.expires_from_now( my_impl->resp_expected_period ); + response_expected_timer.async_wait( + boost::asio::bind_executor( c->strand, [c]( boost::system::error_code ec ) { + c->sync_timeout( ec ); + } ) ); } + // thread safe void connection::fetch_wait() { - response_expected->expires_from_now( my_impl->resp_expected_period); - connection_wptr c(shared_from_this()); - response_expected->async_wait( [c]( boost::system::error_code ec ) { - app().post(priority::low, [c, ec]() { - connection_ptr conn = c.lock(); - if (!conn) { - // connection was destroyed before this lambda was delivered - return; - } - - 
conn->fetch_timeout(ec); - }); - } ); + connection_ptr c( shared_from_this() ); + std::lock_guard g( response_expected_timer_mtx ); + response_expected_timer.expires_from_now( my_impl->resp_expected_period ); + response_expected_timer.async_wait( + boost::asio::bind_executor( c->strand, [c]( boost::system::error_code ec ) { + c->fetch_timeout(ec); + } ) ); } + // called from connection strand void connection::sync_timeout( boost::system::error_code ec ) { if( !ec ) { - my_impl->sync_master->reassign_fetch(shared_from_this(), benign_other); - } - else if( ec == boost::asio::error::operation_aborted) { - } - else { - fc_elog( logger,"setting timer for sync request got error ${ec}",("ec", ec.message()) ); + my_impl->sync_master->sync_reassign_fetch( shared_from_this(), benign_other ); + } else if( ec == boost::asio::error::operation_aborted ) { + } else { + fc_elog( logger, "setting timer for sync request got error ${ec}", ("ec", ec.message()) ); } } + // locks conn_mtx, do not call while holding conn_mtx const string connection::peer_name() { + std::lock_guard g_conn( conn_mtx ); if( !last_handshake_recv.p2p_address.empty() ) { return last_handshake_recv.p2p_address; } - if( !peer_addr.empty() ) { - return peer_addr; + if( !peer_address().empty() ) { + return peer_address(); } - if( socket != nullptr ) { - boost::system::error_code ec; - auto rep = socket->remote_endpoint(ec); - if( !ec ) { - return rep.address().to_string() + ':' + std::to_string( rep.port() ); - } + if( remote_endpoint_port != unknown ) { + return remote_endpoint_ip + ":" + remote_endpoint_port; } return "connecting client"; } void connection::fetch_timeout( boost::system::error_code ec ) { if( !ec ) { - my_impl->dispatcher->retry_fetch(shared_from_this()); - } - else if( ec == boost::asio::error::operation_aborted ) { + my_impl->dispatcher->retry_fetch( shared_from_this() ); + } else if( ec == boost::asio::error::operation_aborted ) { if( !connected() ) { - fc_dlog(logger, "fetch timeout was 
cancelled due to dead connection"); + fc_dlog( logger, "fetch timeout was cancelled due to dead connection" ); } - } - else { + } else { fc_elog( logger, "setting timer for fetch request got error ${ec}", ("ec", ec.message() ) ); } } void connection::request_sync_blocks(uint32_t start, uint32_t end) { sync_request_message srm = {start,end}; - enqueue( net_message(srm)); + enqueue( net_message(srm) ); sync_wait(); } - bool connection::add_peer_block(const peer_block_state& entry) { - auto bptr = blk_state.get().find(entry.id); - bool added = (bptr == blk_state.end()); - if (added){ - blk_state.insert(entry); - } - return added; - } - - bool connection::peer_has_block( const block_id_type& blkid ) { - auto blk_itr = blk_state.get().find(blkid); - return blk_itr != blk_state.end(); - } - //----------------------------------------------------------- sync_manager::sync_manager( uint32_t req_span ) @@ -1234,11 +1343,9 @@ namespace eosio { ,sync_last_requested_num( 0 ) ,sync_next_expected_num( 1 ) ,sync_req_span( req_span ) - ,source() - ,state(in_sync) + ,sync_source() + ,sync_state(in_sync) { - chain_plug = app().find_plugin(); - EOS_ASSERT( chain_plug, chain::missing_chain_plugin_exception, "" ); } constexpr auto sync_manager::stage_str(stages s) { @@ -1251,51 +1358,43 @@ namespace eosio { } void sync_manager::set_state(stages newstate) { - if (state == newstate) { + if( sync_state == newstate ) { return; } - fc_dlog(logger, "old state ${os} becoming ${ns}",("os",stage_str(state))("ns",stage_str(newstate))); - state = newstate; + fc_ilog( logger, "old state ${os} becoming ${ns}", ("os", stage_str( sync_state ))( "ns", stage_str( newstate ) ) ); + sync_state = newstate; } - bool sync_manager::is_active(const connection_ptr& c) { - if (state == head_catchup && c) { - bool fhset = c->fork_head != block_id_type(); - fc_dlog(logger, "fork_head_num = ${fn} fork_head set = ${s}", - ("fn", c->fork_head_num)("s", fhset)); - return c->fork_head != block_id_type() && 
c->fork_head_num < chain_plug->chain().fork_db_pending_head_block_num(); - } - return state != in_sync; - } - - void sync_manager::reset_lib_num(const connection_ptr& c) { - if(state == in_sync) { - source.reset(); + void sync_manager::sync_reset_lib_num(const connection_ptr& c) { + std::unique_lock g( sync_mtx ); + if( sync_state == in_sync ) { + sync_source.reset(); } + if( !c ) return; if( c->current() ) { - if( c->last_handshake_recv.last_irreversible_block_num > sync_known_lib_num) { - sync_known_lib_num =c->last_handshake_recv.last_irreversible_block_num; + std::lock_guard g_conn( c->conn_mtx ); + if( c->last_handshake_recv.last_irreversible_block_num > sync_known_lib_num ) { + sync_known_lib_num = c->last_handshake_recv.last_irreversible_block_num; } - } else if( c == source ) { + } else if( c == sync_source ) { sync_last_requested_num = 0; - request_next_chunk(); + request_next_chunk( std::move(g) ); } } - bool sync_manager::sync_required() { - fc_dlog(logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", - ("req",sync_last_requested_num)("recv",sync_next_expected_num)("known",sync_known_lib_num)("head",chain_plug->chain().fork_db_pending_head_block_num())); - - return( sync_last_requested_num < sync_known_lib_num || - chain_plug->chain().fork_db_pending_head_block_num() < sync_last_requested_num ); - } + // call with g_sync locked + void sync_manager::request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn ) { + uint32_t fork_head_block_num = 0; + uint32_t lib_block_num = 0; + std::tie( lib_block_num, std::ignore, fork_head_block_num, + std::ignore, std::ignore, std::ignore ) = my_impl->get_chain_info(); - void sync_manager::request_next_chunk( const connection_ptr& conn ) { - uint32_t head_block = chain_plug->chain().fork_db_pending_head_block_num(); + fc_dlog( logger, "sync_last_requested_num: ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}", + ("r", 
sync_last_requested_num)("e", sync_next_expected_num)("k", sync_known_lib_num)("s", sync_req_span) ); - if (head_block < sync_last_requested_num && source && source->current()) { - fc_ilog(logger, "ignoring request, head is ${h} last req = ${r} source is ${p}", - ("h",head_block)("r",sync_last_requested_num)("p",source->peer_name())); + if( fork_head_block_num < sync_last_requested_num && sync_source && sync_source->current() ) { + fc_ilog( logger, "ignoring request, head is ${h} last req = ${r} source is ${p}", + ("h", fork_head_block_num)( "r", sync_last_requested_num )( "p", sync_source->peer_name() ) ); return; } @@ -1306,27 +1405,27 @@ namespace eosio { */ if (conn && conn->current() ) { - source = conn; - } - else { + sync_source = conn; + } else { + std::shared_lock g( my_impl->connections_mtx ); if( my_impl->connections.size() == 0 ) { - source.reset(); + sync_source.reset(); } else if( my_impl->connections.size() == 1 ) { - if (!source) { - source = *my_impl->connections.begin(); + if (!sync_source) { + sync_source = *my_impl->connections.begin(); } } else { // init to a linear array search auto cptr = my_impl->connections.begin(); auto cend = my_impl->connections.end(); // do we remember the previous source? - if( source ) { + if (sync_source) { //try to find it in the list - cptr = my_impl->connections.find( source ); + cptr = my_impl->connections.find( sync_source ); cend = cptr; if( cptr == my_impl->connections.end() ) { //not there - must have been closed! cend is now connections.end, so just flatten the ring. - source.reset(); + sync_source.reset(); cptr = my_impl->connections.begin(); } else { //was found - advance the start to the next. cend is the old source. @@ -1341,8 +1440,8 @@ namespace eosio { auto cstart_it = cptr; do { //select the first one which is current and break out. 
- if( (*cptr)->current() ) { - source = *cptr; + if( !(*cptr)->is_transactions_only_connection() && (*cptr)->current() ) { + sync_source = *cptr; break; } if( ++cptr == my_impl->connections.end() ) @@ -1354,11 +1453,11 @@ namespace eosio { } // verify there is an available source - if (!source || !source->current() ) { + if( !sync_source || !sync_source->current() || sync_source->is_transactions_only_connection() ) { fc_elog( logger, "Unable to continue syncing at this time"); - sync_known_lib_num = chain_plug->chain().last_irreversible_block_num(); + sync_known_lib_num = lib_block_num; sync_last_requested_num = 0; - set_state(in_sync); // probably not, but we can't do anything else + set_state( in_sync ); // probably not, but we can't do anything else return; } @@ -1369,68 +1468,95 @@ namespace eosio { if( end > sync_known_lib_num ) end = sync_known_lib_num; if( end > 0 && end >= start ) { - fc_ilog(logger, "requesting range ${s} to ${e}, from ${n}", - ("n",source->peer_name())("s",start)("e",end)); - request_sent = true; - source->request_sync_blocks(start, end); sync_last_requested_num = end; + connection_ptr c = sync_source; + g_sync.unlock(); + request_sent = true; + c->strand.post( [c, start, end]() { + fc_ilog( logger, "requesting range ${s} to ${e}, from ${n}", ("n", c->peer_name())( "s", start )( "e", end ) ); + c->request_sync_blocks( start, end ); + } ); } } if( !request_sent ) { - source->send_handshake(); + connection_ptr c = sync_source; + g_sync.unlock(); + c->send_handshake(); } } - void sync_manager::send_handshakes() - { - for( auto &ci : my_impl->connections) { - if( ci->current()) { + // static, thread safe + void sync_manager::send_handshakes() { + for_each_connection( []( auto& ci ) { + if( ci->current() ) { ci->send_handshake(); } - } + return true; + } ); + } + + bool sync_manager::is_sync_required( uint32_t fork_head_block_num ) { + fc_dlog( logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", + ("req", 
sync_last_requested_num)( "recv", sync_next_expected_num )( "known", sync_known_lib_num ) + ("head", fork_head_block_num ) ); + + return( sync_last_requested_num < sync_known_lib_num || + fork_head_block_num < sync_last_requested_num ); } void sync_manager::start_sync(const connection_ptr& c, uint32_t target) { + std::unique_lock g_sync( sync_mtx ); if( target > sync_known_lib_num) { sync_known_lib_num = target; } - uint32_t bnum = chain_plug->chain().last_irreversible_block_num(); - if (!sync_required() || target <= bnum) { - uint32_t hnum = chain_plug->chain().fork_db_pending_head_block_num(); + uint32_t lib_num = 0; + uint32_t fork_head_block_num = 0; + std::tie( lib_num, std::ignore, fork_head_block_num, + std::ignore, std::ignore, std::ignore ) = my_impl->get_chain_info(); + + if( !is_sync_required( fork_head_block_num ) || target <= lib_num ) { fc_dlog( logger, "We are already caught up, my irr = ${b}, head = ${h}, target = ${t}", - ("b",bnum)("h",hnum)("t",target)); + ("b", lib_num)( "h", fork_head_block_num )( "t", target ) ); return; } - if (state == in_sync) { - set_state(lib_catchup); - sync_next_expected_num = chain_plug->chain().last_irreversible_block_num() + 1; + if( sync_state == in_sync ) { + set_state( lib_catchup ); + sync_next_expected_num = std::max( lib_num + 1, sync_next_expected_num ); } - fc_ilog(logger, "Catching up with chain, our last req is ${cc}, theirs is ${t} peer ${p}", - ( "cc",sync_last_requested_num)("t",target)("p",c->peer_name())); + fc_ilog( logger, "Catching up with chain, our last req is ${cc}, theirs is ${t} peer ${p}", + ("cc", sync_last_requested_num)( "t", target )( "p", c->peer_name() ) ); - request_next_chunk(c); + request_next_chunk( std::move( g_sync ), c ); } - void sync_manager::reassign_fetch(const connection_ptr& c, go_away_reason reason) { - fc_ilog(logger, "reassign_fetch, our last req is ${cc}, next expected is ${ne} peer ${p}", - ( 
"cc",sync_last_requested_num)("ne",sync_next_expected_num)("p",c->peer_name())); + // called from connection strand + void sync_manager::sync_reassign_fetch(const connection_ptr& c, go_away_reason reason) { + std::unique_lock g( sync_mtx ); + fc_ilog( logger, "reassign_fetch, our last req is ${cc}, next expected is ${ne} peer ${p}", + ("cc", sync_last_requested_num)( "ne", sync_next_expected_num )( "p", c->peer_name() ) ); - if (c == source) { + if( c == sync_source ) { c->cancel_sync(reason); sync_last_requested_num = 0; - request_next_chunk(); + request_next_chunk( std::move(g) ); } } - void sync_manager::recv_handshake(const connection_ptr& c, const handshake_message& msg) { - controller& cc = chain_plug->chain(); - uint32_t lib_num = cc.last_irreversible_block_num(); + void sync_manager::recv_handshake( const connection_ptr& c, const handshake_message& msg ) { + + if( c->is_transactions_only_connection() ) return; + + uint32_t lib_num = 0; uint32_t peer_lib = msg.last_irreversible_block_num; - reset_lib_num(c); - c->syncing = false; + uint32_t head = 0; + block_id_type head_id; + std::tie( lib_num, std::ignore, head, + std::ignore, std::ignore, head_id ) = my_impl->get_chain_info(); + + sync_reset_lib_num(c); //-------------------------------- // sync need checks; (lib == last irreversible block) @@ -1446,17 +1572,15 @@ namespace eosio { // //----------------------------- - uint32_t head = cc.fork_db_pending_head_block_num(); - block_id_type head_id = cc.fork_db_pending_head_block_id(); if (head_id == msg.head_id) { fc_ilog( logger, "handshake from ${ep}, lib ${lib}, head ${head}, head id ${id}.. 
sync 0", ("ep", c->peer_name())("lib", msg.last_irreversible_block_num)("head", msg.head_num) ("id", msg.head_id.str().substr(8,16)) ); - // notify peer of our pending transactions + c->syncing = false; notice_message note; note.known_blocks.mode = none; note.known_trx.mode = catch_up; - note.known_trx.pending = my_impl->local_txns.size(); + note.known_trx.pending = 0; c->enqueue( note ); return; } @@ -1464,9 +1588,10 @@ namespace eosio { fc_ilog( logger, "handshake from ${ep}, lib ${lib}, head ${head}, head id ${id}.. sync 1", ("ep", c->peer_name())("lib", msg.last_irreversible_block_num)("head", msg.head_num) ("id", msg.head_id.str().substr(8,16)) ); + c->syncing = false; // wait for receipt of a notice message before initiating sync if (c->protocol_version < proto_explicit_sync) { - start_sync( c, peer_lib); + start_sync( c, peer_lib ); } return; } @@ -1490,10 +1615,10 @@ namespace eosio { fc_ilog( logger, "handshake from ${ep}, lib ${lib}, head ${head}, head id ${id}.. sync 3", ("ep", c->peer_name())("lib", msg.last_irreversible_block_num)("head", msg.head_num) ("id", msg.head_id.str().substr(8,16)) ); + c->syncing = false; verify_catchup(c, msg.head_num, msg.head_id); return; - } - else { + } else { fc_ilog( logger, "handshake from ${ep}, lib ${lib}, head ${head}, head id ${id}.. sync 4", ("ep", c->peer_name())("lib", msg.last_irreversible_block_num)("head", msg.head_num) ("id", msg.head_id.str().substr(8,16)) ); @@ -1506,16 +1631,22 @@ namespace eosio { c->enqueue( note ); } c->syncing = false; - bool on_fork = true; - try { - on_fork = cc.get_block_id_for_num( msg.head_num ) != msg.head_id; - } catch( ... 
) {} - if( on_fork ) { - request_message req; - req.req_blocks.mode = catch_up; - req.req_trx.mode = none; - c->enqueue( req ); - } + app().post( priority::medium, [chain_plug = my_impl->chain_plug, c, + msg_head_num = msg.head_num, msg_head_id = msg.head_id]() { + bool on_fork = true; + try { + controller& cc = chain_plug->chain(); + on_fork = cc.get_block_id_for_num( msg_head_num ) != msg_head_id; + } catch( ... ) {} + if( on_fork ) { + c->strand.post( [c]() { + request_message req; + req.req_blocks.mode = catch_up; + req.req_trx.mode = none; + c->enqueue( req ); + } ); + } + } ); return; } } @@ -1523,32 +1654,42 @@ namespace eosio { bool sync_manager::verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id) { request_message req; req.req_blocks.mode = catch_up; - for (const auto& cc : my_impl->connections) { - if (cc->fork_head == id || - cc->fork_head_num > num) { + for_each_block_connection( [num, &id, &req]( const auto& cc ) { + std::lock_guard g_conn( cc->conn_mtx ); + if( cc->fork_head_num > num || cc->fork_head == id ) { req.req_blocks.mode = none; - break; + return false; } - } + return true; + } ); if( req.req_blocks.mode == catch_up ) { - const controller& cc = chain_plug->chain(); - const auto lib = cc.last_irreversible_block_num(); - fc_ilog( logger, "catch_up while in ${s}, fork head num = ${fhn} " - "target LIB = ${lib} next_expected = ${ne}, id ${id}..., peer ${p}", - ("s", stage_str( state ))("fhn", num)("lib", sync_known_lib_num) - ("ne", sync_next_expected_num)("id", id.str().substr(8,16))("p", c->peer_name()) ); - if (state == lib_catchup || num < lib ) + { + std::lock_guard g( sync_mtx ); + fc_ilog( logger, "catch_up while in ${s}, fork head num = ${fhn} " + "target LIB = ${lib} next_expected = ${ne}, id ${id}..., peer ${p}", + ("s", stage_str( sync_state ))("fhn", num)("lib", sync_known_lib_num) + ("ne", sync_next_expected_num)("id", id.str().substr( 8, 16 ))("p", c->peer_name()) ); + } + uint32_t lib; + block_id_type 
head_id; + std::tie( lib, std::ignore, std::ignore, + std::ignore, std::ignore, head_id ) = my_impl->get_chain_info(); + if( sync_state == lib_catchup || num < lib ) return false; - set_state(head_catchup); - c->fork_head = id; - c->fork_head_num = num; - block_id_type head_id = cc.fork_db_pending_head_block_id(); + set_state( head_catchup ); + { + std::lock_guard g_conn( c->conn_mtx ); + c->fork_head = id; + c->fork_head_num = num; + } + req.req_blocks.ids.emplace_back( head_id ); - } - else { + } else { fc_ilog( logger, "none notice while in ${s}, fork head num = ${fhn}, id ${id}..., peer ${p}", - ("s", stage_str( state ))("fhn", num) + ("s", stage_str( sync_state ))("fhn", num) ("id", id.str().substr(8,16))("p", c->peer_name()) ); + std::lock_guard g_conn( c->conn_mtx ); + c->fork_head = block_id_type(); c->fork_head_num = 0; } req.req_trx.mode = none; @@ -1556,14 +1697,10 @@ namespace eosio { return true; } - void sync_manager::recv_notice(const connection_ptr& c, const notice_message& msg) { - fc_ilog(logger, "sync_manager got ${m} block notice",("m",modes_str(msg.known_blocks.mode))); - if( msg.known_blocks.ids.size() > 1 ) { - fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection: ${p}", - ("s", msg.known_blocks.ids.size())("p", c->peer_name()) ); - my_impl->close(c); - return; - } + void sync_manager::sync_recv_notice( const connection_ptr& c, const notice_message& msg) { + fc_dlog( logger, "sync_manager got ${m} block notice", ("m", modes_str( msg.known_blocks.mode )) ); + EOS_ASSERT( msg.known_blocks.mode == catch_up || msg.known_blocks.mode == last_irr_catch_up, plugin_exception, + "sync_recv_notice only called on catch_up" ); if (msg.known_blocks.mode == catch_up) { if (msg.known_blocks.ids.size() == 0) { fc_elog( logger,"got a catch up with ids size = 0" ); @@ -1571,85 +1708,107 @@ namespace eosio { const block_id_type& id = msg.known_blocks.ids.back(); fc_ilog( logger, "notice_message, pending ${p}, blk_num ${n}, id 
${id}...", ("p", msg.known_blocks.pending)("n", block_header::num_from_id(id))("id",id.str().substr(8,16)) ); - controller& cc = chain_plug->chain(); - if( !cc.fetch_block_by_id( id ) ) { + if( !my_impl->dispatcher->have_block( id ) ) { verify_catchup( c, msg.known_blocks.pending, id ); } else { // we already have the block, so update peer with our view of the world c->send_handshake(); } } - } - else { - c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; - reset_lib_num(c); + } else if (msg.known_blocks.mode == last_irr_catch_up) { + { + std::lock_guard g_conn( c->conn_mtx ); + c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; + } + sync_reset_lib_num(c); start_sync(c, msg.known_trx.pending); } } - void sync_manager::rejected_block(const connection_ptr& c, uint32_t blk_num) { - if (state != in_sync ) { - fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn",blk_num)("p",c->peer_name()) ); + // called from connection strand + void sync_manager::rejected_block( const connection_ptr& c, uint32_t blk_num ) { + std::unique_lock g( sync_mtx ); + if( ++c->consecutive_rejected_blocks > def_max_consecutive_rejected_blocks ) { + fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn", blk_num)("p", c->peer_name()) ); sync_last_requested_num = 0; - source.reset(); - my_impl->close(c); - set_state(in_sync); - send_handshakes(); + sync_source.reset(); + g.unlock(); + c->close(); } else { c->send_handshake(); } } - void sync_manager::sync_update_expected(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied) { + // called from connection strand + void sync_manager::sync_update_expected( const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied ) { + std::unique_lock g_sync( sync_mtx ); if( blk_num <= sync_last_requested_num ) { fc_dlog( logger, "sync_last_requested_num: ${r}, sync_next_expected_num: ${e}, 
sync_known_lib_num: ${k}, sync_req_span: ${s}", ("r", sync_last_requested_num)("e", sync_next_expected_num)("k", sync_known_lib_num)("s", sync_req_span) ); if (blk_num != sync_next_expected_num && !blk_applied) { + auto sync_next_expected = sync_next_expected_num; + g_sync.unlock(); fc_dlog( logger, "expected block ${ne} but got ${bn}, from connection: ${p}", - ("ne", sync_next_expected_num)( "bn", blk_num )( "p", c->peer_name() ) ); + ("ne", sync_next_expected)( "bn", blk_num )( "p", c->peer_name() ) ); return; } sync_next_expected_num = blk_num + 1; } } - void sync_manager::recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied) { - fc_dlog(logger, "got block ${bn} from ${p}",("bn",blk_num)("p",c->peer_name())); - sync_update_expected(c, blk_id, blk_num, blk_applied); - if (state == head_catchup) { - fc_dlog(logger, "sync_manager in head_catchup state"); - set_state(in_sync); - source.reset(); + // called from connection strand + void sync_manager::sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied) { + fc_dlog( logger, "got block ${bn} from ${p}", ("bn", blk_num)( "p", c->peer_name() ) ); + if( app().is_quiting() ) { + c->close( false, true ); + return; + } + c->consecutive_rejected_blocks = 0; + sync_update_expected( c, blk_id, blk_num, blk_applied ); + std::unique_lock g_sync( sync_mtx ); + stages state = sync_state; + fc_dlog( logger, "state ${s}", ("s", stage_str( state )) ); + if( state == head_catchup ) { + fc_dlog( logger, "sync_manager in head_catchup state" ); + sync_source.reset(); + g_sync.unlock(); block_id_type null_id; - for (const auto& cp : my_impl->connections) { - if (cp->fork_head == null_id) { - continue; - } - if (cp->fork_head == blk_id || cp->fork_head_num < blk_num) { + bool set_state_to_head_catchup = false; + for_each_block_connection( [&null_id, blk_num, &blk_id, &c, &set_state_to_head_catchup]( const auto& cp ) { + std::unique_lock 
g_cp_conn( cp->conn_mtx ); + uint32_t fork_head_num = cp->fork_head_num; + block_id_type fork_head_id = cp->fork_head; + g_cp_conn.unlock(); + if( fork_head_id == null_id ) { + // continue + } else if( fork_head_num < blk_num || fork_head_id == blk_id ) { + std::lock_guard g_conn( c->conn_mtx ); c->fork_head = null_id; c->fork_head_num = 0; + } else { + set_state_to_head_catchup = true; } - else { - set_state(head_catchup); - } - } + return true; + } ); - if (state == in_sync) { + if( set_state_to_head_catchup ) { + set_state( head_catchup ); + } else { + set_state( in_sync ); send_handshakes(); } - } - else if (state == lib_catchup) { + } else if( state == lib_catchup ) { if( blk_num == sync_known_lib_num ) { - fc_dlog( logger, "All caught up with last known last irreversible block resending handshake"); - set_state(in_sync); + fc_dlog( logger, "All caught up with last known last irreversible block resending handshake" ); + set_state( in_sync ); + g_sync.unlock(); send_handshakes(); - } - else if (blk_num == sync_last_requested_num) { - request_next_chunk(); - } - else { - fc_dlog(logger,"calling sync_wait on connection ${p}",("p",c->peer_name())); + } else if( blk_num == sync_last_requested_num ) { + request_next_chunk( std::move( g_sync) ); + } else { + g_sync.unlock(); + fc_dlog( logger, "calling sync_wait on connection ${p}", ("p", c->peer_name()) ); c->sync_wait(); } } @@ -1657,372 +1816,464 @@ namespace eosio { //------------------------------------------------------------------------ - void dispatch_manager::bcast_block(const block_state_ptr& bs) { - std::set skips; - auto range = received_blocks.equal_range(bs->id); - for (auto org = range.first; org != range.second; ++org) { - skips.insert(org->second); + // thread safe + bool dispatch_manager::add_peer_block( const block_id_type& blkid, uint32_t connection_id) { + std::lock_guard g( blk_state_mtx ); + auto bptr = blk_state.get().find( std::make_tuple( connection_id, std::ref( blkid ))); + bool added = 
(bptr == blk_state.end()); + if( added ) { + blk_state.insert( {blkid, block_header::num_from_id( blkid ), connection_id, true} ); + } else if( !bptr->have_block ) { + blk_state.modify( bptr, []( auto& pb ) { + pb.have_block = true; + }); } - received_blocks.erase(range.first, range.second); - - uint32_t bnum = bs->block_num; - peer_block_state pbstate{bs->id, bnum}; + return added; + } - std::shared_ptr> send_buffer; - for( auto& cp : my_impl->connections ) { - if( skips.find( cp ) != skips.end() || !cp->current() ) { - continue; - } - bool has_block = cp->last_handshake_recv.last_irreversible_block_num >= bnum; - if( !has_block ) { - if( !cp->add_peer_block( pbstate ) ) { - fc_dlog( logger, "not bcast block ${b} to ${p}", ("b", bnum)("p", cp->peer_name()) ); - continue; - } - if( !send_buffer ) { - send_buffer = create_send_buffer( bs->block ); - } - fc_dlog(logger, "bcast block ${b} to ${p}", ("b", bnum)("p", cp->peer_name())); - cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); - } + bool dispatch_manager::add_peer_block_id( const block_id_type& blkid, uint32_t connection_id) { + std::lock_guard g( blk_state_mtx ); + auto bptr = blk_state.get().find( std::make_tuple( connection_id, std::ref( blkid ))); + bool added = (bptr == blk_state.end()); + if( added ) { + blk_state.insert( {blkid, block_header::num_from_id( blkid ), connection_id, false} ); } + return added; + } + bool dispatch_manager::peer_has_block( const block_id_type& blkid, uint32_t connection_id ) const { + std::lock_guard g(blk_state_mtx); + const auto blk_itr = blk_state.get().find( std::make_tuple( connection_id, std::ref( blkid ))); + return blk_itr != blk_state.end(); } - void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { - received_blocks.insert(std::make_pair(id, c)); - if (c && - c->last_req && - c->last_req->req_blocks.mode != none && - !c->last_req->req_blocks.ids.empty() && - c->last_req->req_blocks.ids.back() == id) { - 
c->last_req.reset(); + bool dispatch_manager::have_block( const block_id_type& blkid ) const { + std::lock_guard g(blk_state_mtx); + // by_block_id sorts have_block by greater so have_block == true will be the first one found + const auto& index = blk_state.get(); + auto blk_itr = index.find( blkid ); + if( blk_itr != index.end() ) { + return blk_itr->have_block; } + return false; + } - fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); - c->cancel_wait(); + size_t dispatch_manager::num_entries( uint32_t connection_id ) const { + std::lock_guard g(blk_state_mtx); + return blk_state.get().count( connection_id ); } - void dispatch_manager::rejected_block(const block_id_type& id) { - fc_dlog( logger, "rejected block ${id}", ("id", id) ); - auto range = received_blocks.equal_range(id); - received_blocks.erase(range.first, range.second); + bool dispatch_manager::add_peer_txn( const node_transaction_state& nts ) { + std::lock_guard g( local_txns_mtx ); + auto tptr = local_txns.get().find( std::make_tuple( std::ref( nts.id ), nts.connection_id ) ); + bool added = (tptr == local_txns.end()); + if( added ) { + local_txns.insert( nts ); + } + return added; } - void dispatch_manager::expire_blocks( uint32_t lib_num ) { - for( auto i = received_blocks.begin(); i != received_blocks.end(); ) { - const block_id_type& blk_id = i->first; - uint32_t blk_num = block_header::num_from_id( blk_id ); - if( blk_num <= lib_num ) { - i = received_blocks.erase( i ); - } else { - ++i; + // thread safe + void dispatch_manager::update_txns_block_num( const signed_block_ptr& sb ) { + update_block_num ubn( sb->block_num() ); + std::lock_guard g( local_txns_mtx ); + for( const auto& recpt : sb->transactions ) { + const transaction_id_type& id = (recpt.trx.which() == 0) ? 
recpt.trx.get() + : recpt.trx.get().id(); + auto range = local_txns.get().equal_range( id ); + for( auto itr = range.first; itr != range.second; ++itr ) { + local_txns.modify( itr, ubn ); } } } - void dispatch_manager::bcast_transaction(const transaction_metadata_ptr& ptrx) { - std::set skips; - const auto& id = ptrx->id; - - auto range = received_transactions.equal_range(id); - for (auto org = range.first; org != range.second; ++org) { - skips.insert(org->second); + // thread safe + void dispatch_manager::update_txns_block_num( const transaction_id_type& id, uint32_t blk_num ) { + update_block_num ubn( blk_num ); + std::lock_guard g( local_txns_mtx ); + auto range = local_txns.get().equal_range( id ); + for( auto itr = range.first; itr != range.second; ++itr ) { + local_txns.modify( itr, ubn ); } - received_transactions.erase(range.first, range.second); + } - if( my_impl->local_txns.get().find( id ) != my_impl->local_txns.end() ) { //found - fc_dlog(logger, "found trxid in local_trxs" ); - return; - } + bool dispatch_manager::peer_has_txn( const transaction_id_type& tid, uint32_t connection_id ) const { + std::lock_guard g( local_txns_mtx ); + const auto tptr = local_txns.get().find( std::make_tuple( std::ref( tid ), connection_id ) ); + return tptr != local_txns.end(); + } - time_point_sec trx_expiration = ptrx->packed_trx->expiration(); - const packed_transaction& trx = *ptrx->packed_trx; + bool dispatch_manager::have_txn( const transaction_id_type& tid ) const { + std::lock_guard g( local_txns_mtx ); + const auto tptr = local_txns.get().find( tid ); + return tptr != local_txns.end(); + } + + void dispatch_manager::expire_txns( uint32_t lib_num ) { + size_t start_size = 0, end_size = 0; - auto buff = create_send_buffer( trx ); + std::unique_lock g( local_txns_mtx ); + start_size = local_txns.size(); + auto& old = local_txns.get(); + auto ex_lo = old.lower_bound( fc::time_point_sec( 0 ) ); + auto ex_up = old.upper_bound( time_point::now() ); + old.erase( ex_lo, 
ex_up ); + g.unlock(); // allow other threads opportunity to use local_txns - node_transaction_state nts = {id, trx_expiration, 0, buff}; - my_impl->local_txns.insert(std::move(nts)); + g.lock(); + auto& stale = local_txns.get(); + stale.erase( stale.lower_bound( 1 ), stale.upper_bound( lib_num ) ); + end_size = local_txns.size(); + g.unlock(); - my_impl->send_transaction_to_all( buff, [&id, &skips, trx_expiration](const connection_ptr& c) -> bool { - if( skips.find(c) != skips.end() || c->syncing ) { - return false; - } - const auto& bs = c->trx_state.find(id); - bool unknown = bs == c->trx_state.end(); - if( unknown ) { - c->trx_state.insert(transaction_state({id,0,trx_expiration})); - fc_dlog(logger, "sending trx to ${n}", ("n",c->peer_name() ) ); - } - return unknown; - }); + fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)( "r", start_size - end_size ) ); + } + + void dispatch_manager::expire_blocks( uint32_t lib_num ) { + std::lock_guard g(blk_state_mtx); + auto& stale_blk = blk_state.get(); + stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib_num) ); + } + + // thread safe + void dispatch_manager::bcast_block(const block_state_ptr& bs) { + fc_dlog( logger, "bcast block ${b}", ("b", bs->block_num) ); + + bool have_connection = false; + for_each_block_connection( [&have_connection]( auto& cp ) { + peer_dlog( cp, "socket_is_open ${s}, connecting ${c}, syncing ${ss}", + ("s", cp->socket_is_open())("c", cp->connecting.load())("ss", cp->syncing.load()) ); + + if( !cp->current() ) { + return true; + } + have_connection = true; + return false; + } ); + + if( !have_connection ) return; + std::shared_ptr> send_buffer = create_send_buffer( bs->block ); + + for_each_block_connection( [this, bs, send_buffer]( auto& cp ) { + if( !cp->current() ) { + return true; + } + cp->strand.post( [this, cp, bs, send_buffer]() { + uint32_t bnum = bs->block_num; + std::unique_lock g_conn( cp->conn_mtx ); + bool has_block = 
cp->last_handshake_recv.last_irreversible_block_num >= bnum; + g_conn.unlock(); + if( !has_block ) { + if( !add_peer_block( bs->id, cp->connection_id ) ) { + fc_dlog( logger, "not bcast block ${b} to ${p}", ("b", bnum)("p", cp->peer_name()) ); + return; + } + fc_dlog( logger, "bcast block ${b} to ${p}", ("b", bnum)("p", cp->peer_name()) ); + cp->enqueue_buffer( send_buffer, no_reason ); + } + }); + return true; + } ); + } + + void dispatch_manager::bcast_notice( const block_id_type& id ) { + if( my_impl->sync_master->syncing_with_peer() ) return; + fc_dlog( logger, "bcast notice ${b}", ("b", block_header::num_from_id( id )) ); + notice_message note; + note.known_blocks.mode = normal; + note.known_blocks.pending = 1; // 1 indicates this is a block id notice + note.known_blocks.ids.emplace_back( id ); + + for_each_block_connection( [this, note]( auto& cp ) { + if( !cp->current() ) { + return true; + } + cp->strand.post( [this, cp, note]() { + // check protocol_version here since only accessed from strand + if( cp->protocol_version < block_id_notify ) return; + const block_id_type& id = note.known_blocks.ids.back(); + if( peer_has_block( id, cp->connection_id ) ) { + return; + } + fc_dlog( logger, "bcast block id ${b} to ${p}", ("b", block_header::num_from_id( id ))("p", cp->peer_name()) ); + cp->enqueue( note ); + } ); + return true; + } ); } - void dispatch_manager::recv_transaction(const connection_ptr& c, const transaction_id_type& id) { - received_transactions.insert(std::make_pair(id, c)); + // called from connection strand + void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { + std::unique_lock g( c->conn_mtx ); if (c && c->last_req && - c->last_req->req_trx.mode != none && - !c->last_req->req_trx.ids.empty() && - c->last_req->req_trx.ids.back() == id) { + c->last_req->req_blocks.mode != none && + !c->last_req->req_blocks.ids.empty() && + c->last_req->req_blocks.ids.back() == id) { + fc_dlog( logger, "reseting 
last_req" ); c->last_req.reset(); } + g.unlock(); fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); c->cancel_wait(); } - void dispatch_manager::rejected_transaction(const transaction_id_type& id) { - fc_dlog(logger,"not sending rejected transaction ${tid}",("tid",id)); - auto range = received_transactions.equal_range(id); - received_transactions.erase(range.first, range.second); + void dispatch_manager::rejected_block(const block_id_type& id) { + fc_dlog( logger, "rejected block ${id}", ("id", id) ); + } + + void dispatch_manager::bcast_transaction(const packed_transaction& trx) { + const auto& id = trx.id(); + time_point_sec trx_expiration = trx.expiration(); + node_transaction_state nts = {id, trx_expiration, 0, 0}; + + std::shared_ptr> send_buffer; + for_each_connection( [this, &trx, &nts, &send_buffer]( auto& cp ) { + if( cp->is_blocks_only_connection() || !cp->current() ) { + return true; + } + nts.connection_id = cp->connection_id; + if( !add_peer_txn(nts) ) { + return true; + } + if( !send_buffer ) { + send_buffer = create_send_buffer( trx ); + } + + cp->strand.post( [cp, send_buffer]() { + fc_dlog( logger, "sending trx to ${n}", ("n", cp->peer_name()) ); + cp->enqueue_buffer( send_buffer, no_reason ); + } ); + return true; + } ); + } + + void dispatch_manager::rejected_transaction(const packed_transaction_ptr& trx, uint32_t head_blk_num) { + fc_dlog( logger, "not sending rejected transaction ${tid}", ("tid", trx->id()) ); + // keep rejected transaction around for awhile so we don't broadcast it + // update its block number so it will be purged when current block number is lib + if( trx->expiration() > fc::time_point::now() ) { // no need to update blk_num if already expired + update_txns_block_num( trx->id(), head_blk_num ); + } } + // called from connection strand void dispatch_manager::recv_notice(const connection_ptr& c, const notice_message& msg, bool generated) { - request_message req; - req.req_trx.mode = none; - req.req_blocks.mode = 
none; - bool send_req = false; if (msg.known_trx.mode == normal) { - req.req_trx.mode = normal; - req.req_trx.pending = 0; - send_req = false; - } - else if (msg.known_trx.mode != none) { - fc_elog( logger,"passed a notice_message with something other than a normal on none known_trx" ); + } else if (msg.known_trx.mode != none) { + fc_elog( logger, "passed a notice_message with something other than a normal on none known_trx" ); return; } if (msg.known_blocks.mode == normal) { - req.req_blocks.mode = normal; - controller& cc = my_impl->chain_plug->chain(); // known_blocks.ids is never > 1 if( !msg.known_blocks.ids.empty() ) { + if( num_entries( c->connection_id ) > def_max_peer_block_ids_per_connection ) { + fc_elog( logger, "received too many notice_messages, disconnecting" ); + c->close( false ); + } const block_id_type& blkid = msg.known_blocks.ids.back(); - signed_block_ptr b; - try { - b = cc.fetch_block_by_id(blkid); // if exists - if(b) { - c->add_peer_block({blkid, block_header::num_from_id(blkid)}); - } - } catch (const assert_exception &ex) { - fc_ilog( logger, "caught assert on fetch_block_by_id, ${ex}",("ex",ex.what()) ); - // keep going, client can ask another peer - } catch (...) 
{ - fc_elog( logger, "failed to retrieve block for id"); + if( have_block( blkid )) { + add_peer_block( blkid, c->connection_id ); + return; + } else { + add_peer_block_id( blkid, c->connection_id ); } - if (!b) { - send_req = true; - req.req_blocks.ids.push_back( blkid ); + if( msg.known_blocks.pending == 1 ) { // block id notify + return; } } - } - else if (msg.known_blocks.mode != none) { + } else if (msg.known_blocks.mode != none) { fc_elog( logger, "passed a notice_message with something other than a normal on none known_blocks" ); return; } - fc_dlog( logger, "send req = ${sr}", ("sr",send_req)); - if( send_req) { - c->enqueue(req); - c->fetch_wait(); - c->last_req = std::move(req); - } } void dispatch_manager::retry_fetch(const connection_ptr& c) { - if (!c->last_req) { - return; - } - fc_wlog( logger, "failed to fetch from ${p}",("p",c->peer_name())); - transaction_id_type tid; + fc_dlog( logger, "retry fetch" ); + request_message last_req; block_id_type bid; - bool is_txn = false; - if( c->last_req->req_trx.mode == normal && !c->last_req->req_trx.ids.empty() ) { - is_txn = true; - tid = c->last_req->req_trx.ids.back(); - } - else if( c->last_req->req_blocks.mode == normal && !c->last_req->req_blocks.ids.empty() ) { - bid = c->last_req->req_blocks.ids.back(); - } - else { - fc_wlog( logger,"no retry, block mpde = ${b} trx mode = ${t}", - ("b",modes_str(c->last_req->req_blocks.mode))("t",modes_str(c->last_req->req_trx.mode))); - return; - } - for (auto& conn : my_impl->connections) { - if (conn == c || conn->last_req) { - continue; - } - bool sendit = false; - if (is_txn) { - auto trx = conn->trx_state.get().find(tid); - sendit = trx != conn->trx_state.end(); - } - else { - sendit = conn->peer_has_block(bid); + { + std::lock_guard g_c_conn( c->conn_mtx ); + if( !c->last_req ) { + return; } - if (sendit) { - conn->enqueue(*c->last_req); - conn->fetch_wait(); - conn->last_req = c->last_req; + fc_wlog( logger, "failed to fetch from ${p}", ("p", 
c->peer_address()) ); + if( c->last_req->req_blocks.mode == normal && !c->last_req->req_blocks.ids.empty() ) { + bid = c->last_req->req_blocks.ids.back(); + } else { + fc_wlog( logger, "no retry, block mpde = ${b} trx mode = ${t}", + ("b", modes_str( c->last_req->req_blocks.mode ))( "t", modes_str( c->last_req->req_trx.mode ) ) ); return; } + last_req = *c->last_req; } + for_each_block_connection( [this, &c, &last_req, &bid]( auto& conn ) { + if( conn == c ) + return true; + { + std::lock_guard guard( conn->conn_mtx ); + if( conn->last_req ) { + return true; + } + } + + bool sendit = peer_has_block( bid, conn->connection_id ); + if( sendit ) { + conn->strand.post( [conn, last_req{std::move(last_req)}]() { + conn->enqueue( last_req ); + conn->fetch_wait(); + std::lock_guard g_conn_conn( conn->conn_mtx ); + conn->last_req = last_req; + } ); + return false; + } + return true; + } ); // at this point no other peer has it, re-request or do nothing? + fc_wlog( logger, "no peer has last_req" ); if( c->connected() ) { - c->enqueue(*c->last_req); + c->enqueue( last_req ); c->fetch_wait(); } } //------------------------------------------------------------------------ - void net_plugin_impl::connect(const connection_ptr& c) { - if( c->no_retry != go_away_reason::no_reason) { - fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( c->no_retry ))); - return; + // called from any thread + bool connection::resolve_and_connect() { + switch ( no_retry ) { + case no_reason: + case wrong_version: + case benign_other: + break; + default: + fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( no_retry ))); + return false; } - auto colon = c->peer_addr.find(':'); - + string::size_type colon = peer_address().find(':'); if (colon == std::string::npos || colon == 0) { - fc_elog( logger, "Invalid peer address. 
must be \"host:port\": ${p}", ("p",c->peer_addr) ); - for ( auto itr : connections ) { - if((*itr).peer_addr == c->peer_addr) { - (*itr).reset(); - close(itr); - connections.erase(itr); - break; - } + fc_elog( logger, "Invalid peer address. must be \"host:port[:|]\": ${p}", ("p", peer_address()) ); + return false; + } + + connection_ptr c = shared_from_this(); + + if( consecutive_immediate_connection_close > def_max_consecutive_immediate_connection_close || no_retry == benign_other ) { + auto connector_period_us = std::chrono::duration_cast( my_impl->connector_period ); + std::lock_guard g( c->conn_mtx ); + if( last_close == fc::time_point() || last_close > fc::time_point::now() - fc::microseconds( connector_period_us.count() ) ) { + return true; // true so doesn't remove from valid connections } - return; } - shared_ptr resolver = std::make_shared( my_impl->thread_pool->get_executor() ); - c->strand.post( [this, c, resolver{std::move(resolver)}](){ - auto colon = c->peer_addr.find(':'); - auto host = c->peer_addr.substr( 0, colon ); - auto port = c->peer_addr.substr( colon + 1); + strand.post([c]() { + string::size_type colon = c->peer_address().find(':'); + string::size_type colon2 = c->peer_address().find(':', colon + 1); + string host = c->peer_address().substr( 0, colon ); + string port = c->peer_address().substr( colon + 1, colon2 == string::npos ? 
string::npos : colon2 - (colon + 1)); idump((host)(port)); + c->set_connection_type( c->peer_address() ); + tcp::resolver::query query( tcp::v4(), host, port ); // Note: need to add support for IPv6 too - tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str() ); + + auto resolver = std::make_shared( my_impl->thread_pool->get_executor() ); connection_wptr weak_conn = c; resolver->async_resolve( query, boost::asio::bind_executor( c->strand, - [weak_conn, resolver, this]( const boost::system::error_code& err, tcp::resolver::results_type endpoints ) { - app().post( priority::low, [err, resolver, endpoints, weak_conn, this]() { - auto c = weak_conn.lock(); - if( !c ) return; - if( !err ) { - connect( c, resolver, endpoints ); - } else { - fc_elog( logger, "Unable to resolve ${peer_addr}: ${error}", - ("peer_addr", c->peer_name())( "error", err.message()) ); - } - } ); + [resolver, weak_conn]( const boost::system::error_code& err, tcp::resolver::results_type endpoints ) { + auto c = weak_conn.lock(); + if( !c ) return; + if( !err ) { + c->connect( resolver, endpoints ); + } else { + fc_elog( logger, "Unable to resolve ${add}: ${error}", ("add", c->peer_name())( "error", err.message() ) ); + ++c->consecutive_immediate_connection_close; + } } ) ); } ); + return true; } - void net_plugin_impl::connect( const connection_ptr& c, const std::shared_ptr& resolver, tcp::resolver::results_type endpoints ) { - if( c->no_retry != go_away_reason::no_reason) { - string rsn = reason_str(c->no_retry); - return; + // called from connection strand + void connection::connect( const std::shared_ptr& resolver, tcp::resolver::results_type endpoints ) { + switch ( no_retry ) { + case no_reason: + case wrong_version: + case benign_other: + break; + default: + return; } - c->connecting = true; - c->pending_message_buffer.reset(); - c->buffer_queue.clear_out_queue(); - connection_wptr weak_conn = c; - boost::asio::async_connect( *c->socket, endpoints, - boost::asio::bind_executor( 
c->strand, - [weak_conn, resolver, socket=c->socket, this]( const boost::system::error_code& err, const tcp::endpoint& endpoint ) { - app().post( priority::low, [weak_conn, this, err]() { - auto c = weak_conn.lock(); - if( !c ) return; - if( !err && c->socket->is_open()) { - if( start_session( c )) { + connecting = true; + pending_message_buffer.reset(); + buffer_queue.clear_out_queue(); + boost::asio::async_connect( *socket, endpoints, + boost::asio::bind_executor( strand, + [resolver, c = shared_from_this(), socket=socket]( const boost::system::error_code& err, const tcp::endpoint& endpoint ) { + if( !err && socket->is_open() && socket == c->socket ) { + if( c->start_session() ) { c->send_handshake(); } } else { - elog( "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message()) ); - c->connecting = false; - my_impl->close( c ); + fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message())); + c->close( false ); } - } ); } ) ); } - bool net_plugin_impl::start_session(const connection_ptr& con) { - boost::asio::ip::tcp::no_delay nodelay( true ); - boost::system::error_code ec; - con->socket->set_option( nodelay, ec ); - if (ec) { - fc_elog( logger, "connection failed to ${peer}: ${error}", ( "peer", con->peer_name())("error",ec.message()) ); - con->connecting = false; - close(con); - return false; - } - else { - start_read_message( con ); - ++started_sessions; - return true; - // for now, we can just use the application main loop. 
- // con->readloop_complete = bf::async( [=](){ read_loop( con ); } ); - // con->writeloop_complete = bf::async( [=](){ write_loop con ); } ); - } - } - - void net_plugin_impl::start_listen_loop() { - auto socket = std::make_shared( my_impl->thread_pool->get_executor() ); - acceptor->async_accept( *socket, [socket, this]( boost::system::error_code ec ) { - app().post( priority::low, [socket, this, ec]() { + connection_ptr new_connection = std::make_shared(); + new_connection->connecting = true; + new_connection->strand.post( [this, new_connection = std::move( new_connection )](){ + acceptor->async_accept( *new_connection->socket, + boost::asio::bind_executor( new_connection->strand, [new_connection, socket=new_connection->socket, this]( boost::system::error_code ec ) { if( !ec ) { uint32_t visitors = 0; uint32_t from_addr = 0; boost::system::error_code rec; - auto paddr = socket->remote_endpoint(rec).address(); - if (rec) { - fc_elog(logger,"Error getting remote endpoint: ${m}",("m", rec.message())); - } - else { - for (auto &conn : connections) { - if(conn->socket->is_open()) { - if (conn->peer_addr.empty()) { - visitors++; - boost::system::error_code ec; - if (paddr == conn->socket->remote_endpoint(ec).address()) { - from_addr++; + const auto& paddr_add = socket->remote_endpoint( rec ).address(); + string paddr_str; + if( rec ) { + fc_elog( logger, "Error getting remote endpoint: ${m}", ("m", rec.message())); + } else { + paddr_str = paddr_add.to_string(); + for_each_connection( [&visitors, &from_addr, &paddr_str]( auto& conn ) { + if( conn->socket_is_open()) { + if( conn->peer_address().empty()) { + ++visitors; + std::lock_guard g_conn( conn->conn_mtx ); + if( paddr_str == conn->remote_endpoint_ip ) { + ++from_addr; } } } - } - if (num_clients != visitors) { - fc_ilog( logger,"checking max client, visitors = ${v} num clients ${n}",("v",visitors)("n",num_clients) ); - num_clients = visitors; - } - if( from_addr < max_nodes_per_host && (max_client_count == 0 || 
num_clients < max_client_count )) { - ++num_clients; - connection_ptr c = std::make_shared( socket ); - connections.insert( c ); - start_session( c ); - - } - else { - if (from_addr >= max_nodes_per_host) { - fc_elog(logger, "Number of connections (${n}) from ${ra} exceeds limit", - ("n", from_addr+1)("ra",paddr.to_string())); + return true; + } ); + if( from_addr < max_nodes_per_host && (max_client_count == 0 || visitors < max_client_count)) { + fc_ilog( logger, "Accepted new connection: " + paddr_str ); + if( new_connection->start_session()) { + std::lock_guard g_unique( connections_mtx ); + connections.insert( new_connection ); } - else { - fc_elog(logger, "Error max_client_count ${m} exceeded", - ( "m", max_client_count) ); + + } else { + if( from_addr >= max_nodes_per_host ) { + fc_elog( logger, "Number of connections (${n}) from ${ra} exceeds limit ${l}", + ("n", from_addr + 1)( "ra", paddr_str )( "l", max_nodes_per_host )); + } else { + fc_elog( logger, "Error max_client_count ${m} exceeded", ("m", max_client_count)); } + // new_connection never added to connections and start_session not called, lifetime will end boost::system::error_code ec; + socket->shutdown( tcp::socket::shutdown_both, ec ); socket->close( ec ); } } } else { - fc_elog( logger, "Error accepting connection: ${m}",( "m", ec.message() ) ); + fc_elog( logger, "Error accepting connection: ${m}", ("m", ec.message())); // For the listed error codes below, recall start_listen_loop() switch (ec.value()) { case ECONNABORTED: @@ -2037,28 +2288,25 @@ namespace eosio { } } start_listen_loop(); - }); - }); + })); + } ); } - void net_plugin_impl::start_read_message(const connection_ptr& conn) { - + // only called from strand thread + void connection::start_read_message() { try { - if(!conn->socket) { - return; - } - connection_wptr weak_conn = conn; + std::size_t minimum_read = + std::atomic_exchange( &outstanding_read_bytes, 0 ); + minimum_read = minimum_read != 0 ? 
minimum_read : message_header_size; - std::size_t minimum_read = conn->outstanding_read_bytes ? *conn->outstanding_read_bytes : message_header_size; - - if (use_socket_read_watermark) { + if (my_impl->use_socket_read_watermark) { const size_t max_socket_read_watermark = 4096; std::size_t socket_read_watermark = std::min(minimum_read, max_socket_read_watermark); boost::asio::socket_base::receive_low_watermark read_watermark_opt(socket_read_watermark); boost::system::error_code ec; - conn->socket->set_option( read_watermark_opt, ec ); + socket->set_option( read_watermark_opt, ec ); if( ec ) { - fc_elog( logger, "unable to set read watermark ${peer}: ${e1}", ("peer", conn->peer_name())( "e1", ec.message() ) ); + fc_elog( logger, "unable to set read watermark ${peer}: ${e1}", ("peer", peer_name())( "e1", ec.message() ) ); } } @@ -2070,25 +2318,22 @@ namespace eosio { } }; - if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { + uint32_t write_queue_size = buffer_queue.write_queue_size(); + if( write_queue_size > def_max_write_queue_size ) { fc_elog( logger, "write queue full ${s} bytes, giving up on connection, closing connection to: ${p}", - ("s", conn->buffer_queue.write_queue_size())("p", conn->peer_name()) ); - my_impl->close( conn ); + ("s", write_queue_size)("p", peer_name()) ); + close( false ); return; } - boost::asio::async_read(*conn->socket, - conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, - boost::asio::bind_executor( conn->strand, - [this,weak_conn,socket=conn->socket]( boost::system::error_code ec, std::size_t bytes_transferred ) { - app().post( priority::medium, [this,weak_conn, socket, ec, bytes_transferred]() { - auto conn = weak_conn.lock(); - if (!conn || !conn->socket || !conn->socket->is_open() || !socket->is_open()) { - return; - } - - conn->outstanding_read_bytes.reset(); + boost::asio::async_read( *socket, + pending_message_buffer.get_buffer_sequence_for_boost_async_read(), 
completion_handler, + boost::asio::bind_executor( strand, + [conn = shared_from_this(), socket=socket]( boost::system::error_code ec, std::size_t bytes_transferred ) { + // may have closed connection and cleared pending_message_buffer + if( !conn->socket_is_open() || socket != conn->socket ) return; + bool close_connection = false; try { if( !ec ) { if (bytes_transferred > conn->pending_message_buffer.bytes_to_write()) { @@ -2101,25 +2346,24 @@ namespace eosio { uint32_t bytes_in_buffer = conn->pending_message_buffer.bytes_to_read(); if (bytes_in_buffer < message_header_size) { - conn->outstanding_read_bytes.emplace(message_header_size - bytes_in_buffer); + conn->outstanding_read_bytes = message_header_size - bytes_in_buffer; break; } else { uint32_t message_length; auto index = conn->pending_message_buffer.read_index(); conn->pending_message_buffer.peek(&message_length, sizeof(message_length), index); if(message_length > def_send_buffer_size*2 || message_length == 0) { - boost::system::error_code ec; - fc_elog( logger,"incoming message length unexpected (${i}), from ${p}", - ("i", message_length)("p",boost::lexical_cast(conn->socket->remote_endpoint(ec))) ); - close(conn); - return; + fc_elog( logger,"incoming message length unexpected (${i})", ("i", message_length) ); + close_connection = true; + break; } auto total_message_bytes = message_length + message_header_size; if (bytes_in_buffer >= total_message_bytes) { conn->pending_message_buffer.advance_read_ptr(message_header_size); - if (!process_next_message(conn, message_length)) { + conn->consecutive_immediate_connection_close = 0; + if (!conn->process_next_message(message_length)) { return; } } else { @@ -2129,125 +2373,145 @@ namespace eosio { conn->pending_message_buffer.add_space( outstanding_message_bytes - available_buffer_bytes ); } - conn->outstanding_read_bytes.emplace(outstanding_message_bytes); + conn->outstanding_read_bytes = outstanding_message_bytes; break; } } } - start_read_message(conn); + if( 
!close_connection ) conn->start_read_message(); } else { - auto pname = conn->peer_name(); if (ec.value() != boost::asio::error::eof) { - fc_elog( logger, "Error reading message from ${p}: ${m}",("p",pname)( "m", ec.message() ) ); + fc_elog( logger, "Error reading message: ${m}", ( "m", ec.message() ) ); } else { - fc_ilog( logger, "Peer ${p} closed connection",("p",pname) ); + fc_ilog( logger, "Peer closed connection" ); } - close( conn ); + close_connection = true; } } catch(const std::exception &ex) { - fc_elog( logger, "Exception in handling read data from ${p}: ${s}", - ("p",conn->peer_name())("s",ex.what()) ); - close( conn ); + fc_elog( logger, "Exception in handling read data: ${s}", ("s",ex.what()) ); + close_connection = true; } catch(const fc::exception &ex) { - fc_elog( logger, "Exception in handling read data from ${p}: ${s}", - ("p",conn->peer_name())("s",ex.to_string()) ); - close( conn ); + fc_elog( logger, "Exception in handling read data ${s}", ("s",ex.to_string()) ); + close_connection = true; } catch (...) { - fc_elog( logger, "Undefined exception handling the read data from ${p}",( "p",conn->peer_name()) ); - close( conn ); + fc_elog( logger, "Undefined exception handling read data" ); + close_connection = true; + } + + if( close_connection ) { + fc_elog( logger, "Closing connection to: ${p}", ("p", conn->peer_name()) ); + conn->close(); } - }); })); } catch (...) { - string pname = conn ? 
conn->peer_name() : "no connection name"; - fc_elog( logger, "Undefined exception handling reading ${p}",("p",pname) ); - close( conn ); + fc_elog( logger, "Undefined exception in start_read_message, closing connection to: ${p}", ("p", peer_name()) ); + close(); } } - bool net_plugin_impl::process_next_message(const connection_ptr& conn, uint32_t message_length) { + // called from connection strand + bool connection::process_next_message( uint32_t message_length ) { try { // if next message is a block we already have, exit early - auto peek_ds = conn->pending_message_buffer.create_peek_datastream(); + auto peek_ds = pending_message_buffer.create_peek_datastream(); unsigned_int which{}; fc::raw::unpack( peek_ds, which ); if( which == signed_block_which ) { block_header bh; fc::raw::unpack( peek_ds, bh ); - const controller& cc = chain_plug->chain(); const block_id_type blk_id = bh.id(); const uint32_t blk_num = bh.block_num(); - if( !sync_master->syncing_with_peer() ) { - uint32_t lib = cc.last_irreversible_block_num(); + if( my_impl->dispatcher->have_block( blk_id ) ) { + fc_dlog( logger, "canceling wait on ${p}, already received block ${num}, id ${id}...", + ("p", peer_name())("num", blk_num)("id", blk_id.str().substr(8,16)) ); + my_impl->sync_master->sync_recv_block( shared_from_this(), blk_id, blk_num, false ); + cancel_wait(); + + pending_message_buffer.advance_read_ptr( message_length ); + return true; + } + fc_dlog( logger, "${p} received block ${num}, id ${id}...", + ("p", peer_name())("num", bh.block_num())("id", blk_id.str().substr(8,16)) ); + if( !my_impl->sync_master->syncing_with_peer() ) { // guard against peer thinking it needs to send us old blocks + uint32_t lib = 0; + std::tie( lib, std::ignore, std::ignore, std::ignore, std::ignore, std::ignore ) = my_impl->get_chain_info(); if( blk_num < lib ) { - const auto last_sent_lib = conn->last_handshake_sent.last_irreversible_block_num; - if( !conn->peer_requested && blk_num < last_sent_lib ) { + 
std::unique_lock g( conn_mtx ); + const auto last_sent_lib = last_handshake_sent.last_irreversible_block_num; + g.unlock(); + if( blk_num < last_sent_lib ) { fc_ilog( logger, "received block ${n} less than sent lib ${lib}", ("n", blk_num)("lib", last_sent_lib) ); - close( conn ); + close(); } else { fc_ilog( logger, "received block ${n} less than lib ${lib}", ("n", blk_num)("lib", lib) ); - conn->enqueue( (sync_request_message) {0, 0} ); - conn->send_handshake(); - conn->cancel_wait(); + enqueue( (sync_request_message) {0, 0} ); + send_handshake(); + cancel_wait(); } - conn->pending_message_buffer.advance_read_ptr( message_length ); + pending_message_buffer.advance_read_ptr( message_length ); return true; } } - if( cc.fetch_block_by_id( blk_id ) ) { - sync_master->recv_block( conn, blk_id, blk_num, false ); - conn->cancel_wait(); - conn->pending_message_buffer.advance_read_ptr( message_length ); - return true; - } - } - auto ds = conn->pending_message_buffer.create_datastream(); - net_message msg; - fc::raw::unpack( ds, msg ); - msg_handler m( *this, conn ); - if( msg.contains() ) { - m( std::move( msg.get() ) ); - } else if( msg.contains() ) { - m( std::move( msg.get() ) ); + auto ds = pending_message_buffer.create_datastream(); + fc::raw::unpack( ds, which ); // throw away + shared_ptr ptr = std::make_shared(); + fc::raw::unpack( ds, *ptr ); + handle_message( blk_id, std::move( ptr ) ); + + } else if( which == packed_transaction_which ) { + auto ds = pending_message_buffer.create_datastream(); + fc::raw::unpack( ds, which ); // throw away + shared_ptr ptr = std::make_shared(); + fc::raw::unpack( ds, *ptr ); + handle_message( std::move( ptr ) ); + } else { + auto ds = pending_message_buffer.create_datastream(); + net_message msg; + fc::raw::unpack( ds, msg ); + msg_handler m( shared_from_this() ); msg.visit( m ); } + } catch( const fc::exception& e ) { fc_elog( logger, "Exception in handling message from ${p}: ${s}", - ("p", conn->peer_name())("s", 
e.to_detail_string()) ); - close( conn ); + ("p", peer_name())("s", e.to_detail_string()) ); + close(); return false; } return true; } - size_t net_plugin_impl::count_open_sockets() const - { - size_t count = 0; - for( auto &c : connections) { - if(c->socket->is_open()) - ++count; - } - return count; - } - - - template - void net_plugin_impl::send_transaction_to_all(const std::shared_ptr>& send_buffer, VerifierFunc verify) { - for( auto &c : connections) { - if( c->current() && verify( c )) { - c->enqueue_buffer( send_buffer, true, priority::low, no_reason ); - } - } - } - - bool net_plugin_impl::is_valid(const handshake_message& msg) { + // call only from main application thread + void net_plugin_impl::update_chain_info() { + controller& cc = chain_plug->chain(); + std::lock_guard g( chain_info_mtx ); + chain_lib_num = cc.last_irreversible_block_num(); + chain_lib_id = cc.last_irreversible_block_id(); + chain_head_blk_num = cc.head_block_num(); + chain_head_blk_id = cc.head_block_id(); + chain_fork_head_blk_num = cc.fork_db_pending_head_block_num(); + chain_fork_head_blk_id = cc.fork_db_pending_head_block_id(); + fc_dlog( logger, "updating chain info lib ${lib}, head ${head}, fork ${fork}", + ("lib", chain_lib_num)("head", chain_head_blk_num)("fork", chain_fork_head_blk_num) ); + } + + // lib_num, head_blk_num, fork_head_blk_num, lib_id, head_blk_id, fork_head_blk_id + std::tuple + net_plugin_impl::get_chain_info() const { + std::lock_guard g( chain_info_mtx ); + return std::make_tuple( + chain_lib_num, chain_head_blk_num, chain_fork_head_blk_num, + chain_lib_id, chain_head_blk_id, chain_fork_head_blk_id ); + } + + bool connection::is_valid( const handshake_message& msg ) { // Do some basic validation of an incoming handshake_message, so things // that really aren't handshake messages can be quickly discarded without // affecting state. 
@@ -2272,200 +2536,237 @@ namespace eosio { return valid; } - void net_plugin_impl::handle_message(const connection_ptr& c, const chain_size_message& msg) { - peer_ilog(c, "received chain_size_message"); + void connection::handle_message( const chain_size_message& msg ) { + peer_dlog(this, "received chain_size_message"); } - void net_plugin_impl::handle_message(const connection_ptr& c, const handshake_message& msg) { - peer_ilog(c, "received handshake_message"); - if (!is_valid(msg)) { - peer_elog( c, "bad handshake message"); - c->enqueue( go_away_message( fatal_other )); + void connection::handle_message( const handshake_message& msg ) { + peer_dlog( this, "received handshake_message" ); + if( !is_valid( msg ) ) { + peer_elog( this, "bad handshake message"); + enqueue( go_away_message( fatal_other ) ); return; } - controller& cc = chain_plug->chain(); - uint32_t lib_num = cc.last_irreversible_block_num(); - uint32_t peer_lib = msg.last_irreversible_block_num; - if( c->connecting ) { - c->connecting = false; - } + fc_dlog( logger, "received handshake gen ${g} from ${ep}, lib ${lib}, head ${head}", + ("g", msg.generation)( "ep", peer_name() ) + ( "lib", msg.last_irreversible_block_num )( "head", msg.head_num ) ); + + connecting = false; if (msg.generation == 1) { - if( msg.node_id == node_id) { - fc_elog( logger, "Self connection detected. Closing connection" ); - c->enqueue( go_away_message( self ) ); + if( msg.node_id == my_impl->node_id) { + fc_elog( logger, "Self connection detected node_id ${id}. 
Closing connection", ("id", msg.node_id) ); + enqueue( go_away_message( self ) ); return; } - if( c->peer_addr.empty() || c->last_handshake_recv.node_id == fc::sha256()) { + if( peer_address().empty() ) { + set_connection_type( msg.p2p_address ); + } + + std::unique_lock g_conn( conn_mtx ); + if( peer_address().empty() || last_handshake_recv.node_id == fc::sha256()) { + g_conn.unlock(); fc_dlog(logger, "checking for duplicate" ); - for(const auto &check : connections) { - if(check == c) + std::shared_lock g_cnts( my_impl->connections_mtx ); + for(const auto& check : my_impl->connections) { + if(check.get() == this) continue; if(check->connected() && check->peer_name() == msg.p2p_address) { // It's possible that both peers could arrive here at relatively the same time, so // we need to avoid the case where they would both tell a different connection to go away. // Using the sum of the initial handshake times of the two connections, we will // arbitrarily (but consistently between the two peers) keep one of them. 
- if (msg.time + c->last_handshake_sent.time <= check->last_handshake_sent.time + check->last_handshake_recv.time) + std::unique_lock g_check_conn( check->conn_mtx ); + auto check_time = check->last_handshake_sent.time + check->last_handshake_recv.time; + g_check_conn.unlock(); + g_conn.lock(); + auto c_time = last_handshake_sent.time; + g_conn.unlock(); + if (msg.time + c_time <= check_time) continue; + g_cnts.unlock(); fc_dlog( logger, "sending go_away duplicate to ${ep}", ("ep",msg.p2p_address) ); go_away_message gam(duplicate); - gam.node_id = node_id; - c->enqueue(gam); - c->no_retry = duplicate; + g_conn.lock(); + gam.node_id = conn_node_id; + g_conn.unlock(); + enqueue(gam); + no_retry = duplicate; return; } } - } - else { - fc_dlog(logger, "skipping duplicate check, addr == ${pa}, id = ${ni}",("pa",c->peer_addr)("ni",c->last_handshake_recv.node_id)); + } else { + fc_dlog( logger, "skipping duplicate check, addr == ${pa}, id = ${ni}", + ("pa", peer_address())( "ni", last_handshake_recv.node_id ) ); + g_conn.unlock(); } - if( msg.chain_id != chain_id) { + if( msg.chain_id != my_impl->chain_id ) { fc_elog( logger, "Peer on a different chain. 
Closing connection" ); - c->enqueue( go_away_message(go_away_reason::wrong_chain) ); + enqueue( go_away_message(go_away_reason::wrong_chain) ); return; } - c->protocol_version = to_protocol_version(msg.network_version); - if(c->protocol_version != net_version) { - if (network_version_match) { - fc_elog( logger, "Peer network version does not match expected ${nv} but got ${mnv}", - ("nv", net_version)("mnv", c->protocol_version) ); - c->enqueue(go_away_message(wrong_version)); - return; - } else { - fc_ilog( logger, "Local network version: ${nv} Remote version: ${mnv}", - ("nv", net_version)("mnv", c->protocol_version)); - } + protocol_version = my_impl->to_protocol_version(msg.network_version); + if( protocol_version != net_version ) { + fc_ilog( logger, "Local network version: ${nv} Remote version: ${mnv}", + ("nv", net_version)( "mnv", protocol_version ) ); } - if( c->node_id != msg.node_id) { - c->node_id = msg.node_id; + g_conn.lock(); + if( conn_node_id != msg.node_id ) { + conn_node_id = msg.node_id; } + g_conn.unlock(); - if(!authenticate_peer(msg)) { + if( !my_impl->authenticate_peer( msg ) ) { fc_elog( logger, "Peer not authenticated. Closing connection." 
); - c->enqueue(go_away_message(authentication)); + enqueue( go_away_message( authentication ) ); return; } - bool on_fork = false; - fc_dlog(logger, "lib_num = ${ln} peer_lib = ${pl}",("ln",lib_num)("pl",peer_lib)); + uint32_t peer_lib = msg.last_irreversible_block_num; + connection_wptr weak = shared_from_this(); + app().post( priority::low, [peer_lib, chain_plug = my_impl->chain_plug, weak{std::move(weak)}, + msg_lib_id = msg.last_irreversible_block_id]() { + connection_ptr c = weak.lock(); + if( !c ) return; + controller& cc = chain_plug->chain(); + uint32_t lib_num = cc.last_irreversible_block_num(); + + fc_dlog( logger, "handshake, check for fork lib_num = ${ln} peer_lib = ${pl}", ("ln", lib_num)( "pl", peer_lib ) ); - if( peer_lib <= lib_num && peer_lib > 0) { - try { - block_id_type peer_lib_id = cc.get_block_id_for_num( peer_lib); - on_fork =( msg.last_irreversible_block_id != peer_lib_id); - } - catch( const unknown_block_exception &ex) { - fc_wlog( logger, "peer last irreversible block ${pl} is unknown", ("pl", peer_lib) ); - on_fork = true; - } - catch( ...) { - fc_wlog( logger, "caught an exception getting block id for ${pl}",("pl",peer_lib) ); - on_fork = true; - } - if( on_fork) { - fc_elog( logger, "Peer chain is forked" ); - c->enqueue( go_away_message( forked )); - return; + if( peer_lib <= lib_num && peer_lib > 0 ) { + bool on_fork = false; + bool unknown_block = false; + try { + block_id_type peer_lib_id = cc.get_block_id_for_num( peer_lib ); + on_fork = (msg_lib_id != peer_lib_id); + } catch( const unknown_block_exception& ) { + peer_ilog( c, "peer last irreversible block ${pl} is unknown", ("pl", peer_lib) ); + unknown_block = true; + } catch( ... 
) { + peer_wlog( c, "caught an exception getting block id for ${pl}", ("pl", peer_lib) ); + on_fork = true; + } + if( on_fork || unknown_block ) { + c->strand.post( [on_fork, unknown_block, c]() { + if( on_fork ) { + peer_elog( c, "Peer chain is forked, sending: forked go away" ); + c->enqueue( go_away_message( forked ) ); + } else if( unknown_block ) { + peer_ilog( c, "Peer asked for unknown block, sending: benign_other go away" ); + c->no_retry = benign_other; + c->enqueue( go_away_message( benign_other ) ); + } + } ); + } } - } + }); - if (c->sent_handshake_count == 0) { - c->send_handshake(); + if( sent_handshake_count == 0 ) { + send_handshake(); } } - c->last_handshake_recv = msg; - c->_logger_variant.reset(); - sync_master->recv_handshake(c,msg); + std::unique_lock g_conn( conn_mtx ); + last_handshake_recv = msg; + g_conn.unlock(); + my_impl->sync_master->recv_handshake( shared_from_this(), msg ); } - void net_plugin_impl::handle_message(const connection_ptr& c, const go_away_message& msg) { - peer_wlog(c, "received go_away_message, reason = ${r}", ("r",reason_str( msg.reason )) ); - c->no_retry = msg.reason; - if(msg.reason == duplicate ) { - c->node_id = msg.node_id; + void connection::handle_message( const go_away_message& msg ) { + peer_wlog( this, "received go_away_message, reason = ${r}", ("r", reason_str( msg.reason )) ); + bool retry = no_retry == no_reason; // if no previous go away message + no_retry = msg.reason; + if( msg.reason == duplicate ) { + std::lock_guard g_conn( conn_mtx ); + conn_node_id = msg.node_id; } - c->flush_queues(); - close(c); + if( msg.reason == wrong_version ) { + if( !retry ) no_retry = fatal_other; // only retry once on wrong version + } else { + retry = false; + } + flush_queues(); + close( retry ); // reconnect if wrong_version } - void net_plugin_impl::handle_message(const connection_ptr& c, const time_message& msg) { - peer_ilog(c, "received time_message"); + void connection::handle_message( const time_message& msg ) { 
+ peer_dlog( this, "received time_message" ); /* We've already lost however many microseconds it took to dispatch * the message, but it can't be helped. */ - msg.dst = c->get_time(); + msg.dst = get_time(); // If the transmit timestamp is zero, the peer is horribly broken. if(msg.xmt == 0) return; /* invalid timestamp */ - if(msg.xmt == c->xmt) + if(msg.xmt == xmt) return; /* duplicate packet */ - c->xmt = msg.xmt; - c->rec = msg.rec; - c->dst = msg.dst; + xmt = msg.xmt; + rec = msg.rec; + dst = msg.dst; - if(msg.org == 0) - { - c->send_time(msg); - return; // We don't have enough data to perform the calculation yet. - } + if( msg.org == 0 ) { + send_time( msg ); + return; // We don't have enough data to perform the calculation yet. + } - c->offset = (double(c->rec - c->org) + double(msg.xmt - c->dst)) / 2; + double offset = (double(rec - org) + double(msg.xmt - dst)) / 2; double NsecPerUsec{1000}; - if(logger.is_enabled(fc::log_level::all)) - logger.log(FC_LOG_MESSAGE(all, "Clock offset is ${o}ns (${us}us)", ("o", c->offset)("us", c->offset/NsecPerUsec))); - c->org = 0; - c->rec = 0; + if( logger.is_enabled( fc::log_level::all ) ) + logger.log( FC_LOG_MESSAGE( all, "Clock offset is ${o}ns (${us}us)", + ("o", offset)( "us", offset / NsecPerUsec ) ) ); + org = 0; + rec = 0; + + std::unique_lock g_conn( conn_mtx ); + if( last_handshake_recv.generation == 0 ) { + g_conn.unlock(); + send_handshake(); + } } - void net_plugin_impl::handle_message(const connection_ptr& c, const notice_message& msg) { + void connection::handle_message( const notice_message& msg ) { // peer tells us about one or more blocks or txns. 
When done syncing, forward on // notices of previously unknown blocks or txns, // - peer_ilog(c, "received notice_message"); - c->connecting = false; - request_message req; - bool send_req = false; - if (msg.known_trx.mode != none) { - fc_dlog(logger,"this is a ${m} notice with ${n} transactions", ("m",modes_str(msg.known_trx.mode))("n",msg.known_trx.pending)); + peer_dlog( this, "received notice_message" ); + connecting = false; + if( msg.known_blocks.ids.size() > 1 ) { + fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection: ${p}", + ("s", msg.known_blocks.ids.size())("p", peer_address()) ); + close( false ); + return; + } + if( msg.known_trx.mode != none ) { + fc_dlog( logger, "this is a ${m} notice with ${n} transactions", + ("m", modes_str( msg.known_trx.mode ))( "n", msg.known_trx.pending ) ); } switch (msg.known_trx.mode) { case none: break; case last_irr_catch_up: { - c->last_handshake_recv.head_num = msg.known_blocks.pending; - req.req_trx.mode = none; + std::unique_lock g_conn( conn_mtx ); + last_handshake_recv.head_num = msg.known_blocks.pending; + g_conn.unlock(); break; } case catch_up : { - if( msg.known_trx.pending > 0) { - // plan to get all except what we already know about. 
- req.req_trx.mode = catch_up; - send_req = true; - size_t known_sum = local_txns.size(); - if( known_sum ) { - for( const auto& t : local_txns.get() ) { - req.req_trx.ids.push_back( t.id ); - } - } - } break; } case normal: { - dispatcher->recv_notice(c, msg, false); + my_impl->dispatcher->recv_notice( shared_from_this(), msg, false ); } } - if (msg.known_blocks.mode != none) { - fc_dlog(logger,"this is a ${m} notice with ${n} blocks", ("m",modes_str(msg.known_blocks.mode))("n",msg.known_blocks.pending)); + if( msg.known_blocks.mode != none ) { + fc_dlog( logger, "this is a ${m} notice with ${n} blocks", + ("m", modes_str( msg.known_blocks.mode ))( "n", msg.known_blocks.pending ) ); } switch (msg.known_blocks.mode) { case none : { @@ -2473,40 +2774,37 @@ namespace eosio { } case last_irr_catch_up: case catch_up: { - sync_master->recv_notice(c,msg); + my_impl->sync_master->sync_recv_notice( shared_from_this(), msg ); break; } case normal : { - dispatcher->recv_notice(c, msg, false); + my_impl->dispatcher->recv_notice( shared_from_this(), msg, false ); break; } default: { - peer_elog(c, "bad notice_message : invalid known_blocks.mode ${m}",("m",static_cast(msg.known_blocks.mode))); - } + peer_elog( this, "bad notice_message : invalid known_blocks.mode ${m}", + ("m", static_cast(msg.known_blocks.mode)) ); } - fc_dlog(logger, "send req = ${sr}", ("sr",send_req)); - if( send_req) { - c->enqueue(req); } } - void net_plugin_impl::handle_message(const connection_ptr& c, const request_message& msg) { + void connection::handle_message( const request_message& msg ) { if( msg.req_blocks.ids.size() > 1 ) { fc_elog( logger, "Invalid request_message, req_blocks.ids.size ${s}, closing ${p}", - ("s", msg.req_blocks.ids.size())("p",c->peer_name()) ); - close(c); + ("s", msg.req_blocks.ids.size())( "p", peer_name() ) ); + close(); return; } switch (msg.req_blocks.mode) { case catch_up : - peer_ilog(c, "received request_message:catch_up"); - c->blk_send_branch( 
msg.req_blocks.ids.empty() ? block_id_type() : msg.req_blocks.ids.back() ); + peer_dlog( this, "received request_message:catch_up" ); + blk_send_branch( msg.req_blocks.ids.empty() ? block_id_type() : msg.req_blocks.ids.back() ); break; case normal : - peer_ilog(c, "received request_message:normal"); + peer_dlog( this, "received request_message:normal" ); if( !msg.req_blocks.ids.empty() ) { - c->blk_send(msg.req_blocks.ids.back()); + blk_send( msg.req_blocks.ids.back() ); } break; default:; @@ -2515,27 +2813,31 @@ namespace eosio { switch (msg.req_trx.mode) { case catch_up : - c->txn_send_pending(msg.req_trx.ids); - break; - case normal : - c->txn_send(msg.req_trx.ids); break; case none : - if(msg.req_blocks.mode == none) - c->stop_send(); + if( msg.req_blocks.mode == none ) { + stop_send(); + } + // no break + case normal : + if( !msg.req_trx.ids.empty() ) { + fc_elog( logger, "Invalid request_message, req_trx.ids.size ${s}", ("s", msg.req_trx.ids.size()) ); + close(); + return; + } break; default:; } - } - void net_plugin_impl::handle_message(const connection_ptr& c, const sync_request_message& msg) { - if( msg.end_block == 0) { - c->peer_requested.reset(); - c->flush_queues(); + void connection::handle_message( const sync_request_message& msg ) { + fc_dlog( logger, "peer requested ${start} to ${end}", ("start", msg.start_block)("end", msg.end_block) ); + if( msg.end_block == 0 ) { + peer_requested.reset(); + flush_queues(); } else { - c->peer_requested = peer_sync_state( msg.start_block,msg.end_block,msg.start_block-1); - c->enqueue_sync_block(); + peer_requested = peer_sync_state( msg.start_block, msg.end_block, msg.start_block-1); + enqueue_sync_block(); } } @@ -2546,64 +2848,81 @@ namespace eosio { trx->get_signatures().size() * sizeof(signature_type); } - void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { - fc_dlog(logger, "got a packed transaction, cancel wait"); - peer_ilog(c, "received packed_transaction"); - 
controller& cc = my_impl->chain_plug->chain(); - if( cc.get_read_mode() == eosio::db_read_mode::READ_ONLY ) { - fc_dlog(logger, "got a txn in read-only mode - dropping"); - return; - } - if( sync_master->is_active(c) ) { - fc_dlog(logger, "got a txn during sync - dropping"); + void connection::handle_message( packed_transaction_ptr trx ) { + if( my_impl->db_read_mode == eosio::db_read_mode::READ_ONLY ) { + fc_dlog( logger, "got a txn in read-only mode - dropping" ); return; } - auto ptrx = std::make_shared( trx ); - const auto& tid = ptrx->id; + const auto& tid = trx->id(); + peer_dlog( this, "received packed_transaction ${id}", ("id", tid) ); - if( c->trx_in_progress_size > def_max_trx_in_progress_size ) { + uint32_t trx_in_progress_sz = this->trx_in_progress_size.load(); + if( trx_in_progress_sz > def_max_trx_in_progress_size ) { fc_wlog( logger, "Dropping trx ${id}, too many trx in progress ${s} bytes", - ("id", tid)("s", c->trx_in_progress_size) ); + ("id", tid)("s", trx_in_progress_sz) ); return; } - if(local_txns.get().find(tid) != local_txns.end()) { - fc_dlog(logger, "got a duplicate transaction - dropping"); + bool have_trx = my_impl->dispatcher->have_txn( tid ); + node_transaction_state nts = {tid, trx->expiration(), 0, connection_id}; + my_impl->dispatcher->add_peer_txn( nts ); + + if( have_trx ) { + fc_dlog( logger, "got a duplicate transaction - dropping ${id}", ("id", tid) ); return; } - dispatcher->recv_transaction(c, tid); - c->trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); - chain_plug->accept_transaction(ptrx, [c, this, ptrx](const static_variant& result) { - c->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); + + trx_in_progress_size += calc_trx_size( trx ); + app().post( priority::low, [trx{std::move(trx)}, weak = weak_from_this()]() { + my_impl->chain_plug->accept_transaction( trx, + [weak, trx](const static_variant& result) mutable { + // next (this lambda) called from application thread if (result.contains()) { - 
peer_dlog(c, "bad packed_transaction : ${m}", ("m",result.get()->what())); + fc_dlog( logger, "bad packed_transaction : ${m}", ("m", result.get()->what()) ); } else { - auto trace = result.get(); - if (!trace->except) { - fc_dlog(logger, "chain accepted transaction"); - this->dispatcher->bcast_transaction(ptrx); - return; + const transaction_trace_ptr& trace = result.get(); + if( !trace->except ) { + fc_dlog( logger, "chain accepted transaction, bcast ${id}", ("id", trace->id) ); + } else { + fc_elog( logger, "bad packed_transaction : ${m}", ("m", trace->except->what())); } - - peer_elog(c, "bad packed_transaction : ${m}", ("m",trace->except->what())); } + connection_ptr conn = weak.lock(); + if( conn ) { + conn->trx_in_progress_size -= calc_trx_size( trx ); + } + }); + }); + } - dispatcher->rejected_transaction(ptrx->id); + // called from connection strand + void connection::handle_message( const block_id_type& id, signed_block_ptr ptr ) { + peer_dlog( this, "received signed_block ${id}", ("id", ptr->block_num() ) ); + app().post(priority::high, [ptr{std::move(ptr)}, id, c = shared_from_this()]() mutable { + c->process_signed_block( id, std::move( ptr ) ); }); + my_impl->dispatcher->bcast_notice( id ); } - void net_plugin_impl::handle_message(const connection_ptr& c, const signed_block_ptr& msg) { - controller &cc = chain_plug->chain(); - block_id_type blk_id = msg->id(); + // called from application thread + void connection::process_signed_block( const block_id_type& blk_id, signed_block_ptr msg ) { + controller& cc = my_impl->chain_plug->chain(); uint32_t blk_num = msg->block_num(); - fc_dlog(logger, "canceling wait on ${p}", ("p",c->peer_name())); - c->cancel_wait(); + // use c in this method instead of this to highlight that all methods called on c-> must be thread safe + connection_ptr c = shared_from_this(); + + // if we have closed connection then stop processing + if( !c->socket_is_open() ) + return; try { - if( cc.fetch_block_by_id(blk_id)) { - 
sync_master->recv_block( c, blk_id, blk_num, false ); - c->cancel_wait(); + if( cc.fetch_block_by_id(blk_id) ) { + c->strand.post( [sync_master = my_impl->sync_master.get(), + dispatcher = my_impl->dispatcher.get(), c, blk_id, blk_num]() { + dispatcher->add_peer_block( blk_id, c->connection_id ); + sync_master->sync_recv_block( c, blk_id, blk_num, false ); + }); return; } } catch( ...) { @@ -2611,14 +2930,14 @@ namespace eosio { fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); } - dispatcher->recv_block(c, blk_id, blk_num); fc::microseconds age( fc::time_point::now() - msg->timestamp); - peer_ilog(c, "received signed_block : #${n} block age in secs = ${age}", - ("n",blk_num)("age",age.to_seconds())); + peer_dlog( c, "received signed_block : #${n} block age in secs = ${age}", + ("n", blk_num)( "age", age.to_seconds() ) ); go_away_reason reason = fatal_other; try { - chain_plug->accept_block(msg); //, sync_master->is_active(c)); + my_impl->chain_plug->accept_block(msg); + my_impl->update_chain_info(); reason = no_reason; } catch( const unlinkable_block_exception &ex) { peer_elog(c, "bad signed_block ${n} ${id}...: ${m}", ("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.what())); @@ -2635,170 +2954,179 @@ namespace eosio { fc_elog( logger, "accept_block threw a non-assert exception ${x} from ${p}",( "x",ex.to_string())("p",c->peer_name())); } catch( ...) { peer_elog(c, "bad signed_block ${n} ${id}...: unknown exception", ("n", blk_num)("id", blk_id.str().substr(8,16))); - fc_elog( logger, "handle sync block caught something else from ${p}",("num",blk_num)("p",c->peer_name())); + fc_elog( logger, "handle sync block caught something else from ${p}",("p",c->peer_name())); } - update_block_num ubn(blk_num); if( reason == no_reason ) { - for (const auto &recpt : msg->transactions) { - auto id = (recpt.trx.which() == 0) ? 
recpt.trx.get() : recpt.trx.get().id(); - auto ltx = local_txns.get().find(id); - if( ltx != local_txns.end()) { - local_txns.modify( ltx, ubn ); - } - auto ctx = c->trx_state.get().find(id); - if( ctx != c->trx_state.end()) { - c->trx_state.modify( ctx, ubn ); - } - } - sync_master->recv_block(c, blk_id, blk_num, true); - } - else { - sync_master->rejected_block(c, blk_num); - dispatcher->rejected_block( blk_id ); + boost::asio::post( my_impl->thread_pool->get_executor(), [dispatcher = my_impl->dispatcher.get(), cid=c->connection_id, blk_id, msg]() { + dispatcher->add_peer_block( blk_id, cid ); + dispatcher->update_txns_block_num( msg ); + }); + c->strand.post( [sync_master = my_impl->sync_master.get(), dispatcher = my_impl->dispatcher.get(), c, blk_id, blk_num]() { + dispatcher->recv_block( c, blk_id, blk_num ); + sync_master->sync_recv_block( c, blk_id, blk_num, true ); + }); + } else { + c->strand.post( [sync_master = my_impl->sync_master.get(), dispatcher = my_impl->dispatcher.get(), c, blk_id, blk_num]() { + sync_master->rejected_block( c, blk_num ); + dispatcher->rejected_block( blk_id ); + }); } } + // called from any thread void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { - connector_check->expires_from_now( du); - connector_check->async_wait( [this, from_connection](boost::system::error_code ec) { - app().post( priority::low, [this, from_connection, ec]() { - if( !ec) { - connection_monitor(from_connection); - } - else { - fc_elog( logger, "Error from connection check monitor: ${m}",( "m", ec.message())); - start_conn_timer( connector_period, std::weak_ptr()); - } - }); - }); - } - - void net_plugin_impl::start_txn_timer() { - transaction_check->expires_from_now( txn_exp_period); - transaction_check->async_wait( [this]( boost::system::error_code ec ) { - int lower_than_low = priority::low - 1; - app().post( lower_than_low, [this, ec]() { + if( in_shutdown ) return; + std::lock_guard g( 
connector_check_timer_mtx ); + ++connector_checks_in_flight; + connector_check_timer->expires_from_now( du ); + connector_check_timer->async_wait( [my = shared_from_this(), from_connection](boost::system::error_code ec) { + std::unique_lock g( my->connector_check_timer_mtx ); + int num_in_flight = --my->connector_checks_in_flight; + g.unlock(); if( !ec ) { - expire_txns(); + my->connection_monitor(from_connection, num_in_flight == 0 ); } else { - fc_elog( logger, "Error from transaction check monitor: ${m}", ("m", ec.message())); - start_txn_timer(); + if( num_in_flight == 0 ) { + if( my->in_shutdown ) return; + fc_elog( logger, "Error from connection check monitor: ${m}", ("m", ec.message())); + my->start_conn_timer( my->connector_period, std::weak_ptr() ); + } } - } ); }); } + // thread safe + void net_plugin_impl::start_expire_timer() { + if( in_shutdown ) return; + std::lock_guard g( expire_timer_mtx ); + expire_timer->expires_from_now( txn_exp_period); + expire_timer->async_wait( [my = shared_from_this()]( boost::system::error_code ec ) { + if( !ec ) { + my->expire(); + } else { + if( my->in_shutdown ) return; + fc_elog( logger, "Error from transaction check monitor: ${m}", ("m", ec.message()) ); + my->start_expire_timer(); + } + } ); + } + + // thread safe void net_plugin_impl::ticker() { + if( in_shutdown ) return; + std::lock_guard g( keepalive_timer_mtx ); keepalive_timer->expires_from_now(keepalive_interval); - keepalive_timer->async_wait( [this]( boost::system::error_code ec ) { - app().post( priority::low, [this, ec]() { - ticker(); + keepalive_timer->async_wait( [my = shared_from_this()]( boost::system::error_code ec ) { + my->ticker(); if( ec ) { + if( my->in_shutdown ) return; fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); } - for( auto& c : connections ) { - if( c->socket->is_open()) { - c->send_time(); + for_each_connection( []( auto& c ) { + if( c->socket_is_open() ) { + c->strand.post( [c]() { + 
c->send_time(); + } ); } - } + return true; + } ); } ); - } ); } void net_plugin_impl::start_monitors() { - connector_check.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); - transaction_check.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); + { + std::lock_guard g( connector_check_timer_mtx ); + connector_check_timer.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); + } + { + std::lock_guard g( expire_timer_mtx ); + expire_timer.reset( new boost::asio::steady_timer( my_impl->thread_pool->get_executor() ) ); + } start_conn_timer(connector_period, std::weak_ptr()); - start_txn_timer(); + start_expire_timer(); } - void net_plugin_impl::expire_txns() { - start_txn_timer(); - + void net_plugin_impl::expire() { auto now = time_point::now(); - auto start_size = local_txns.size(); - - expire_local_txns(); - - controller& cc = chain_plug->chain(); - uint32_t lib = cc.last_irreversible_block_num(); + uint32_t lib = 0; + std::tie( lib, std::ignore, std::ignore, std::ignore, std::ignore, std::ignore ) = get_chain_info(); dispatcher->expire_blocks( lib ); - for ( auto &c : connections ) { - auto &stale_txn = c->trx_state.get(); - stale_txn.erase( stale_txn.lower_bound(1), stale_txn.upper_bound(lib) ); - auto &stale_txn_e = c->trx_state.get(); - stale_txn_e.erase(stale_txn_e.lower_bound(time_point_sec()), stale_txn_e.upper_bound(time_point::now())); - auto &stale_blk = c->blk_state.get(); - stale_blk.erase( stale_blk.lower_bound(1), stale_blk.upper_bound(lib) ); - } - fc_dlog(logger, "expire_txns ${n}us size ${s} removed ${r}", - ("n", time_point::now() - now)("s", start_size)("r", start_size - local_txns.size()) ); - } - - void net_plugin_impl::expire_local_txns() { - auto& old = local_txns.get(); - auto ex_lo = old.lower_bound( fc::time_point_sec(0) ); - auto ex_up = old.upper_bound( time_point::now() ); - old.erase( ex_lo, ex_up ); + dispatcher->expire_txns( lib ); + fc_dlog( logger, 
"expire_txns ${n}us", ("n", time_point::now() - now) ); - auto& stale = local_txns.get(); - controller& cc = chain_plug->chain(); - uint32_t lib = cc.last_irreversible_block_num(); - stale.erase( stale.lower_bound(1), stale.upper_bound(lib) ); + start_expire_timer(); } - void net_plugin_impl::connection_monitor(std::weak_ptr from_connection) { + // called from any thread + void net_plugin_impl::connection_monitor(std::weak_ptr from_connection, bool reschedule ) { auto max_time = fc::time_point::now(); max_time += fc::milliseconds(max_cleanup_time_ms); auto from = from_connection.lock(); + std::unique_lock g( connections_mtx ); auto it = (from ? connections.find(from) : connections.begin()); if (it == connections.end()) it = connections.begin(); + size_t num_rm = 0; while (it != connections.end()) { if (fc::time_point::now() >= max_time) { - start_conn_timer(std::chrono::milliseconds(1), *it); // avoid exhausting + connection_wptr wit = *it; + g.unlock(); + fc_dlog( logger, "Exiting connection monitor early, ran out of time: ${t}", ("t", max_time - fc::time_point::now()) ); + if( reschedule ) { + start_conn_timer( std::chrono::milliseconds( 1 ), wit ); // avoid exhausting + } return; } - if( !(*it)->socket->is_open() && !(*it)->connecting) { - if( (*it)->peer_addr.length() > 0) { - connect(*it); - } - else { + if( !(*it)->socket_is_open() && !(*it)->connecting) { + if( (*it)->peer_address().length() > 0) { + if( !(*it)->resolve_and_connect() ) { + it = connections.erase(it); + continue; + } + } else { it = connections.erase(it); continue; } } ++it; } - start_conn_timer(connector_period, std::weak_ptr()); + g.unlock(); + fc_dlog( logger, "connection monitor, removed ${n} connections", ("n", num_rm) ); + if( reschedule ) { + start_conn_timer( connector_period, std::weak_ptr()); + } } - void net_plugin_impl::close(const connection_ptr& c) { - if( c->peer_addr.empty() && c->socket->is_open() ) { - if (num_clients == 0) { - fc_wlog( logger, "num_clients already at 0"); - 
} - else { - --num_clients; - } - } - c->close(); + // called from application thread + void net_plugin_impl::on_accepted_block(const block_state_ptr& block) { + update_chain_info(); + dispatcher->strand.post( [this, block]() { + fc_dlog( logger, "signaled, blk num = ${num}, id = ${id}", ("num", block->block_num)("id", block->id) ); + dispatcher->bcast_block( block ); + }); } - void net_plugin_impl::accepted_block(const block_state_ptr& block) { - fc_dlog(logger,"signaled, id = ${id}",("id", block->id)); - dispatcher->bcast_block(block); + // called from application thread + void net_plugin_impl::on_irreversible_block( const block_state_ptr& block) { + fc_dlog( logger, "on_irreversible_block, blk num = ${num}, id = ${id}", ("num", block->block_num)("id", block->id) ); + update_chain_info(); } + // called from application thread void net_plugin_impl::transaction_ack(const std::pair& results) { - const auto& id = results.second->id; - if (results.first) { - fc_ilog(logger,"signaled NACK, trx-id = ${id} : ${why}",("id", id)("why", results.first->to_detail_string())); - dispatcher->rejected_transaction(id); - } else { - fc_ilog(logger,"signaled ACK, trx-id = ${id}",("id", id)); - dispatcher->bcast_transaction(results.second); - } + boost::asio::post( my_impl->thread_pool->get_executor(), [this, results]() { + const auto& id = results.second->id(); + if (results.first) { + fc_dlog( logger, "signaled NACK, trx-id = ${id} : ${why}", ("id", id)( "why", results.first->to_detail_string() ) ); + + uint32_t head_blk_num = 0; + std::tie( std::ignore, head_blk_num, std::ignore, std::ignore, std::ignore, std::ignore ) = get_chain_info(); + dispatcher->rejected_transaction(results.second->packed_trx(), head_blk_num); + } else { + fc_dlog( logger, "signaled ACK, trx-id = ${id}", ("id", id) ); + dispatcher->bcast_transaction(*results.second->packed_trx()); + } + }); } bool net_plugin_impl::authenticate_peer(const handshake_message& msg) const { @@ -2875,10 +3203,27 @@ namespace 
eosio { return chain::signature_type(); } - void - handshake_initializer::populate( handshake_message &hello) { + // call from connection strand + bool connection::populate_handshake( handshake_message& hello ) { namespace sc = std::chrono; - hello.network_version = net_version_base + net_version; + bool send = false; + if( no_retry == wrong_version ) { + hello.network_version = net_version_base + proto_explicit_sync; // try previous version + send = true; + } else { + hello.network_version = net_version_base + net_version; + } + const auto prev_head_id = hello.head_id; + uint32_t lib, head; + std::tie( lib, std::ignore, head, + hello.last_irreversible_block_id, std::ignore, hello.head_id ) = my_impl->get_chain_info(); + // only send handshake if state has changed since last handshake + send |= lib != hello.last_irreversible_block_num; + send |= head != hello.head_num; + send |= prev_head_id != hello.head_id; + if( !send ) return false; + hello.last_irreversible_block_num = lib; + hello.head_num = head; hello.chain_id = my_impl->chain_id; hello.node_id = my_impl->node_id; hello.key = my_impl->get_authentication_key(); @@ -2888,41 +3233,22 @@ namespace eosio { // If we couldn't sign, don't send a token. 
if(hello.sig == chain::signature_type()) hello.token = sha256(); - hello.p2p_address = my_impl->p2p_address + " - " + hello.node_id.str().substr(0,7); + hello.p2p_address = my_impl->p2p_address; + if( is_transactions_only_connection() ) hello.p2p_address += ":trx"; + if( is_blocks_only_connection() ) hello.p2p_address += ":blk"; + hello.p2p_address += " - " + hello.node_id.str().substr(0,7); #if defined( __APPLE__ ) hello.os = "osx"; #elif defined( __linux__ ) hello.os = "linux"; -#elif defined( _MSC_VER ) +#elif defined( _WIN32 ) hello.os = "win32"; #else hello.os = "other"; #endif hello.agent = my_impl->user_agent_name; - - controller& cc = my_impl->chain_plug->chain(); - hello.head_id = fc::sha256(); - hello.last_irreversible_block_id = fc::sha256(); - hello.head_num = cc.fork_db_pending_head_block_num(); - hello.last_irreversible_block_num = cc.last_irreversible_block_num(); - if( hello.last_irreversible_block_num ) { - try { - hello.last_irreversible_block_id = cc.get_block_id_for_num(hello.last_irreversible_block_num); - } - catch( const unknown_block_exception &ex) { - fc_wlog( logger, "caught unkown_block" ); - hello.last_irreversible_block_num = 0; - } - } - if( hello.head_num ) { - try { - hello.head_id = cc.get_block_id_for_num( hello.head_num ); - } - catch( const unknown_block_exception &ex) { - hello.head_num = 0; - } - } + return true; } net_plugin::net_plugin() @@ -2938,7 +3264,14 @@ namespace eosio { cfg.add_options() ( "p2p-listen-endpoint", bpo::value()->default_value( "0.0.0.0:9876" ), "The actual host:port used to listen for incoming p2p connections.") ( "p2p-server-address", bpo::value(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint.") - ( "p2p-peer-address", bpo::value< vector >()->composing(), "The public endpoint of a peer node to connect to. 
Use multiple p2p-peer-address options as needed to compose a network.") + ( "p2p-peer-address", bpo::value< vector >()->composing(), + "The public endpoint of a peer node to connect to. Use multiple p2p-peer-address options as needed to compose a network.\n" + " Syntax: host:port[:|]\n" + " The optional 'trx' and 'blk' indicates to node that only transactions 'trx' or blocks 'blk' should be sent." + " Examples:\n" + " p2p.eos.io:9876\n" + " p2p.trx.eos.io:9876:trx\n" + " p2p.blk.eos.io:9876:blk\n") ( "p2p-max-nodes-per-host", bpo::value()->default_value(def_max_nodes_per_host), "Maximum number of client nodes from any single IP address") ( "agent-name", bpo::value()->default_value("\"EOS Test Agent\""), "The name supplied to identify this node amongst the peers.") ( "allowed-connection", bpo::value>()->multitoken()->default_value({"any"}, "any"), "Can be 'any' or 'producers' or 'specified' or 'none'. If 'specified', peer-key must be specified at least once. If only 'producers', peer-key is not required. 'producers' and 'specified' may be combined.") @@ -2948,8 +3281,6 @@ namespace eosio { ( "max-clients", bpo::value()->default_value(def_max_clients), "Maximum number of clients from which connections are accepted, use 0 for no limit") ( "connection-cleanup-period", bpo::value()->default_value(def_conn_retry_wait), "number of seconds to wait before cleaning up dead connections") ( "max-cleanup-time-msec", bpo::value()->default_value(10), "max connection cleanup time per cleanup call in millisec") - ( "network-version-match", bpo::value()->default_value(false), - "DEPRECATED, needless restriction. 
True to require exact match of peer network version.") ( "net-threads", bpo::value()->default_value(my->thread_pool_size), "Number of worker threads in net_plugin thread pool" ) ( "sync-fetch-span", bpo::value()->default_value(def_sync_fetch_span), "number of blocks to retrieve in a chunk from any individual peer during synchronization") @@ -2977,12 +3308,7 @@ namespace eosio { try { peer_log_format = options.at( "peer-log-format" ).as(); - my->network_version_match = options.at( "network-version-match" ).as(); - if( my->network_version_match ) - wlog( "network-version-match is DEPRECATED as it is a needless restriction" ); - my->sync_master.reset( new sync_manager( options.at( "sync-fetch-span" ).as())); - my->dispatcher.reset( new dispatch_manager ); my->connector_period = std::chrono::seconds( options.at( "connection-cleanup-period" ).as()); my->max_cleanup_time_ms = options.at("max-cleanup-time-msec").as(); @@ -2990,8 +3316,6 @@ namespace eosio { my->resp_expected_period = def_resp_expected_wait; my->max_client_count = options.at( "max-clients" ).as(); my->max_nodes_per_host = options.at( "p2p-max-nodes-per-host" ).as(); - my->num_clients = 0; - my->started_sessions = 0; my->use_socket_read_watermark = options.at( "use-socket-read-watermark" ).as(); @@ -3052,7 +3376,6 @@ namespace eosio { EOS_ASSERT( my->chain_plug, chain::missing_chain_plugin_exception, "" ); my->chain_id = my->chain_plug->get_chain_id(); fc::rand_pseudo_bytes( my->node_id.data(), my->node_id.data_size()); - fc_ilog( logger, "my node_id is ${id}", ("id", my->node_id )); } FC_LOG_AND_RETHROW() } @@ -3060,26 +3383,38 @@ namespace eosio { void net_plugin::plugin_startup() { handle_sighup(); try { + + fc_ilog( logger, "my node_id is ${id}", ("id", my->node_id )); + my->producer_plug = app().find_plugin(); - // currently thread_pool only used for server_ioc my->thread_pool.emplace( "net", my->thread_pool_size ); - shared_ptr resolver = std::make_shared( my_impl->thread_pool->get_executor() ); + 
my->dispatcher.reset( new dispatch_manager( my_impl->thread_pool->get_executor() ) ); + + chain::controller&cc = my->chain_plug->chain(); + my->db_read_mode = cc.get_read_mode(); + if( my->db_read_mode == chain::db_read_mode::READ_ONLY && my->p2p_address.size() ) { + my->p2p_address.clear(); + fc_wlog( logger, "node in read-only mode disabling incoming p2p connections" ); + } + + tcp::endpoint listen_endpoint; if( my->p2p_address.size() > 0 ) { auto host = my->p2p_address.substr( 0, my->p2p_address.find( ':' )); auto port = my->p2p_address.substr( host.size() + 1, my->p2p_address.size()); tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str()); // Note: need to add support for IPv6 too? - my->listen_endpoint = *resolver->resolve( query ); + tcp::resolver resolver( my->thread_pool->get_executor() ); + listen_endpoint = *resolver.resolve( query ); my->acceptor.reset( new tcp::acceptor( my_impl->thread_pool->get_executor() ) ); if( !my->p2p_server_address.empty() ) { my->p2p_address = my->p2p_server_address; } else { - if( my->listen_endpoint.address().to_v4() == address_v4::any()) { + if( listen_endpoint.address().to_v4() == address_v4::any()) { boost::system::error_code ec; auto host = host_name( ec ); if( ec.value() != boost::system::errc::success ) { @@ -3096,39 +3431,44 @@ namespace eosio { if( my->acceptor ) { try { - my->acceptor->open(my->listen_endpoint.protocol()); + my->acceptor->open(listen_endpoint.protocol()); my->acceptor->set_option(tcp::acceptor::reuse_address(true)); - my->acceptor->bind(my->listen_endpoint); + my->acceptor->bind(listen_endpoint); my->acceptor->listen(); } catch (const std::exception& e) { - elog( "net_plugin::plugin_startup failed to bind to port ${port}", ("port", my->listen_endpoint.port())); + elog( "net_plugin::plugin_startup failed to bind to port ${port}", ("port", listen_endpoint.port()) ); throw e; } fc_ilog( logger, "starting listener, max clients is ${mc}",("mc",my->max_client_count) ); my->start_listen_loop(); 
} - chain::controller&cc = my->chain_plug->chain(); { - cc.accepted_block.connect( boost::bind(&net_plugin_impl::accepted_block, my.get(), _1)); + cc.accepted_block.connect( [my = my]( const block_state_ptr& s ) { + my->on_accepted_block( s ); + } ); + cc.irreversible_block.connect( [my = my]( const block_state_ptr& s ) { + my->on_irreversible_block( s ); + } ); } - my->keepalive_timer.reset( new boost::asio::steady_timer( my->thread_pool->get_executor() ) ); + { + std::lock_guard g( my->keepalive_timer_mtx ); + my->keepalive_timer.reset( new boost::asio::steady_timer( my->thread_pool->get_executor() ) ); + } my->ticker(); - my->incoming_transaction_ack_subscription = app().get_channel().subscribe(boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); - - if( cc.get_read_mode() == chain::db_read_mode::READ_ONLY ) { - my->max_nodes_per_host = 0; - fc_ilog( logger, "node in read-only mode setting max_nodes_per_host to 0 to prevent connections" ); - } + my->incoming_transaction_ack_subscription = app().get_channel().subscribe( + boost::bind(&net_plugin_impl::transaction_ack, my.get(), _1)); my->start_monitors(); - for( auto seed_node : my->supplied_peers ) { + my->update_chain_info(); + + for( const auto& seed_node : my->supplied_peers ) { connect( seed_node ); } - } catch (...) { + } catch( ... ) { // always want plugin_shutdown even on exception plugin_shutdown(); throw; @@ -3142,24 +3482,27 @@ namespace eosio { void net_plugin::plugin_shutdown() { try { fc_ilog( logger, "shutdown.." 
); - if( my->connector_check ) - my->connector_check->cancel(); - if( my->transaction_check ) - my->transaction_check->cancel(); - if( my->keepalive_timer ) - my->keepalive_timer->cancel(); - - my->done = true; - if( my->acceptor ) { - fc_ilog( logger, "close acceptor" ); - boost::system::error_code ec; - my->acceptor->cancel( ec ); - my->acceptor->close( ec ); + my->in_shutdown = true; + { + std::lock_guard g( my->connector_check_timer_mtx ); + if( my->connector_check_timer ) + my->connector_check_timer->cancel(); + }{ + std::lock_guard g( my->expire_timer_mtx ); + if( my->expire_timer ) + my->expire_timer->cancel(); + }{ + std::lock_guard g( my->keepalive_timer_mtx ); + if( my->keepalive_timer ) + my->keepalive_timer->cancel(); + } - fc_ilog( logger, "close ${s} connections",( "s",my->connections.size()) ); + { + fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); + std::lock_guard g( my->connections_mtx ); for( auto& con : my->connections ) { - fc_dlog( logger, "close: ${p}", ("p",con->peer_name()) ); - my->close( con ); + fc_dlog( logger, "close: ${p}", ("p", con->peer_name()) ); + con->close( false, true ); } my->connections.clear(); } @@ -3168,38 +3511,41 @@ namespace eosio { my->thread_pool->stop(); } - app().post( 0, [me = my](){} ); // keep my pointer alive until queue is drained + if( my->acceptor ) { + boost::system::error_code ec; + my->acceptor->cancel( ec ); + my->acceptor->close( ec ); + } + app().post( 0, [me = my](){} ); // keep my pointer alive until queue is drained fc_ilog( logger, "exit shutdown" ); } FC_CAPTURE_AND_RETHROW() } - size_t net_plugin::num_peers() const { - return my->count_open_sockets(); - } - /** * Used to trigger a new connection from RPC API */ string net_plugin::connect( const string& host ) { + std::lock_guard g( my->connections_mtx ); if( my->find_connection( host ) ) return "already connected"; - connection_ptr c = std::make_shared(host); - fc_dlog(logger,"adding new connection to the list"); - 
my->connections.insert( c ); - fc_dlog(logger,"calling active connector"); - my->connect( c ); + connection_ptr c = std::make_shared( host ); + fc_dlog( logger, "calling active connector: ${h}", ("h", host) ); + if( c->resolve_and_connect() ) { + fc_dlog( logger, "adding new connection to the list: ${c}", ("c", c->peer_name()) ); + my->connections.insert( c ); + } return "added connection"; } string net_plugin::disconnect( const string& host ) { + std::lock_guard g( my->connections_mtx ); for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) { - if( (*itr)->peer_addr == host ) { - (*itr)->reset(); + if( (*itr)->peer_address() == host ) { fc_ilog( logger, "disconnecting: ${p}", ("p", (*itr)->peer_name()) ); - my->close(*itr); + (*itr)->close(); my->connections.erase(itr); return "connection removed"; } @@ -3208,6 +3554,7 @@ namespace eosio { } optional net_plugin::status( const string& host )const { + std::shared_lock g( my->connections_mtx ); auto con = my->find_connection( host ); if( con ) return con->get_status(); @@ -3216,19 +3563,22 @@ namespace eosio { vector net_plugin::connections()const { vector result; + std::shared_lock g( my->connections_mtx ); result.reserve( my->connections.size() ); for( const auto& c : my->connections ) { result.push_back( c->get_status() ); } return result; } - connection_ptr net_plugin_impl::find_connection(const string& host )const { + + // call with connections_mtx + connection_ptr net_plugin_impl::find_connection( const string& host )const { for( const auto& c : connections ) - if( c->peer_addr == host ) return c; + if( c->peer_address() == host ) return c; return connection_ptr(); } - uint16_t net_plugin_impl::to_protocol_version(uint16_t v) { + constexpr uint16_t net_plugin_impl::to_protocol_version(uint16_t v) { if (v >= net_version_base) { v -= net_version_base; return (v > net_version_range) ? 
0 : v; diff --git a/plugins/producer_api_plugin/include/eosio/producer_api_plugin/producer_api_plugin.hpp b/plugins/producer_api_plugin/include/eosio/producer_api_plugin/producer_api_plugin.hpp index 137ef7b0703..fa63fef0c68 100644 --- a/plugins/producer_api_plugin/include/eosio/producer_api_plugin/producer_api_plugin.hpp +++ b/plugins/producer_api_plugin/include/eosio/producer_api_plugin/producer_api_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp index b513ae6a442..bd3feb1e9bb 100644 --- a/plugins/producer_api_plugin/producer_api_plugin.cpp +++ b/plugins/producer_api_plugin/producer_api_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index 878ca8de8b1..e572fa2e5d8 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #pragma once #include @@ -114,7 +109,7 @@ class producer_plugin : public appbase::plugin { fc::variants get_supported_protocol_features( const get_supported_protocol_features_params& params ) const; get_account_ram_corrections_result get_account_ram_corrections( const get_account_ram_corrections_params& params ) const; - + private: std::shared_ptr my; }; diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 4b36400534a..506050155a6 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1,14 +1,11 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include 
#include #include #include +#include #include #include -#include +#include #include #include @@ -173,22 +170,6 @@ enum class pending_block_mode { producing, speculating }; -#define CATCH_AND_CALL(NEXT)\ - catch ( const fc::exception& err ) {\ - NEXT(err.dynamic_copy_exception());\ - } catch ( const std::exception& e ) {\ - fc::exception fce( \ - FC_LOG_MESSAGE( warn, "rethrow ${what}: ", ("what",e.what())),\ - fc::std_exception_code,\ - BOOST_CORE_TYPEID(e).name(),\ - e.what() ) ;\ - NEXT(fce.dynamic_copy_exception());\ - } catch( ... ) {\ - fc::unhandled_exception e(\ - FC_LOG_MESSAGE(warn, "rethrow"),\ - std::current_exception());\ - NEXT(e.dynamic_copy_exception());\ - } class producer_plugin_impl : public std::enable_shared_from_this { public: @@ -205,13 +186,12 @@ class producer_plugin_impl : public std::enable_shared_from_this; std::map _signature_providers; @@ -219,21 +199,20 @@ class producer_plugin_impl : public std::enable_shared_from_this _producer_watermarks; pending_block_mode _pending_block_mode; - transaction_id_with_expiry_index _persistent_transactions; + unapplied_transaction_queue _unapplied_transactions; fc::optional _thread_pool; - int32_t _max_transaction_time_ms; + std::atomic _max_transaction_time_ms; // modified by app thread, read by net_plugin thread pool fc::microseconds _max_irreversible_block_age_us; int32_t _produce_time_offset_us = 0; int32_t _last_block_time_offset_us = 0; - int32_t _max_scheduled_transaction_time_per_block_ms; + int32_t _max_scheduled_transaction_time_per_block_ms = 0; fc::time_point _irreversible_block_time; fc::microseconds _keosd_provider_timeout_us; std::vector _protocol_features_to_activate; bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block - producer_plugin* _self = nullptr; chain_plugin* chain_plug = nullptr; incoming::channels::block::channel_type::handle _incoming_block_subscription; @@ -287,6 +266,7 @@ class producer_plugin_impl : public 
std::enable_shared_from_thisid(); + auto blk_num = block->block_num(); - fc_dlog(_log, "received incoming block ${id}", ("id", id)); + fc_dlog(_log, "received incoming block ${n} ${id}", ("n", blk_num)("id", id)); EOS_ASSERT( block->timestamp < (fc::time_point::now() + fc::seconds( 7 )), block_from_the_future, "received a block from the future, ignoring it: ${id}", ("id", id) ); @@ -370,7 +351,7 @@ class producer_plugin_impl : public std::enable_shared_from_thistimestamp < fc::minutes(5) || (block->block_num() % 1000 == 0) ) { + if( fc::time_point::now() - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0) ) { ilog("Received block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, conf: ${confs}, latency: ${latency} ms]", - ("p",block->producer)("id",id.str().substr(8,16))("n",block->block_num())("t",block->timestamp) + ("p",block->producer)("id",id.str().substr(8,16))("n",blk_num)("t",block->timestamp) ("count",block->transactions.size())("lib",chain.last_irreversible_block_num()) ("confs", block->confirmed)("latency", (fc::time_point::now() - block->timestamp).count()/1000 ) ); if( chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hbs->id != id && hbs->block != nullptr ) { // not applied to head @@ -412,30 +397,71 @@ class producer_plugin_impl : public std::enable_shared_from_this>> _pending_incoming_transactions; + class incoming_transaction_queue { + uint64_t max_incoming_transaction_queue_size = 0; + uint64_t size_in_bytes = 0; + std::deque>> _incoming_transactions; + + private: + static uint64_t calc_size( const transaction_metadata_ptr& trx ) { + return trx->packed_trx()->get_unprunable_size() + trx->packed_trx()->get_prunable_size() + sizeof( *trx ); + } + + void add_size( const transaction_metadata_ptr& trx ) { + auto size = calc_size( trx ); + EOS_ASSERT( size_in_bytes + size < max_incoming_transaction_queue_size, tx_resource_exhaustion, "Transaction exceeded producer resource limit" ); + size_in_bytes += size; + } + + public: 
+ void set_max_incoming_transaction_queue_size( uint64_t v ) { max_incoming_transaction_queue_size = v; } + + void add( const transaction_metadata_ptr& trx, bool persist_until_expired, next_function next ) { + add_size( trx ); + _incoming_transactions.emplace_back( trx, persist_until_expired, std::move( next ) ); + } + + void add_front( const transaction_metadata_ptr& trx, bool persist_until_expired, next_function next ) { + add_size( trx ); + _incoming_transactions.emplace_front( trx, persist_until_expired, std::move( next ) ); + } + + auto pop_front() { + EOS_ASSERT( !_incoming_transactions.empty(), producer_exception, "logic error, front() called on empty incoming_transactions" ); + auto intrx = _incoming_transactions.front(); + _incoming_transactions.pop_front(); + const transaction_metadata_ptr& trx = std::get<0>( intrx ); + size_in_bytes -= calc_size( trx ); + return intrx; + } + + bool empty()const { return _incoming_transactions.empty(); } + size_t size()const { return _incoming_transactions.size(); } + }; + + incoming_transaction_queue _pending_incoming_transactions; - void on_incoming_transaction_async(const transaction_metadata_ptr& trx, bool persist_until_expired, next_function next) { + void on_incoming_transaction_async(const packed_transaction_ptr& trx, bool persist_until_expired, next_function next) { chain::controller& chain = chain_plug->chain(); - const auto& cfg = chain.get_global_properties().configuration; - signing_keys_future_type future = transaction_metadata::start_recover_keys( trx, _thread_pool->get_executor(), - chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( _thread_pool->get_executor(), [self = this, future, trx, persist_until_expired, next]() { - if( future.valid() ) + const auto max_trx_time_ms = _max_transaction_time_ms.load(); + fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); + + auto future = transaction_metadata::start_recover_keys( trx, _thread_pool->get_executor(), + chain.get_chain_id(), fc::microseconds( max_trx_cpu_usage ), chain.configured_subjective_signature_length_limit() ); + boost::asio::post( _thread_pool->get_executor(), [self = this, future{std::move(future)}, persist_until_expired, next{std::move(next)}]() mutable { + if( future.valid() ) { future.wait(); - app().post(priority::low, [self, trx, persist_until_expired, next]() { - self->process_incoming_transaction_async( trx, persist_until_expired, next ); - }); + app().post( priority::low, [self, future{std::move(future)}, persist_until_expired, next{std::move( next )}]() mutable { + try { + self->process_incoming_transaction_async( future.get(), persist_until_expired, std::move( next ) ); + } CATCH_AND_CALL(next); + } ); + } }); } void process_incoming_transaction_async(const transaction_metadata_ptr& trx, bool persist_until_expired, next_function next) { chain::controller& chain = chain_plug->chain(); - if (!chain.is_building_block()) { - _pending_incoming_transactions.emplace_back(trx, persist_until_expired, next); - return; - } - - auto block_time = chain.pending_block_time(); auto send_response = [this, &trx, &chain, &next](const fc::static_variant& response) { next(response); @@ -445,11 +471,11 @@ class producer_plugin_impl : public std::enable_shared_from_thisid) + ("txid", trx->id()) ("why",response.get()->what())); } else { fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid} : ${why} ", - ("txid", trx->id) + ("txid", trx->id()) ("why",response.get()->what())); } } else { @@ -458,58 +484,70 @@ class producer_plugin_impl : public std::enable_shared_from_thisid)); + ("txid", trx->id())); } else { fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}", - ("txid", trx->id)); + ("txid", trx->id())); } } }; - const auto& id = trx->id; - if( 
fc::time_point(trx->packed_trx->expiration()) < block_time ) { - send_response(std::static_pointer_cast(std::make_shared(FC_LOG_MESSAGE(error, "expired transaction ${id}", ("id", id)) ))); - return; - } + try { + const auto& id = trx->id(); + + fc::time_point bt = chain.is_building_block() ? chain.pending_block_time() : chain.head_block_time(); + if( fc::time_point( trx->packed_trx()->expiration()) < bt ) { + send_response( std::static_pointer_cast( + std::make_shared( + FC_LOG_MESSAGE( error, "expired transaction ${id}, expiration ${e}, block time ${bt}", + ("id", id)("e", trx->packed_trx()->expiration())( "bt", bt ))))); + return; + } - if( chain.is_known_unexpired_transaction(id) ) { - send_response(std::static_pointer_cast(std::make_shared(FC_LOG_MESSAGE(error, "duplicate transaction ${id}", ("id", id)) ))); - return; - } + if( chain.is_known_unexpired_transaction( id )) { + send_response( std::static_pointer_cast( std::make_shared( + FC_LOG_MESSAGE( error, "duplicate transaction ${id}", ("id", id)))) ); + return; + } - auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); - bool deadline_is_subjective = false; - const auto block_deadline = calculate_block_deadline(block_time); - if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_deadline < deadline) ) { - deadline_is_subjective = true; - deadline = block_deadline; - } + if( !chain.is_building_block()) { + _pending_incoming_transactions.add( trx, persist_until_expired, next ); + return; + } - try { - auto trace = chain.push_transaction(trx, deadline); - if (trace->except) { - if (failure_is_subjective(*trace->except, deadline_is_subjective)) { - _pending_incoming_transactions.emplace_back(trx, persist_until_expired, next); - if (_pending_block_mode == pending_block_mode::producing) { - fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ", - ("block_num", 
chain.head_block_num() + 1) - ("prod", chain.pending_block_producer()) - ("txid", trx->id)); + auto deadline = fc::time_point::now() + fc::milliseconds( _max_transaction_time_ms ); + bool deadline_is_subjective = false; + const auto block_deadline = calculate_block_deadline( chain.pending_block_time() ); + if( _max_transaction_time_ms < 0 || + (_pending_block_mode == pending_block_mode::producing && block_deadline < deadline)) { + deadline_is_subjective = true; + deadline = block_deadline; + } + + auto trace = chain.push_transaction( trx, deadline ); + if( trace->except ) { + if( failure_is_subjective( *trace->except, deadline_is_subjective )) { + _pending_incoming_transactions.add( trx, persist_until_expired, next ); + if( _pending_block_mode == pending_block_mode::producing ) { + fc_dlog( _trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ", + ("block_num", chain.head_block_num() + 1) + ("prod", chain.pending_block_producer()) + ("txid", trx->id())); } else { - fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", - ("txid", trx->id)); + fc_dlog( _trx_trace_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", + ("txid", trx->id())); } } else { auto e_ptr = trace->except->dynamic_copy_exception(); - send_response(e_ptr); + send_response( e_ptr ); } } else { - if (persist_until_expired) { + if( persist_until_expired ) { // if this trx didnt fail/soft-fail and the persist flag is set, store its ID so that we can // ensure its applied to all future speculative blocks as well. 
- _persistent_transactions.insert(transaction_id_with_expiry{trx->id, trx->packed_trx->expiration()}); + _unapplied_transactions.add_persisted( trx ); } - send_response(trace); + send_response( trace ); } } catch ( const guard_exception& e ) { @@ -573,7 +611,6 @@ void new_chain_banner(const eosio::chain::controller& db) producer_plugin::producer_plugin() : my(new producer_plugin_impl(app().get_io_service())){ - my->_self = this; } producer_plugin::~producer_plugin() {} @@ -622,6 +659,8 @@ void producer_plugin::set_program_options( "Time in microseconds allowed for a transaction that starts with insufficient CPU quota to complete and cover its CPU usage.") ("incoming-defer-ratio", bpo::value()->default_value(1.0), "ratio between incoming transations and deferred transactions when both are exhausted") + ("incoming-transaction-queue-size-mb", bpo::value()->default_value( 1024 ), + "Maximum size (in MiB) of the incoming transaction queue. Exceeding this value will subjectively drop transaction with resource exhaustion.") ("producer-threads", bpo::value()->default_value(config::default_controller_thread_pool_size), "Number of worker threads in producer thread pool") ("snapshots-dir", bpo::value()->default_value("snapshots"), @@ -656,10 +695,12 @@ T dejsonify(const string& s) { return fc::json::from_string(s).as(); } -#define LOAD_VALUE_SET(options, name, container, type) \ -if( options.count(name) ) { \ - const std::vector& ops = options[name].as>(); \ - std::copy(ops.begin(), ops.end(), std::inserter(container, container.end())); \ +#define LOAD_VALUE_SET(options, op_name, container) \ +if( options.count(op_name) ) { \ + const std::vector& ops = options[op_name].as>(); \ + for( const auto& v : ops ) { \ + container.emplace( eosio::chain::name( v ) ); \ + } \ } static producer_plugin_impl::signature_provider_type @@ -698,9 +739,14 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ my->chain_plug = app().find_plugin(); EOS_ASSERT( 
my->chain_plug, plugin_config_exception, "chain_plugin not found" ); my->_options = &options; - LOAD_VALUE_SET(options, "producer-name", my->_producers, types::account_name) + LOAD_VALUE_SET(options, "producer-name", my->_producers) chain::controller& chain = my->chain_plug->chain(); + unapplied_transaction_queue::process_mode unapplied_mode = + (chain.get_read_mode() != chain::db_read_mode::SPECULATIVE) ? unapplied_transaction_queue::process_mode::non_speculative : + my->_producers.empty() ? unapplied_transaction_queue::process_mode::speculative_non_producer : + unapplied_transaction_queue::process_mode::speculative_producer; + my->_unapplied_transactions.set_mode( unapplied_mode ); if( options.count("private-key") ) { @@ -762,6 +808,13 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ my->_max_irreversible_block_age_us = fc::seconds(options.at("max-irreversible-block-age").as()); + auto max_incoming_transaction_queue_size = options.at("incoming-transaction-queue-size-mb").as() * 1024*1024; + + EOS_ASSERT( max_incoming_transaction_queue_size > 0, plugin_config_exception, + "incoming-transaction-queue-size-mb ${mb} must be greater than 0", ("mb", max_incoming_transaction_queue_size) ); + + my->_pending_incoming_transactions.set_max_incoming_transaction_queue_size( max_incoming_transaction_queue_size ); + my->_incoming_defer_ratio = options.at("incoming-defer-ratio").as(); auto thread_pool_size = options.at( "producer-threads" ).as(); @@ -784,23 +837,27 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ "No such directory '${dir}'", ("dir", my->_snapshots_dir.generic_string()) ); } - my->_incoming_block_subscription = app().get_channel().subscribe([this](const signed_block_ptr& block){ + my->_incoming_block_subscription = app().get_channel().subscribe( + [this](const signed_block_ptr& block) { try { my->on_incoming_block(block); } LOG_AND_DROP(); }); - my->_incoming_transaction_subscription = 
app().get_channel().subscribe([this](const transaction_metadata_ptr& trx){ + my->_incoming_transaction_subscription = app().get_channel().subscribe( + [this](const packed_transaction_ptr& trx) { try { my->on_incoming_transaction_async(trx, false, [](const auto&){}); } LOG_AND_DROP(); }); - my->_incoming_block_sync_provider = app().get_method().register_provider([this](const signed_block_ptr& block){ + my->_incoming_block_sync_provider = app().get_method().register_provider( + [this](const signed_block_ptr& block) { my->on_incoming_block(block); }); - my->_incoming_transaction_async_provider = app().get_method().register_provider([this](const transaction_metadata_ptr& trx, bool persist_until_expired, next_function next) -> void { + my->_incoming_transaction_async_provider = app().get_method().register_provider( + [this](const packed_transaction_ptr& trx, bool persist_until_expired, next_function next) -> void { return my->on_incoming_transaction_async(trx, persist_until_expired, next ); }); @@ -834,7 +891,6 @@ void producer_plugin::plugin_startup() EOS_ASSERT( my->_producers.empty() || chain.get_validation_mode() == chain::validation_mode::FULL, plugin_config_exception, "node cannot have any producer-name configured because block production is not safe when validation_mode is not \"full\"" ); - my->_accepted_block_connection.emplace(chain.accepted_block.connect( [this]( const auto& bsp ){ my->on_block( bsp ); } )); my->_accepted_block_header_connection.emplace(chain.accepted_block_header.connect( [this]( const auto& bsp ){ my->on_block_header( bsp ); } )); my->_irreversible_block_connection.emplace(chain.irreversible_block.connect( [this]( const auto& bsp ){ my->on_irreversible_block( bsp->block ); } )); @@ -854,7 +910,6 @@ void producer_plugin::plugin_startup() if (chain.head_block_num() == 0) { new_chain_banner(chain); } - //_production_skip_flags |= eosio::chain::skip_undo_history_check; } } @@ -898,7 +953,7 @@ void producer_plugin::resume() { // if 
(my->_pending_block_mode == pending_block_mode::speculating) { chain::controller& chain = my->chain_plug->chain(); - chain.abort_block(); + my->_unapplied_transactions.add_aborted( chain.abort_block() ); my->schedule_production_loop(); } } @@ -937,7 +992,7 @@ void producer_plugin::update_runtime_options(const runtime_options& options) { } if (check_speculating && my->_pending_block_mode == pending_block_mode::speculating) { - chain.abort_block(); + my->_unapplied_transactions.add_aborted( chain.abort_block() ); my->schedule_production_loop(); } @@ -1021,7 +1076,7 @@ producer_plugin::integrity_hash_information producer_plugin::get_integrity_hash( if (chain.is_building_block()) { // abort the pending block - chain.abort_block(); + my->_unapplied_transactions.add_aborted( chain.abort_block() ); } else { reschedule.cancel(); } @@ -1050,7 +1105,7 @@ void producer_plugin::create_snapshot(producer_plugin::next_function_unapplied_transactions.add_aborted( chain.abort_block() ); } else { reschedule.cancel(); } @@ -1183,8 +1238,8 @@ producer_plugin::get_account_ram_corrections( const get_account_ram_corrections_ const auto& db = my->chain_plug->chain().db(); const auto& idx = db.get_index(); - account_name lower_bound_value = std::numeric_limits::lowest(); - account_name upper_bound_value = std::numeric_limits::max(); + account_name lower_bound_value{ std::numeric_limits::lowest() }; + account_name upper_bound_value{ std::numeric_limits::max() }; if( params.lower_bound ) { lower_bound_value = *params.lower_bound; @@ -1296,21 +1351,12 @@ fc::time_point producer_plugin_impl::calculate_block_deadline( const fc::time_po return block_time + fc::microseconds(last_block ? 
_last_block_time_offset_us : _produce_time_offset_us); } -enum class tx_category { - PERSISTED, - UNEXPIRED_UNPERSISTED, - EXPIRED, -}; - - producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { chain::controller& chain = chain_plug->chain(); if( chain.get_read_mode() == chain::db_read_mode::READ_ONLY ) return start_block_result::waiting; - fc_dlog(_log, "Starting block at ${time}", ("time", fc::time_point::now())); - const auto& hbs = chain.head_block_state(); //Schedule for the next second's tick regardless of chain state @@ -1322,8 +1368,20 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { // Not our turn const auto& scheduled_producer = hbs->get_scheduled_producer(block_time); + + fc_dlog(_log, "Starting block #${n} at ${time} producer ${p}", + ("n", hbs->block_num + 1)("time", now)("p", scheduled_producer.producer_name)); + auto current_watermark = get_watermark(scheduled_producer.producer_name); - auto signature_provider_itr = _signature_providers.find(scheduled_producer.block_signing_key); + + size_t num_relevant_signatures = 0; + scheduled_producer.for_each_key([&](const public_key_type& key){ + const auto& iter = _signature_providers.find(key); + if(iter != _signature_providers.end()) { + num_relevant_signatures++; + } + }); + auto irreversible_block_age = get_irreversible_block_age(); // If the next block production opportunity is in the present or future, we're synced. 
@@ -1331,8 +1389,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { _pending_block_mode = pending_block_mode::speculating; } else if( _producers.find(scheduled_producer.producer_name) == _producers.end()) { _pending_block_mode = pending_block_mode::speculating; - } else if (signature_provider_itr == _signature_providers.end()) { - elog("Not producing block because I don't have the private key for ${scheduled_key}", ("scheduled_key", scheduled_producer.block_signing_key)); + } else if (num_relevant_signatures == 0) { + elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", ("authority", scheduled_producer.authority)); _pending_block_mode = pending_block_mode::speculating; } else if ( _pause_production ) { elog("Not producing block because production is explicitly paused"); @@ -1380,7 +1438,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { blocks_to_confirm = (uint16_t)(std::min(blocks_to_confirm, (uint32_t)(hbs->block_num - hbs->dpos_irreversible_blocknum))); } - chain.abort_block(); + _unapplied_transactions.add_aborted( chain.abort_block() ); auto features_to_activate = chain.get_preactivated_protocol_features(); if( _pending_block_mode == pending_block_mode::producing && _protocol_features_to_activate.size() > 0 ) { @@ -1421,11 +1479,11 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } LOG_AND_DROP(); if( chain.is_building_block() ) { - auto pending_block_signing_key = chain.pending_block_signing_key(); + const auto& pending_block_signing_authority = chain.pending_block_signing_authority(); const fc::time_point preprocess_deadline = calculate_block_deadline(block_time); - if (_pending_block_mode == pending_block_mode::producing && pending_block_signing_key != scheduled_producer.block_signing_key) { - elog("Block Signing Key is not expected value, reverting to speculative mode! 
[expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.block_signing_key)("actual", pending_block_signing_key)); + if (_pending_block_mode == pending_block_mode::producing && pending_block_signing_authority != scheduled_producer.authority) { + elog("Unexpected block signing authority, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); _pending_block_mode = pending_block_mode::speculating; } @@ -1455,7 +1513,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if( app().is_quiting() ) // db guard exception above in LOG_AND_DROP could have called app().quit() return start_block_result::failed; - if( preprocess_deadline <= fc::time_point::now() ) { + if (preprocess_deadline <= fc::time_point::now()) { return start_block_result::exhausted; } else { if( !process_incoming_trxs( preprocess_deadline, pending_incoming_process_limit ) ) @@ -1471,6 +1529,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } catch ( boost::interprocess::bad_alloc& ) { chain_plugin::handle_db_exhaustion(); } + } return start_block_result::failed; @@ -1478,44 +1537,46 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { bool producer_plugin_impl::remove_expired_persisted_trxs( const fc::time_point& deadline ) { - bool exhausted = false; - auto& persisted_by_expiry = _persistent_transactions.get(); - if (!persisted_by_expiry.empty()) { - chain::controller& chain = chain_plug->chain(); - int num_expired_persistent = 0; - int orig_count = _persistent_transactions.size(); - - const time_point pending_block_time = chain.pending_block_time(); - while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pending_block_time) { - if (deadline <= fc::time_point::now()) { - exhausted = true; - break; - } - auto const& txid = 
persisted_by_expiry.begin()->trx_id; - if (_pending_block_mode == pending_block_mode::producing) { - fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}", - ("block_num", chain.head_block_num() + 1) - ("prod", chain.pending_block_producer()) - ("txid", txid)); - } else { - fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${txid}", - ("txid", txid)); - } - - persisted_by_expiry.erase(persisted_by_expiry.begin()); - num_expired_persistent++; - } + chain::controller& chain = chain_plug->chain(); + auto pending_block_time = chain.pending_block_time(); + + // remove all expired transactions + size_t num_expired_persistent = 0; + size_t num_expired_other = 0; + size_t orig_count = _unapplied_transactions.size(); + bool exhausted = !_unapplied_transactions.clear_expired( pending_block_time, deadline, + [&num_expired_persistent, &num_expired_other, pbm = _pending_block_mode, + &chain, has_producers = !_producers.empty()]( const transaction_id_type& txid, trx_enum_type trx_type ) { + if( trx_type == trx_enum_type::persisted ) { + if( pbm == pending_block_mode::producing ) { + fc_dlog( _trx_trace_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}", + ("block_num", chain.head_block_num() + 1)("prod", chain.pending_block_producer())("txid", txid)); + } else { + fc_dlog( _trx_trace_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${txid}", ("txid", txid)); + } + ++num_expired_persistent; + } else { + if (has_producers) { + fc_dlog(_trx_trace_log, + "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED : ${txid}", + ("txid", txid)); + } + ++num_expired_other; + } + }); - if( exhausted ) { - fc_wlog( _log, "Unable to process all ${n} persisted transactions before deadline, Expired ${expired}", - ( "n", orig_count ) - ( "expired", num_expired_persistent ) ); - } else { - 
fc_dlog( _log, "Processed ${n} persisted transactions, Expired ${expired}", - ( "n", orig_count ) - ( "expired", num_expired_persistent ) ); - } + if( exhausted ) { + fc_wlog( _log, "Unable to process all expired transactions in unapplied queue before deadline, " + "Persistent expired ${persistent_expired}, Other expired ${other_expired}", + ("persistent_expired", num_expired_persistent)("other_expired", num_expired_other) ); + } else { + fc_dlog( _log, "Processed ${m} expired transactions of the ${n} transactions in the unapplied queue, " + "Persistent expired ${persistent_expired}, Other expired ${other_expired}", + ("m", num_expired_persistent+num_expired_other)("n", orig_count) + ("persistent_expired", num_expired_persistent)("other_expired", num_expired_other) ); } + return !exhausted; } @@ -1537,109 +1598,69 @@ bool producer_plugin_impl::remove_expired_blacklisted_trxs( const fc::time_point num_expired++; } - fc_dlog( _log, "Processed ${n} blacklisted transactions, Expired ${expired}", - ("n", orig_count)("expired", num_expired) ); + fc_dlog(_log, "Processed ${n} blacklisted transactions, Expired ${expired}", + ("n", orig_count)("expired", num_expired)); } return !exhausted; } bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadline ) { - chain::controller& chain = chain_plug->chain(); - auto& persisted_by_id = _persistent_transactions.get(); - bool exhausted = false; - // Processing unapplied transactions... 
- // - if (_producers.empty() && persisted_by_id.empty()) { - // if this node can never produce and has no persisted transactions, - // there is no need for unapplied transactions they can be dropped - chain.get_unapplied_transactions().clear(); - } else { - // derive appliable transactions from unapplied_transactions and drop droppable transactions - unapplied_transactions_type& unapplied_trxs = chain.get_unapplied_transactions(); - if( !unapplied_trxs.empty() ) { - const time_point pending_block_time = chain.pending_block_time(); - auto unapplied_trxs_size = unapplied_trxs.size(); - int num_applied = 0; - int num_failed = 0; - int num_processed = 0; - auto calculate_transaction_category = [&](const transaction_metadata_ptr& trx) { - if (trx->packed_trx->expiration() < pending_block_time) { - return tx_category::EXPIRED; - } else if (persisted_by_id.find(trx->id) != persisted_by_id.end()) { - return tx_category::PERSISTED; - } else { - return tx_category::UNEXPIRED_UNPERSISTED; - } - }; - - auto itr = unapplied_trxs.begin(); - while( itr != unapplied_trxs.end() ) { - auto itr_next = itr; // save off next since itr may be invalidated by loop - ++itr_next; + if( !_unapplied_transactions.empty() ) { + chain::controller& chain = chain_plug->chain(); + int num_applied = 0, num_failed = 0, num_processed = 0; + auto unapplied_trxs_size = _unapplied_transactions.size(); + auto itr = (_pending_block_mode == pending_block_mode::producing) ? + _unapplied_transactions.begin() : _unapplied_transactions.persisted_begin(); + auto end_itr = (_pending_block_mode == pending_block_mode::producing) ? 
+ _unapplied_transactions.end() : _unapplied_transactions.persisted_end(); + while( itr != end_itr ) { + if( deadline <= fc::time_point::now() ) { + exhausted = true; + break; + } - if( deadline <= fc::time_point::now() ) { - exhausted = true; - break; + const transaction_metadata_ptr trx = itr->trx_meta; + ++num_processed; + try { + auto trx_deadline = fc::time_point::now() + fc::milliseconds( _max_transaction_time_ms ); + bool deadline_is_subjective = false; + if( _max_transaction_time_ms < 0 || + (_pending_block_mode == pending_block_mode::producing && deadline < trx_deadline) ) { + deadline_is_subjective = true; + trx_deadline = deadline; } - const transaction_metadata_ptr trx = itr->second; - auto category = calculate_transaction_category(trx); - if (category == tx_category::EXPIRED || - (category == tx_category::UNEXPIRED_UNPERSISTED && _producers.empty())) - { - if (!_producers.empty()) { - fc_dlog(_trx_trace_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED : ${txid}", - ("txid", trx->id)); + + auto trace = chain.push_transaction( trx, trx_deadline ); + if( trace->except ) { + if( failure_is_subjective( *trace->except, deadline_is_subjective ) ) { + exhausted = true; + // don't erase, subjective failure so try again next time + break; + } else { + // this failed our configured maximum transaction time, we don't want to replay it + ++num_failed; + itr = _unapplied_transactions.erase( itr ); + continue; } - itr = unapplied_trxs.erase( itr ); // unapplied_trxs map has not been modified, so simply erase and continue + } else { + ++num_applied; + itr = _unapplied_transactions.erase( itr ); continue; - } else if (category == tx_category::PERSISTED || - (category == tx_category::UNEXPIRED_UNPERSISTED && _pending_block_mode == pending_block_mode::producing)) - { - ++num_processed; - - try { - auto trx_deadline = fc::time_point::now() + fc::milliseconds( _max_transaction_time_ms ); - bool 
deadline_is_subjective = false; - if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && deadline < trx_deadline)) { - deadline_is_subjective = true; - trx_deadline = deadline; - } - - auto trace = chain.push_transaction(trx, trx_deadline); - if (trace->except) { - if (failure_is_subjective(*trace->except, deadline_is_subjective)) { - exhausted = true; - break; - } else { - // this failed our configured maximum transaction time, we don't want to replay it - // chain.plus_transactions can modify unapplied_trxs, so erase by id - unapplied_trxs.erase( trx->signed_id ); - ++num_failed; - } - } else { - ++num_applied; - } - } LOG_AND_DROP(); } - - itr = itr_next; - } - - fc_dlog( _log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", - ("m", num_processed)("n", unapplied_trxs_size)("applied", num_applied)("failed", num_failed) ); + } LOG_AND_DROP(); + ++itr; } + + fc_dlog( _log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", + ("m", num_processed)( "n", unapplied_trxs_size )("applied", num_applied)("failed", num_failed) ); } return !exhausted; } bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_point& deadline, size_t& pending_incoming_process_limit ) { - chain::controller& chain = chain_plug->chain(); - const time_point pending_block_time = chain.pending_block_time(); - auto& blacklist_by_id = _blacklisted_transactions.get(); - // scheduled transactions int num_applied = 0; int num_failed = 0; @@ -1647,6 +1668,9 @@ bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p bool exhausted = false; double incoming_trx_weight = 0.0; + auto& blacklist_by_id = _blacklisted_transactions.get(); + chain::controller& chain = chain_plug->chain(); + time_point pending_block_time = chain.pending_block_time(); const auto& sch_idx = chain.db().get_index(); const auto 
scheduled_trxs_size = sch_idx.size(); auto sch_itr = sch_idx.begin(); @@ -1681,8 +1705,7 @@ bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p break; } - auto e = _pending_incoming_transactions.front(); - _pending_incoming_transactions.pop_front(); + auto e = _pending_incoming_transactions.pop_front(); --pending_incoming_process_limit; incoming_trx_weight -= 1.0; process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); @@ -1725,9 +1748,11 @@ bool producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p } if( scheduled_trxs_size > 0 ) { - fc_dlog( _log, "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", + fc_dlog( _log, + "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", ( "m", num_processed )( "n", scheduled_trxs_size )( "applied", num_applied )( "failed", num_failed ) ); } + return !exhausted; } @@ -1735,17 +1760,19 @@ bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline { bool exhausted = false; if (!_pending_incoming_transactions.empty()) { - fc_dlog(_log, "Processing ${n} pending transactions", ("n", _pending_incoming_transactions.size())); + size_t processed = 0; + fc_dlog(_log, "Processing ${n} pending transactions", ("n", pending_incoming_process_limit)); while (pending_incoming_process_limit && _pending_incoming_transactions.size()) { - if( deadline <= fc::time_point::now() ) { + if (deadline <= fc::time_point::now()) { exhausted = true; break; } - auto e = _pending_incoming_transactions.front(); - _pending_incoming_transactions.pop_front(); + auto e = _pending_incoming_transactions.pop_front(); --pending_incoming_process_limit; process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); + ++processed; } + fc_dlog(_log, "Processed ${n} pending transactions, ${p} left", ("n", processed)("p", _pending_incoming_transactions.size())); } return 
!exhausted; } @@ -1809,9 +1836,9 @@ void producer_plugin_impl::schedule_production_loop() { [&chain,weak_this,cid=++_timer_corelation_id](const boost::system::error_code& ec) { auto self = weak_this.lock(); if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) { - fc_dlog( _log, "Produce block timer running at ${time}", ("time", fc::time_point::now()) ); // pending_block_state expected, but can't assert inside async_wait auto block_num = chain.is_building_block() ? chain.head_block_num() + 1 : 0; + fc_dlog( _log, "Produce block timer for ${num} running at ${time}", ("num", block_num)("time", fc::time_point::now()) ); auto res = self->maybe_produce_block(); fc_dlog( _log, "Producing Block #${num} returned: ${res}", ("num", block_num)( "res", res ) ); } @@ -1831,9 +1858,8 @@ void producer_plugin_impl::schedule_delayed_production_loop(const std::weak_ptr< for (const auto&p: _producers) { auto next_producer_block_time = calculate_next_block_time(p, current_block_time); if (next_producer_block_time) { - auto producer_wake_up_time = *next_producer_block_time - fc::microseconds(config::block_interval_us); + auto producer_wake_up_time = *next_producer_block_time; if (wake_up_time) { - // wake up with a full block interval to the deadline wake_up_time = std::min(*wake_up_time, producer_wake_up_time); } else { wake_up_time = producer_wake_up_time; @@ -1870,7 +1896,7 @@ bool producer_plugin_impl::maybe_produce_block() { fc_dlog(_log, "Aborting block due to produce_block error"); chain::controller& chain = chain_plug->chain(); - chain.abort_block(); + _unapplied_transactions.add_aborted( chain.abort_block() ); return false; } @@ -1895,9 +1921,21 @@ void producer_plugin_impl::produce_block() { chain::controller& chain = chain_plug->chain(); const auto& hbs = chain.head_block_state(); EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "pending_block_state does not exist but it should, another plugin may have corrupted 
it"); - auto signature_provider_itr = _signature_providers.find( chain.pending_block_signing_key() ); - EOS_ASSERT(signature_provider_itr != _signature_providers.end(), producer_priv_key_not_found, "Attempting to produce a block for which we don't have the private key"); + + const auto& auth = chain.pending_block_signing_authority(); + std::vector> relevant_providers; + + relevant_providers.reserve(_signature_providers.size()); + + producer_authority::for_each_key(auth, [&](const public_key_type& key){ + const auto& iter = _signature_providers.find(key); + if (iter != _signature_providers.end()) { + relevant_providers.emplace_back(iter->second); + } + }); + + EOS_ASSERT(relevant_providers.size() > 0, producer_priv_key_not_found, "Attempting to produce a block for which we don't have any relevant private keys"); if (_protocol_features_signaled) { _protocol_features_to_activate.clear(); // clear _protocol_features_to_activate as it is already set in pending_block @@ -1907,7 +1945,14 @@ void producer_plugin_impl::produce_block() { //idump( (fc::time_point::now() - chain.pending_block_time()) ); chain.finalize_block( [&]( const digest_type& d ) { auto debug_logger = maybe_make_debug_time_logger(); - return signature_provider_itr->second(d); + vector sigs; + sigs.reserve(relevant_providers.size()); + + // sign with all relevant public keys + for (const auto& p : relevant_providers) { + sigs.emplace_back(p.get()(d)); + } + return sigs; } ); chain.commit_block(); @@ -1915,7 +1960,7 @@ void producer_plugin_impl::produce_block() { block_state_ptr new_bs = chain.head_block_state(); ilog("Produced block ${id}... 
#${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, confirmed: ${confs}]", - ("p",new_bs->header.producer)("id",fc::variant(new_bs->id).as_string().substr(0,16)) + ("p",new_bs->header.producer)("id",new_bs->id.str().substr(8,16)) ("n",new_bs->block_num)("t",new_bs->header.timestamp) ("count",new_bs->block->transactions.size())("lib",chain.last_irreversible_block_num())("confs", new_bs->header.confirmed)); diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp index 12d43d82139..3bf19eb012a 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -12,6 +8,7 @@ #include #include #include +#include namespace eosio { @@ -31,8 +28,8 @@ namespace eosio { * payload */ -inline uint64_t ship_magic(uint32_t version) { return N(ship) | version; } -inline bool is_ship(uint64_t magic) { return (magic & 0xffff'ffff'0000'0000) == N(ship); } +inline uint64_t ship_magic(uint32_t version) { return N(ship).to_uint64_t() | version; } +inline bool is_ship(uint64_t magic) { return (magic & 0xffff'ffff'0000'0000) == N(ship).to_uint64_t(); } inline uint32_t get_ship_version(uint64_t magic) { return magic; } inline bool is_ship_supported_version(uint64_t magic) { return get_ship_version(magic) == 0; } static const uint32_t ship_current_version = 0; @@ -51,8 +48,8 @@ class state_history_log { const char* const name = ""; std::string log_filename; std::string index_filename; - std::fstream log; - std::fstream index; + fc::cfile log; + fc::cfile index; uint32_t _begin_block = 0; uint32_t _end_block = 0; chain::block_id_type last_block_id; @@ -108,16 +105,16 @@ class state_history_log { if (block_num < _end_block) 
truncate(block_num); - log.seekg(0, std::ios_base::end); - uint64_t pos = log.tellg(); + log.seek_end(0); + uint64_t pos = log.tellp(); write_header(header); write_payload(log); - uint64_t end = log.tellg(); + uint64_t end = log.tellp(); EOS_ASSERT(end == pos + state_history_log_header_serial_size + header.payload_size, chain::plugin_exception, "wrote payload with incorrect size to ${name}.log", ("name", name)); log.write((char*)&pos, sizeof(pos)); - index.seekg(0, std::ios_base::end); + index.seek_end(0); index.write((char*)&pos, sizeof(pos)); if (_begin_block == _end_block) _begin_block = block_num; @@ -125,11 +122,11 @@ class state_history_log { last_block_id = header.block_id; } - // returns stream positioned at payload - std::fstream& get_entry(uint32_t block_num, state_history_log_header& header) { + // returns cfile positioned at payload + fc::cfile& get_entry(uint32_t block_num, state_history_log_header& header) { EOS_ASSERT(block_num >= _begin_block && block_num < _end_block, chain::plugin_exception, "read non-existing block in ${name}.log", ("name", name)); - log.seekg(get_pos(block_num)); + log.seek(get_pos(block_num)); read_header(header); return log; } @@ -144,13 +141,13 @@ class state_history_log { bool get_last_block(uint64_t size) { state_history_log_header header; uint64_t suffix; - log.seekg(size - sizeof(suffix)); + log.seek(size - sizeof(suffix)); log.read((char*)&suffix, sizeof(suffix)); if (suffix > size || suffix + state_history_log_header_serial_size > size) { elog("corrupt ${name}.log (2)", ("name", name)); return false; } - log.seekg(suffix); + log.seek(suffix); read_header(header, false); if (!is_ship(header.magic) || !is_ship_supported_version(header.magic) || suffix + state_history_log_header_serial_size + header.payload_size + sizeof(suffix) != size) { @@ -174,7 +171,7 @@ class state_history_log { state_history_log_header header; if (pos + state_history_log_header_serial_size > size) break; - log.seekg(pos); + log.seek(pos); 
read_header(header, false); uint64_t suffix; if (!is_ship(header.magic) || !is_ship_supported_version(header.magic) || header.payload_size > size || @@ -183,7 +180,7 @@ class state_history_log { "${name}.log has an unsupported version", ("name", name)); break; } - log.seekg(pos + state_history_log_header_serial_size + header.payload_size); + log.seek(pos + state_history_log_header_serial_size + header.payload_size); log.read((char*)&suffix, sizeof(suffix)); if (suffix != pos) break; @@ -195,17 +192,18 @@ class state_history_log { } log.flush(); boost::filesystem::resize_file(log_filename, pos); - log.sync(); + log.flush(); EOS_ASSERT(get_last_block(pos), chain::plugin_exception, "recover ${name}.log failed", ("name", name)); } void open_log() { - log.open(log_filename, std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::app); - log.seekg(0, std::ios_base::end); - uint64_t size = log.tellg(); + log.set_file_path( log_filename ); + log.open( "a+b" ); // std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::app + log.seek_end(0); + uint64_t size = log.tellp(); if (size >= state_history_log_header_serial_size) { state_history_log_header header; - log.seekg(0); + log.seek(0); read_header(header, false); EOS_ASSERT(is_ship(header.magic) && is_ship_supported_version(header.magic) && state_history_log_header_serial_size + header.payload_size + sizeof(uint64_t) <= size, @@ -222,30 +220,31 @@ class state_history_log { } void open_index() { - index.open(index_filename, std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::app); - index.seekg(0, std::ios_base::end); - if (index.tellg() == (static_cast(_end_block) - _begin_block) * sizeof(uint64_t)) + index.set_file_path( index_filename ); + index.open( "a+b" ); // std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::app + index.seek_end(0); + if (index.tellp() == (static_cast(_end_block) - _begin_block) * 
sizeof(uint64_t)) return; ilog("Regenerate ${name}.index", ("name", name)); index.close(); - index.open(index_filename, std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::trunc); + index.open( "w+b" ); // std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::trunc - log.seekg(0, std::ios_base::end); - uint64_t size = log.tellg(); + log.seek_end(0); + uint64_t size = log.tellp(); uint64_t pos = 0; uint32_t num_found = 0; while (pos < size) { state_history_log_header header; EOS_ASSERT(pos + state_history_log_header_serial_size <= size, chain::plugin_exception, "corrupt ${name}.log (6)", ("name", name)); - log.seekg(pos); + log.seek(pos); read_header(header, false); uint64_t suffix_pos = pos + state_history_log_header_serial_size + header.payload_size; uint64_t suffix; EOS_ASSERT(is_ship(header.magic) && is_ship_supported_version(header.magic) && suffix_pos + sizeof(suffix) <= size, chain::plugin_exception, "corrupt ${name}.log (7)", ("name", name)); - log.seekg(suffix_pos); + log.seek(suffix_pos); log.read((char*)&suffix, sizeof(suffix)); // ilog("block ${b} at ${pos}-${end} suffix=${suffix} file_size=${fs}", // ("b", header.block_num)("pos", pos)("end", suffix_pos + sizeof(suffix))("suffix", suffix)("fs", size)); @@ -262,7 +261,7 @@ class state_history_log { uint64_t get_pos(uint32_t block_num) { uint64_t pos; - index.seekg((block_num - _begin_block) * sizeof(pos)); + index.seek((block_num - _begin_block) * sizeof(pos)); index.read((char*)&pos, sizeof(pos)); return pos; } @@ -273,22 +272,22 @@ class state_history_log { uint64_t num_removed = 0; if (block_num <= _begin_block) { num_removed = _end_block - _begin_block; - log.seekg(0); - index.seekg(0); + log.seek(0); + index.seek(0); boost::filesystem::resize_file(log_filename, 0); boost::filesystem::resize_file(index_filename, 0); _begin_block = _end_block = 0; } else { num_removed = _end_block - block_num; uint64_t pos = get_pos(block_num); - log.seekg(0); - 
index.seekg(0); + log.seek(0); + index.seek(0); boost::filesystem::resize_file(log_filename, pos); boost::filesystem::resize_file(index_filename, (block_num - _begin_block) * sizeof(uint64_t)); _end_block = block_num; } - log.sync(); - index.sync(); + log.flush(); + index.flush(); ilog("fork or replay: removed ${n} blocks from ${name}.log", ("n", num_removed)("name", name)); } }; // state_history_log diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp index a682b205e00..66f8de30b53 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 6685eadadd5..3f42d0792e9 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -133,7 +129,7 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(obj.obj.name.value)); + fc::raw::pack(ds, as_type(obj.obj.name.to_uint64_t())); fc::raw::pack(ds, as_type(obj.obj.creation_date)); fc::raw::pack(ds, as_type(obj.obj.abi)); return ds; @@ -143,7 +139,7 @@ template datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { 
fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(obj.obj.name.value)); + fc::raw::pack(ds, as_type(obj.obj.name.to_uint64_t())); fc::raw::pack(ds, as_type(obj.obj.is_privileged())); fc::raw::pack(ds, as_type(obj.obj.last_code_update)); bool has_code = obj.obj.code_hash != eosio::chain::digest_type(); @@ -169,10 +165,10 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(obj.obj.code.value)); - fc::raw::pack(ds, as_type(obj.obj.scope.value)); - fc::raw::pack(ds, as_type(obj.obj.table.value)); - fc::raw::pack(ds, as_type(obj.obj.payer.value)); + fc::raw::pack(ds, as_type(obj.obj.code.to_uint64_t())); + fc::raw::pack(ds, as_type(obj.obj.scope.to_uint64_t())); + fc::raw::pack(ds, as_type(obj.obj.table.to_uint64_t())); + fc::raw::pack(ds, as_type(obj.obj.payer.to_uint64_t())); return ds; } @@ -181,11 +177,11 @@ datastream& operator<<(datastream& ds, const history_context_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(obj.context.code.value)); - fc::raw::pack(ds, as_type(obj.context.scope.value)); - fc::raw::pack(ds, as_type(obj.context.table.value)); + fc::raw::pack(ds, as_type(obj.context.code.to_uint64_t())); + fc::raw::pack(ds, as_type(obj.context.scope.to_uint64_t())); + fc::raw::pack(ds, as_type(obj.context.table.to_uint64_t())); fc::raw::pack(ds, as_type(obj.obj.primary_key)); - fc::raw::pack(ds, as_type(obj.obj.payer.value)); + fc::raw::pack(ds, as_type(obj.obj.payer.to_uint64_t())); fc::raw::pack(ds, as_type(obj.obj.value)); return ds; } @@ -224,11 +220,11 @@ template datastream& serialize_secondary_index(datastream& ds, const eosio::chain::table_id_object& context, const T& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(context.code.value)); - fc::raw::pack(ds, as_type(context.scope.value)); - fc::raw::pack(ds, 
as_type(context.table.value)); + fc::raw::pack(ds, as_type(context.code.to_uint64_t())); + fc::raw::pack(ds, as_type(context.scope.to_uint64_t())); + fc::raw::pack(ds, as_type(context.table.to_uint64_t())); fc::raw::pack(ds, as_type(obj.primary_key)); - fc::raw::pack(ds, as_type(obj.payer.value)); + fc::raw::pack(ds, as_type(obj.payer.to_uint64_t())); serialize_secondary_index_data(ds, obj.secondary_key); return ds; } @@ -269,18 +265,27 @@ datastream& operator<<( } template -datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { - fc::raw::pack(ds, as_type(obj.obj.producer_name.value)); - fc::raw::pack(ds, as_type(obj.obj.block_signing_key)); +datastream& operator<<(datastream& ds, + const history_serial_wrapper& obj) { + fc::raw::pack(ds, as_type(obj.obj.threshold)); + history_serialize_container(ds, obj.db, + as_type>(obj.obj.keys)); + +} + +template +datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { + fc::raw::pack(ds, as_type(obj.obj.producer_name.to_uint64_t())); + fc::raw::pack(ds, as_type(obj.obj.authority)); return ds; } template datastream& operator<<(datastream& ds, - const history_serial_wrapper& obj) { + const history_serial_wrapper& obj) { fc::raw::pack(ds, as_type(obj.obj.version)); history_serialize_container(ds, obj.db, - as_type>(obj.obj.producers)); + as_type>(obj.obj.producers)); return ds; } @@ -310,11 +315,12 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { - fc::raw::pack(ds, fc::unsigned_int(0)); + fc::raw::pack(ds, fc::unsigned_int(1)); fc::raw::pack(ds, as_type>(obj.obj.proposed_schedule_block_num)); fc::raw::pack(ds, make_history_serial_wrapper( - obj.db, as_type(obj.obj.proposed_schedule))); + obj.db, as_type(obj.obj.proposed_schedule))); fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(obj.obj.configuration))); + fc::raw::pack(ds, as_type(obj.obj.chain_id)); return ds; } @@ -323,9 
+329,9 @@ template datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(obj.obj.sender.value)); + fc::raw::pack(ds, as_type(obj.obj.sender.to_uint64_t())); fc::raw::pack(ds, as_type<__uint128_t>(obj.obj.sender_id)); - fc::raw::pack(ds, as_type(obj.obj.payer.value)); + fc::raw::pack(ds, as_type(obj.obj.payer.to_uint64_t())); fc::raw::pack(ds, as_type(obj.obj.trx_id)); fc::raw::pack(ds, as_type(obj.obj.packed_trx)); return ds; @@ -349,16 +355,16 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper -datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { - fc::raw::pack(ds, as_type(obj.obj.key)); +datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { + fc::raw::pack(ds, as_type(obj.obj.key)); fc::raw::pack(ds, as_type(obj.obj.weight)); return ds; } template datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { - fc::raw::pack(ds, as_type(obj.obj.actor.value)); - fc::raw::pack(ds, as_type(obj.obj.permission.value)); + fc::raw::pack(ds, as_type(obj.obj.actor.to_uint64_t())); + fc::raw::pack(ds, as_type(obj.obj.permission.to_uint64_t())); return ds; } @@ -389,8 +395,8 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(obj.obj.owner.value)); - fc::raw::pack(ds, as_type(obj.obj.name.value)); + fc::raw::pack(ds, as_type(obj.obj.owner.to_uint64_t())); + fc::raw::pack(ds, as_type(obj.obj.name.to_uint64_t())); if (obj.obj.parent._id) { auto& index = obj.db.get_index(); const auto* parent = index.find(obj.obj.parent); @@ -401,7 +407,7 @@ datastream& operator<<(datastream& ds, const history_serial_wrappersecond; } - fc::raw::pack(ds, as_type(parent->name.value)); + fc::raw::pack(ds, as_type(parent->name.to_uint64_t())); } else { fc::raw::pack(ds, 
as_type(0)); } @@ -414,10 +420,10 @@ template datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(obj.obj.account.value)); - fc::raw::pack(ds, as_type(obj.obj.code.value)); - fc::raw::pack(ds, as_type(obj.obj.message_type.value)); - fc::raw::pack(ds, as_type(obj.obj.required_permission.value)); + fc::raw::pack(ds, as_type(obj.obj.account.to_uint64_t())); + fc::raw::pack(ds, as_type(obj.obj.code.to_uint64_t())); + fc::raw::pack(ds, as_type(obj.obj.message_type.to_uint64_t())); + fc::raw::pack(ds, as_type(obj.obj.required_permission.to_uint64_t())); return ds; } @@ -427,7 +433,7 @@ datastream& operator<<(datastream& EOS_ASSERT(!obj.obj.pending, eosio::chain::plugin_exception, "accepted_block sent while resource_limits_object in pending state"); fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(obj.obj.owner.value)); + fc::raw::pack(ds, as_type(obj.obj.owner.to_uint64_t())); fc::raw::pack(ds, as_type(obj.obj.net_weight)); fc::raw::pack(ds, as_type(obj.obj.cpu_weight)); fc::raw::pack(ds, as_type(obj.obj.ram_bytes)); @@ -448,7 +454,7 @@ template datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(obj.obj.owner.value)); + fc::raw::pack(ds, as_type(obj.obj.owner.to_uint64_t())); fc::raw::pack(ds, make_history_serial_wrapper( obj.db, as_type(obj.obj.net_usage))); fc::raw::pack(ds, make_history_serial_wrapper( @@ -516,8 +522,8 @@ operator<<(datastream& template datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { - fc::raw::pack(ds, as_type(obj.obj.account.value)); - fc::raw::pack(ds, as_type(obj.obj.name.value)); + fc::raw::pack(ds, as_type(obj.obj.account.to_uint64_t())); + fc::raw::pack(ds, as_type(obj.obj.name.to_uint64_t())); history_serialize_container(ds, obj.db, as_type>(obj.obj.authorization)); fc::raw::pack(ds, as_type(obj.obj.data)); return 
ds; @@ -526,7 +532,7 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(obj.obj.receiver.value)); + fc::raw::pack(ds, as_type(obj.obj.receiver.to_uint64_t())); fc::raw::pack(ds, as_type(obj.obj.act_digest)); fc::raw::pack(ds, as_type(obj.obj.global_sequence)); fc::raw::pack(ds, as_type(obj.obj.recv_sequence)); @@ -538,7 +544,7 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { - fc::raw::pack(ds, as_type(obj.obj.account.value)); + fc::raw::pack(ds, as_type(obj.obj.account.to_uint64_t())); fc::raw::pack(ds, as_type(obj.obj.delta)); return ds; } @@ -569,7 +575,7 @@ datastream& operator<<(datastream& ds, const history_context_wrapper(*obj.obj.receipt))); } - fc::raw::pack(ds, as_type(obj.obj.receiver.value)); + fc::raw::pack(ds, as_type(obj.obj.receiver.to_uint64_t())); fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(obj.obj.act))); fc::raw::pack(ds, as_type(obj.obj.context_free)); fc::raw::pack(ds, as_type(debug_mode ? 
obj.obj.elapsed.count() : 0)); diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 97984b38af9..6c5b8bc514a 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #include #include #include @@ -99,7 +94,7 @@ bool include_delta(const eosio::chain::resource_limits::resource_limits_state_ob bool include_delta(const eosio::chain::account_metadata_object& old, const eosio::chain::account_metadata_object& curr) { return // - old.name.value != curr.name.value || // + old.name != curr.name || // old.is_privileged() != curr.is_privileged() || // old.last_code_update != curr.last_code_update || // old.vm_type != curr.vm_type || // diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index 9d5324b0bbf..766c747d3d1 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -311,6 +311,24 @@ extern const char* const state_history_plugin_abi = R"({ { "type": "producer_key[]", "name": "producers" } ] }, + { + "name": "block_signing_authority_v0", "fields": [ + { "type": "uint32", "name": "threshold" }, + { "type": "key_weight[]", "name": "keys" } + ] + }, + { + "name": "producer_authority", "fields": [ + { "type": "name", "name": "producer_name" }, + { "type": "block_signing_authority", "name": "authority" } + ] + }, + { + "name": "producer_authority_schedule", "fields": [ + { "type": "uint32", "name": "version" }, + { "type": "producer_authority[]", "name": "producers" } + ] + }, { "name": "chain_config_v0", "fields": [ { "type": "uint64", "name": "max_block_net_usage" }, @@ -339,6 +357,14 @@ extern const char* const state_history_plugin_abi = R"({ { "type": "chain_config", "name": "configuration" } ] }, 
+ { + "name": "global_property_v1", "fields": [ + { "type": "uint32?", "name": "proposed_schedule_block_num" }, + { "type": "producer_authority_schedule", "name": "proposed_schedule" }, + { "type": "chain_config", "name": "configuration" }, + { "type": "checksum256", "name": "chain_id" } + ] + }, { "name": "generated_transaction_v0", "fields": [ { "type": "name", "name": "sender" }, @@ -492,7 +518,7 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "contract_index_double", "types": ["contract_index_double_v0"] }, { "name": "contract_index_long_double", "types": ["contract_index_long_double_v0"] }, { "name": "chain_config", "types": ["chain_config_v0"] }, - { "name": "global_property", "types": ["global_property_v0"] }, + { "name": "global_property", "types": ["global_property_v0", "global_property_v1"] }, { "name": "generated_transaction", "types": ["generated_transaction_v0"] }, { "name": "activated_protocol_feature", "types": ["activated_protocol_feature_v0"] }, { "name": "protocol_state", "types": ["protocol_state_v0"] }, @@ -504,7 +530,8 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "resource_limits_state", "types": ["resource_limits_state_v0"] }, { "name": "resource_limits_ratio", "types": ["resource_limits_ratio_v0"] }, { "name": "elastic_limit_parameters", "types": ["elastic_limit_parameters_v0"] }, - { "name": "resource_limits_config", "types": ["resource_limits_config_v0"] } + { "name": "resource_limits_config", "types": ["resource_limits_config_v0"] }, + { "name": "block_signing_authority", "types": ["block_signing_authority_v0"] } ], "tables": [ { "name": "account", "type": "account", "key_names": ["name"] }, diff --git a/plugins/template_plugin/include/eosio/template_plugin/template_plugin.hpp b/plugins/template_plugin/include/eosio/template_plugin/template_plugin.hpp index 3217b3b1640..a0acd5745f4 100644 --- a/plugins/template_plugin/include/eosio/template_plugin/template_plugin.hpp +++ 
b/plugins/template_plugin/include/eosio/template_plugin/template_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/plugins/template_plugin/template_plugin.cpp b/plugins/template_plugin/template_plugin.cpp index d8a738c30bc..13085f3beb2 100644 --- a/plugins/template_plugin/template_plugin.cpp +++ b/plugins/template_plugin/template_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include namespace eosio { diff --git a/plugins/test_control_api_plugin/include/eosio/test_control_api_plugin/test_control_api_plugin.hpp b/plugins/test_control_api_plugin/include/eosio/test_control_api_plugin/test_control_api_plugin.hpp index b9c165b8993..df536f4336e 100644 --- a/plugins/test_control_api_plugin/include/eosio/test_control_api_plugin/test_control_api_plugin.hpp +++ b/plugins/test_control_api_plugin/include/eosio/test_control_api_plugin/test_control_api_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/plugins/test_control_api_plugin/test_control_api_plugin.cpp b/plugins/test_control_api_plugin/test_control_api_plugin.cpp index a932a27cad9..0bac41fa1c0 100644 --- a/plugins/test_control_api_plugin/test_control_api_plugin.cpp +++ b/plugins/test_control_api_plugin/test_control_api_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include diff --git a/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp b/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp index 96badb10c83..df28318fce2 100644 --- a/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp +++ b/plugins/test_control_plugin/include/eosio/test_control_plugin/test_control_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git 
a/plugins/test_control_plugin/test_control_plugin.cpp b/plugins/test_control_plugin/test_control_plugin.cpp index dbde59853ef..3ec82e722b9 100644 --- a/plugins/test_control_plugin/test_control_plugin.cpp +++ b/plugins/test_control_plugin/test_control_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include @@ -64,7 +60,8 @@ void test_control_plugin_impl::accepted_block(const chain::block_state_ptr& bsp) void test_control_plugin_impl::process_next_block_state(const chain::block_state_ptr& bsp) { const auto block_time = _chain.head_block_time() + fc::microseconds(chain::config::block_interval_us); - const auto producer_name = bsp->get_scheduled_producer(block_time).producer_name; + const auto& producer_authority = bsp->get_scheduled_producer(block_time); + const auto producer_name = producer_authority.producer_name; if (_producer != account_name()) ilog("producer ${cprod}, looking for ${lprod}", ("cprod", producer_name.to_string())("lprod", _producer.to_string())); diff --git a/plugins/txn_test_gen_plugin/include/eosio/txn_test_gen_plugin/txn_test_gen_plugin.hpp b/plugins/txn_test_gen_plugin/include/eosio/txn_test_gen_plugin/txn_test_gen_plugin.hpp index ebfe3bfb77c..8afee7e0907 100644 --- a/plugins/txn_test_gen_plugin/include/eosio/txn_test_gen_plugin/txn_test_gen_plugin.hpp +++ b/plugins/txn_test_gen_plugin/include/eosio/txn_test_gen_plugin/txn_test_gen_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp old mode 100755 new mode 100644 index 670114ea85c..6e5b8b32d9a --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -1,10 +1,7 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include +#include #include #include @@ -41,7 +38,6 @@ 
namespace eosio { static appbase::abstract_plugin& _txn_test_gen_plugin = app().register_plugin(); using namespace eosio::chain; -using io_work_t = boost::asio::executor_work_guard; #define CALL(api_name, api_handle, call_name, INVOKE, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ @@ -100,10 +96,8 @@ struct txn_test_gen_plugin_impl { uint64_t _total_us = 0; uint64_t _txcount = 0; - std::shared_ptr gen_ioc; - optional gen_ioc_work; uint16_t thread_pool_size; - optional thread_pool; + fc::optional thread_pool; std::shared_ptr timer; name newaccountA; name newaccountB; @@ -113,7 +107,7 @@ struct txn_test_gen_plugin_impl { chain_plugin& cp = app().get_plugin(); for (size_t i = 0; i < trxs->size(); ++i) { - cp.accept_transaction( packed_transaction(trxs->at(i)), [=](const fc::static_variant& result){ + cp.accept_transaction( std::make_shared(trxs->at(i)), [=](const fc::static_variant& result){ if (result.contains()) { next(result.get()); } else { @@ -166,21 +160,21 @@ struct txn_test_gen_plugin_impl { auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_A_pub_key, 1}}, {}}; auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_A_pub_key, 1}}, {}}; - trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountA, owner_auth, active_auth}); + trx.actions.emplace_back(vector{{creator,name("active")}}, newaccount{creator, newaccountA, owner_auth, active_auth}); } //create "B" account { auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_B_pub_key, 1}}, {}}; auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_B_pub_key, 1}}, {}}; - trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountB, owner_auth, active_auth}); + trx.actions.emplace_back(vector{{creator,name("active")}}, newaccount{creator, newaccountB, owner_auth, active_auth}); } //create "T" account { auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; auto 
active_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; - trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountT, owner_auth, active_auth}); + trx.actions.emplace_back(vector{{creator,name("active")}}, newaccount{creator, newaccountT, owner_auth, active_auth}); } trx.expiration = cc.head_block_time() + fc::seconds(180); @@ -199,13 +193,13 @@ struct txn_test_gen_plugin_impl { handler.account = newaccountT; handler.code.assign(wasm.begin(), wasm.end()); - trx.actions.emplace_back( vector{{newaccountT,"active"}}, handler); + trx.actions.emplace_back( vector{{newaccountT,name("active")}}, handler); { setabi handler; handler.account = newaccountT; handler.abi = fc::raw::pack(json::from_string(contracts::eosio_token_abi().data()).as()); - trx.actions.emplace_back( vector{{newaccountT,"active"}}, handler); + trx.actions.emplace_back( vector{{newaccountT,name("active")}}, handler); } { @@ -305,17 +299,13 @@ struct txn_test_gen_plugin_impl { batch = batch_size/2; nonce_prefix = 0; - gen_ioc = std::make_shared(); - gen_ioc_work.emplace( boost::asio::make_work_guard(*gen_ioc) ); - thread_pool.emplace( thread_pool_size ); - for( uint16_t i = 0; i < thread_pool_size; i++ ) - boost::asio::post( *thread_pool, [ioc = gen_ioc]() { ioc->run(); } ); - timer = std::make_shared(*gen_ioc); + thread_pool.emplace( "txntest", thread_pool_size ); + timer = std::make_shared(thread_pool->get_executor()); ilog("Started transaction test plugin; generating ${p} transactions every ${m} ms by ${t} load generation threads", ("p", batch_size) ("m", period) ("t", thread_pool_size)); - boost::asio::post( *gen_ioc, [this]() { + boost::asio::post( thread_pool->get_executor(), [this]() { arm_timer(boost::asio::high_resolution_timer::clock_type::now()); }); return "success"; @@ -323,7 +313,7 @@ struct txn_test_gen_plugin_impl { void arm_timer(boost::asio::high_resolution_timer::time_point s) { timer->expires_at(s + 
std::chrono::milliseconds(timer_timeout)); - boost::asio::post( *gen_ioc, [this]() { + boost::asio::post( thread_pool->get_executor(), [this]() { send_transaction([this](const fc::exception_ptr& e){ if (e) { elog("pushing transaction failed: ${e}", ("e", e->to_detail_string())); @@ -368,7 +358,7 @@ struct txn_test_gen_plugin_impl { { signed_transaction trx; trx.actions.push_back(act_a_to_b); - trx.context_free_actions.emplace_back(action({}, config::null_account_name, "nonce", fc::raw::pack( std::to_string(nonce_prefix)+std::to_string(nonce++) ))); + trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), fc::raw::pack( std::to_string(nonce_prefix)+std::to_string(nonce++) ))); trx.set_reference_block(reference_block_id); trx.expiration = cc.head_block_time() + fc::seconds(30); trx.max_net_usage_words = 100; @@ -379,7 +369,7 @@ struct txn_test_gen_plugin_impl { { signed_transaction trx; trx.actions.push_back(act_b_to_a); - trx.context_free_actions.emplace_back(action({}, config::null_account_name, "nonce", fc::raw::pack( std::to_string(nonce_prefix)+std::to_string(nonce++) ))); + trx.context_free_actions.emplace_back(action({}, config::null_account_name, name("nonce"), fc::raw::pack( std::to_string(nonce_prefix)+std::to_string(nonce++) ))); trx.set_reference_block(reference_block_id); trx.expiration = cc.head_block_time() + fc::seconds(30); trx.max_net_usage_words = 100; @@ -391,7 +381,6 @@ struct txn_test_gen_plugin_impl { next(e.dynamic_copy_exception()); } - ilog("send ${c} transactions", ("c",trxs.size())); push_transactions(std::move(trxs), next); } @@ -400,14 +389,9 @@ struct txn_test_gen_plugin_impl { throw fc::exception(fc::invalid_operation_exception_code); timer->cancel(); running = false; - if( gen_ioc_work ) - gen_ioc_work->reset(); - if( gen_ioc ) - gen_ioc->stop(); - if( thread_pool ) { - thread_pool->join(); + if( thread_pool ) thread_pool->stop(); - } + ilog("Stopping transaction generation test"); if (_txcount) { 
@@ -445,9 +429,9 @@ void txn_test_gen_plugin::plugin_initialize(const variables_map& options) { my->txn_reference_block_lag = options.at( "txn-reference-block-lag" ).as(); my->thread_pool_size = options.at( "txn-test-gen-threads" ).as(); const std::string thread_pool_account_prefix = options.at( "txn-test-gen-account-prefix" ).as(); - my->newaccountA = thread_pool_account_prefix + "a"; - my->newaccountB = thread_pool_account_prefix + "b"; - my->newaccountT = thread_pool_account_prefix + "t"; + my->newaccountA = eosio::chain::name(thread_pool_account_prefix + "a"); + my->newaccountB = eosio::chain::name(thread_pool_account_prefix + "b"); + my->newaccountT = eosio::chain::name(thread_pool_account_prefix + "t"); EOS_ASSERT( my->thread_pool_size > 0, chain::plugin_config_exception, "txn-test-gen-threads ${num} must be greater than 0", ("num", my->thread_pool_size) ); } FC_LOG_AND_RETHROW() diff --git a/plugins/wallet_api_plugin/include/eosio/wallet_api_plugin/wallet_api_plugin.hpp b/plugins/wallet_api_plugin/include/eosio/wallet_api_plugin/wallet_api_plugin.hpp index 611eed638d9..e3ba9dbe1b1 100644 --- a/plugins/wallet_api_plugin/include/eosio/wallet_api_plugin/wallet_api_plugin.hpp +++ b/plugins/wallet_api_plugin/include/eosio/wallet_api_plugin/wallet_api_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/plugins/wallet_api_plugin/wallet_api_plugin.cpp b/plugins/wallet_api_plugin/wallet_api_plugin.cpp index 1750150885c..235aa515e38 100644 --- a/plugins/wallet_api_plugin/wallet_api_plugin.cpp +++ b/plugins/wallet_api_plugin/wallet_api_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet.hpp index 900577d082c..92f2c0e2276 100644 --- a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet.hpp +++ 
b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_api.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_api.hpp index 61929b04733..bdca3ee93e1 100644 --- a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_api.hpp +++ b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_api.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_manager.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_manager.hpp index e61fb7b7617..07323a27158 100644 --- a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_manager.hpp +++ b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_manager.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_plugin.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_plugin.hpp index c23911fa6fb..17ddc7aea69 100644 --- a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_plugin.hpp +++ b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_plugin.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include #include diff --git a/plugins/wallet_plugin/se_wallet.cpp b/plugins/wallet_plugin/se_wallet.cpp index 2f9ccfffc06..84cda429e88 100644 --- a/plugins/wallet_plugin/se_wallet.cpp +++ b/plugins/wallet_plugin/se_wallet.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include @@ -362,6 +358,7 @@ bool se_wallet::import_key(string wif_key) { } string se_wallet::create_key(string key_type) { + EOS_ASSERT(key_type.empty() || key_type == "R1", chain::unsupported_key_type_exception, "Secure Enclave 
wallet only supports R1 keys"); return (string)my->create(); } diff --git a/plugins/wallet_plugin/wallet.cpp b/plugins/wallet_plugin/wallet.cpp index a40027cb0a9..6c64de75491 100644 --- a/plugins/wallet_plugin/wallet.cpp +++ b/plugins/wallet_plugin/wallet.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include diff --git a/plugins/wallet_plugin/wallet_manager.cpp b/plugins/wallet_plugin/wallet_manager.cpp index 43fa37bc61a..304fdccceb8 100644 --- a/plugins/wallet_plugin/wallet_manager.cpp +++ b/plugins/wallet_plugin/wallet_manager.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/plugins/wallet_plugin/wallet_plugin.cpp b/plugins/wallet_plugin/wallet_plugin.cpp index cfd435390a6..1b1d6aa602b 100644 --- a/plugins/wallet_plugin/wallet_plugin.cpp +++ b/plugins/wallet_plugin/wallet_plugin.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/plugins/wallet_plugin/yubihsm_wallet.cpp b/plugins/wallet_plugin/yubihsm_wallet.cpp index 0f367457fa0..cce98fc69e5 100644 --- a/plugins/wallet_plugin/yubihsm_wallet.cpp +++ b/plugins/wallet_plugin/yubihsm_wallet.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include @@ -15,8 +11,6 @@ #include #include -#include - namespace eosio { namespace wallet { using namespace fc::crypto::r1; @@ -257,11 +251,13 @@ bool yubihsm_wallet::import_key(string wif_key) { } string yubihsm_wallet::create_key(string key_type) { + EOS_ASSERT(key_type.empty() || key_type == "R1", chain::unsupported_key_type_exception, "YubiHSM wallet only supports R1 keys"); return (string)my->create(); } bool yubihsm_wallet::remove_key(string key) { FC_ASSERT(!is_locked()); + FC_THROW_EXCEPTION(chain::wallet_exception, "YubiHSM wallet does not currently support removal of keys"); return true; } diff --git a/programs/cleos/CMakeLists.txt 
b/programs/cleos/CMakeLists.txt index 0787c5fe937..cc7cf865680 100644 --- a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -10,23 +10,6 @@ if( GPERFTOOLS_FOUND ) list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) endif() -if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/../../.git) - find_package(Git) - if(GIT_FOUND) - execute_process( - COMMAND ${GIT_EXECUTABLE} rev-parse --short=8 HEAD - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/../.." - OUTPUT_VARIABLE "cleos_BUILD_VERSION" - ERROR_QUIET - OUTPUT_STRIP_TRAILING_WHITESPACE) - message(STATUS "Git commit revision: ${cleos_BUILD_VERSION}") - else() - set(cleos_BUILD_VERSION 0) - endif() -else() - set(cleos_BUILD_VERSION 0) -endif() - find_package(Intl REQUIRED) set(LOCALEDIR ${CMAKE_INSTALL_PREFIX}/share/locale) @@ -36,7 +19,7 @@ configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${Intl_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} - PRIVATE appbase chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ${Intl_LIBRARIES} ) + PRIVATE appbase version chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ${Intl_LIBRARIES} ) copy_bin( ${CLI_CLIENT_EXECUTABLE_NAME} ) diff --git a/programs/cleos/config.hpp.in b/programs/cleos/config.hpp.in index d9d5f45b1de..6f424f3cb4b 100644 --- a/programs/cleos/config.hpp.in +++ b/programs/cleos/config.hpp.in @@ -1,14 +1,11 @@ -/** @file - * @copyright defined in eos/LICENSE - * +/** * \warning This file is machine generated. DO NOT EDIT. See config.hpp.in for changes. 
*/ #pragma once namespace eosio { namespace client { namespace config { - constexpr char version_str[] = "${cleos_BUILD_VERSION}"; - constexpr char locale_path[] = "${LOCALEDIR}"; - constexpr char locale_domain[] = "${LOCALEDOMAIN}"; - constexpr char key_store_executable_name[] = "${KEY_STORE_EXECUTABLE_NAME}"; - constexpr char node_executable_name[] = "${NODE_EXECUTABLE_NAME}"; -}}} + constexpr char locale_path[] {"${LOCALEDIR}" }; + constexpr char locale_domain[] {"${LOCALEDOMAIN}" }; + constexpr char key_store_executable_name[]{"${KEY_STORE_EXECUTABLE_NAME}"}; + constexpr char node_executable_name[] {"${NODE_EXECUTABLE_NAME}" }; +} } } diff --git a/programs/cleos/help_text.cpp.in b/programs/cleos/help_text.cpp.in index b97784cef29..5b8ee07d493 100644 --- a/programs/cleos/help_text.cpp.in +++ b/programs/cleos/help_text.cpp.in @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include "help_text.hpp" #include "localize.hpp" #include @@ -144,8 +140,7 @@ const char* error_advice_authority_type_exception = R"=====(Ensure that your aut } )====="; const char* error_advice_action_type_exception = R"=====(Ensure that your action JSON follows the contract's abi!)====="; -const char* error_advice_transaction_type_exception = R"=====(Ensure that your transaction JSON follows the right transaction format! -You can refer to eosio.cdt/libraries/eosiolib/transaction.hpp for reference)====="; +const char* error_advice_transaction_type_exception = R"=====(Ensure that your transaction JSON follows the right transaction format!)====="; const char* error_advice_abi_type_exception = R"=====(Ensure that your abi JSON follows the following format! 
{ "types" : [{ "new_type_name":"type_name", "type":"type_name" }], diff --git a/programs/cleos/help_text.hpp b/programs/cleos/help_text.hpp index dbae493d714..d632d77a40a 100644 --- a/programs/cleos/help_text.hpp +++ b/programs/cleos/help_text.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/programs/cleos/httpc.hpp b/programs/cleos/httpc.hpp index f90e58046c4..5f8e5566f22 100644 --- a/programs/cleos/httpc.hpp +++ b/programs/cleos/httpc.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include "config.hpp" @@ -84,6 +80,7 @@ namespace eosio { namespace client { namespace http { const string chain_func_base = "/v1/chain"; const string get_info_func = chain_func_base + "/get_info"; + const string send_txn_func = chain_func_base + "/send_transaction"; const string push_txn_func = chain_func_base + "/push_transaction"; const string push_txns_func = chain_func_base + "/push_transactions"; const string json_to_bin_func = chain_func_base + "/abi_json_to_bin"; @@ -103,7 +100,6 @@ namespace eosio { namespace client { namespace http { const string get_schedule_func = chain_func_base + "/get_producer_schedule"; const string get_required_keys = chain_func_base + "/get_required_keys"; - const string history_func_base = "/v1/history"; const string get_actions_func = history_func_base + "/get_actions"; const string get_transaction_func = history_func_base + "/get_transaction"; diff --git a/programs/cleos/localize.hpp b/programs/cleos/localize.hpp index ea15ea3009b..c15a12a5006 100644 --- a/programs/cleos/localize.hpp +++ b/programs/cleos/localize.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 80f226c782e..0c827426af1 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -1,11 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - * 
@defgroup eosclienttool EOSIO Command Line Client Reference - * @brief Tool for sending transactions and querying state from @ref nodeos - * @ingroup eosclienttool - */ - /** @defgroup eosclienttool @@ -93,6 +85,8 @@ Usage: ./cleos create account [OPTIONS] creator name OwnerKey ActiveKey #include #include +#include + #pragma push_macro("N") #undef N @@ -179,6 +173,8 @@ bool tx_dont_broadcast = false; bool tx_return_packed = false; bool tx_skip_sign = false; bool tx_print_json = false; +bool tx_use_old_rpc = false; +string tx_json_save_file; bool print_request = false; bool print_response = false; bool no_auto_keosd = false; @@ -208,9 +204,11 @@ void add_standard_transaction_options(CLI::App* cmd, string default_permission = cmd->add_flag("-f,--force-unique", tx_force_unique, localized("force the transaction to be unique. this will consume extra bandwidth and remove any protections against accidently issuing the same transaction multiple times")); cmd->add_flag("-s,--skip-sign", tx_skip_sign, localized("Specify if unlocked wallet keys should be used to sign transaction")); cmd->add_flag("-j,--json", tx_print_json, localized("print result as json")); + cmd->add_option("--json-file", tx_json_save_file, localized("save result in json format into a file")); cmd->add_flag("-d,--dont-broadcast", tx_dont_broadcast, localized("don't broadcast transaction to the network (just print to stdout)")); cmd->add_flag("--return-packed", tx_return_packed, localized("used in conjunction with --dont-broadcast to get the packed transaction")); cmd->add_option("-r,--ref-block", tx_ref_block_num_or_id, (localized("set the reference block num or block id used for TAPOS (Transaction as Proof-of-Stake)"))); + cmd->add_flag("--use-old-rpc", tx_use_old_rpc, localized("use old RPC push_transaction, rather than new RPC send_transaction")); string msg = "An account and permission level to authorize, as in 'account@permission'"; if(!default_permission.empty()) @@ -228,7 +226,7 @@ vector 
get_account_permissions(const vector& pe vector pieces; split(pieces, p, boost::algorithm::is_any_of("@")); if( pieces.size() == 1 ) pieces.push_back( "active" ); - return chain::permission_level{ .actor = pieces[0], .permission = pieces[1] }; + return chain::permission_level{ .actor = name(pieces[0]), .permission = name(pieces[1]) }; }); vector accountPermissions; boost::range::copy(fixedPermissions, back_inserter(accountPermissions)); @@ -276,7 +274,7 @@ string generate_nonce_string() { } chain::action generate_nonce_action() { - return chain::action( {}, config::null_account_name, "nonce", fc::raw::pack(fc::time_point::now().time_since_epoch().count())); + return chain::action( {}, config::null_account_name, name("nonce"), fc::raw::pack(fc::time_point::now().time_since_epoch().count())); } void prompt_for_wallet_password(string& pw, const string& name) { @@ -305,7 +303,7 @@ void sign_transaction(signed_transaction& trx, fc::variant& required_keys, const trx = signed_trx.as(); } -fc::variant push_transaction( signed_transaction& trx, packed_transaction::compression_type compression = packed_transaction::none ) { +fc::variant push_transaction( signed_transaction& trx, packed_transaction::compression_type compression = packed_transaction::compression_type::none ) { auto info = get_info(); if (trx.signatures.size() == 0) { // #5445 can't change txn content if already signed @@ -337,7 +335,17 @@ fc::variant push_transaction( signed_transaction& trx, packed_transaction::compr } if (!tx_dont_broadcast) { - return call(push_txn_func, packed_transaction(trx, compression)); + if (tx_use_old_rpc) { + return call(push_txn_func, packed_transaction(trx, compression)); + } else { + try { + return call(send_txn_func, packed_transaction(trx, compression)); + } + catch (chain::missing_chain_api_plugin_exception &) { + std::cerr << "New RPC send_transaction may not be supported. Add flag --use-old-rpc to use old RPC push_transaction instead." 
<< std::endl; + throw; + } + } } else { if (!tx_return_packed) { return fc::variant(trx); @@ -347,7 +355,7 @@ fc::variant push_transaction( signed_transaction& trx, packed_transaction::compr } } -fc::variant push_actions(std::vector&& actions, packed_transaction::compression_type compression = packed_transaction::none ) { +fc::variant push_actions(std::vector&& actions, packed_transaction::compression_type compression = packed_transaction::compression_type::none ) { signed_transaction trx; trx.actions = std::forward(actions); @@ -425,18 +433,21 @@ fc::variant json_from_file_or_string(const string& file_or_str, fc::json::parse_ { regex r("^[ \t]*[\{\[]"); if ( !regex_search(file_or_str, r) && fc::is_regular_file(file_or_str) ) { - return fc::json::from_file(file_or_str, ptype); + try { + return fc::json::from_file(file_or_str, ptype); + } EOS_RETHROW_EXCEPTIONS(json_parse_exception, "Fail to parse JSON from file: ${file}", ("file", file_or_str)); + } else { - return fc::json::from_string(file_or_str, ptype); + try { + return fc::json::from_string(file_or_str, ptype); + } EOS_RETHROW_EXCEPTIONS(json_parse_exception, "Fail to parse JSON from string: ${string}", ("string", file_or_str)); } } bytes json_or_file_to_bin( const account_name& account, const action_name& action, const string& data_or_filename ) { fc::variant action_args_var; if( !data_or_filename.empty() ) { - try { - action_args_var = json_from_file_or_string(data_or_filename, fc::json::relaxed_parser); - } EOS_RETHROW_EXCEPTIONS(action_type_exception, "Fail to parse action JSON data='${data}'", ("data", data_or_filename)); + action_args_var = json_from_file_or_string(data_or_filename, fc::json::relaxed_parser); } return variant_to_bin( account, action, action_args_var ); } @@ -500,21 +511,49 @@ void print_result( const fc::variant& result ) { try { } FC_CAPTURE_AND_RETHROW( (result) ) } using std::cout; -void send_actions(std::vector&& actions, packed_transaction::compression_type compression = 
packed_transaction::none ) { +void send_actions(std::vector&& actions, packed_transaction::compression_type compression = packed_transaction::compression_type::none ) { + std::ofstream out; + if (tx_json_save_file.length()) { + out.open(tx_json_save_file); + EOSC_ASSERT(!out.fail(), "ERROR: Failed to create file \"${p}\"", ("p", tx_json_save_file)); + } auto result = push_actions( move(actions), compression); + string jsonstr; + if (tx_json_save_file.length()) { + jsonstr = fc::json::to_pretty_string( result ); + out << jsonstr; + out.close(); + } if( tx_print_json ) { - cout << fc::json::to_pretty_string( result ) << endl; + if (jsonstr.length() == 0) { + jsonstr = fc::json::to_pretty_string( result ); + } + cout << jsonstr << endl; } else { print_result( result ); } } -void send_transaction( signed_transaction& trx, packed_transaction::compression_type compression = packed_transaction::none ) { +void send_transaction( signed_transaction& trx, packed_transaction::compression_type compression = packed_transaction::compression_type::none ) { + std::ofstream out; + if (tx_json_save_file.length()) { + out.open(tx_json_save_file); + EOSC_ASSERT(!out.fail(), "ERROR: Failed to create file \"${p}\"", ("p", tx_json_save_file)); + } auto result = push_transaction(trx, compression); + string jsonstr; + if (tx_json_save_file.length()) { + jsonstr = fc::json::to_pretty_string( result ); + out << jsonstr; + out.close(); + } if( tx_print_json ) { - cout << fc::json::to_pretty_string( result ) << endl; + if (jsonstr.length() == 0) { + jsonstr = fc::json::to_pretty_string( result ); + } + cout << jsonstr << endl; } else { print_result( result ); } @@ -522,7 +561,7 @@ void send_transaction( signed_transaction& trx, packed_transaction::compression_ chain::permission_level to_permission_level(const std::string& s) { auto at_pos = s.find('@'); - return permission_level { s.substr(0, at_pos), s.substr(at_pos + 1) }; + return permission_level { name(s.substr(0, at_pos)), 
name(s.substr(at_pos + 1)) }; } chain::action create_newaccount(const name& creator, const name& newaccount, auth_type owner, auth_type active) { @@ -585,8 +624,8 @@ chain::action create_open(const string& contract, const name& owner, symbol sym, ("symbol", sym) ("ram_payer", ram_payer); return action { - get_account_permissions(tx_permission, {ram_payer,config::active_name}), - contract, "open", variant_to_bin( contract, N(open), open_ ) + get_account_permissions(tx_permission, {ram_payer, config::active_name}), + name(contract), N(open), variant_to_bin( name(contract), N(open), open_ ) }; } @@ -600,7 +639,7 @@ chain::action create_transfer(const string& contract, const name& sender, const return action { get_account_permissions(tx_permission, {sender,config::active_name}), - contract, "transfer", variant_to_bin( contract, N(transfer), transfer ) + name(contract), N(transfer), variant_to_bin( name(contract), N(transfer), transfer ) }; } @@ -647,9 +686,11 @@ chain::action create_unlinkauth(const name& account, const name& code, const nam } authority parse_json_authority(const std::string& authorityJsonOrFile) { + fc::variant authority_var = json_from_file_or_string(authorityJsonOrFile); try { - return json_from_file_or_string(authorityJsonOrFile).as(); - } EOS_RETHROW_EXCEPTIONS(authority_type_exception, "Fail to parse Authority JSON '${data}'", ("data",authorityJsonOrFile)) + return authority_var.as(); + } EOS_RETHROW_EXCEPTIONS(authority_type_exception, "Invalid authority format '${data}'", + ("data", fc::json::to_string(authority_var, fc::time_point::maximum()))) } authority parse_json_authority_or_key(const std::string& authorityJsonOrFile) { @@ -700,10 +741,10 @@ inline asset to_asset( const string& s ) { } struct set_account_permission_subcommand { - name account; - name permission; + string account; + string permission; string authority_json_or_file; - name parent; + string parent; bool add_code; bool remove_code; @@ -724,34 +765,34 @@ struct 
set_account_permission_subcommand { authority auth; - bool need_parent = parent.empty() && (permission != name("owner")); + bool need_parent = parent.empty() && (name(permission) != name("owner")); bool need_auth = add_code || remove_code; if ( !need_auth && boost::iequals(authority_json_or_file, "null") ) { - send_actions( { create_deleteauth(account, permission) } ); + send_actions( { create_deleteauth(name(account), name(permission)) } ); return; } if ( need_parent || need_auth ) { - fc::variant json = call(get_account_func, fc::mutable_variant_object("account_name", account.to_string())); + fc::variant json = call(get_account_func, fc::mutable_variant_object("account_name", account)); auto res = json.as(); auto itr = std::find_if(res.permissions.begin(), res.permissions.end(), [&](const auto& perm) { - return perm.perm_name == permission; + return perm.perm_name == name(permission); }); if ( need_parent ) { // see if we can auto-determine the proper parent if ( itr != res.permissions.end() ) { - parent = (*itr).parent; + parent = (*itr).parent.to_string(); } else { // if this is a new permission and there is no parent we default to "active" - parent = name(config::active_name); + parent = config::active_name.to_string(); } } if ( need_auth ) { - auto actor = (authority_json_or_file.empty()) ? account : name(authority_json_or_file); - auto code_name = name(config::eosio_code_name); + auto actor = (authority_json_or_file.empty()) ? 
name(account) : name(authority_json_or_file); + auto code_name = config::eosio_code_name; if ( itr != res.permissions.end() ) { // fetch existing authority @@ -794,7 +835,7 @@ struct set_account_permission_subcommand { // remove code permission, if authority becomes empty by the removal of code permission, delete permission auth.accounts.erase( itr2 ); if ( auth.keys.empty() && auth.accounts.empty() && auth.waits.empty() ) { - send_actions( { create_deleteauth(account, permission) } ); + send_actions( { create_deleteauth(name(account), name(permission)) } ); return; } } else { @@ -825,7 +866,7 @@ struct set_account_permission_subcommand { auth = parse_json_authority_or_key(authority_json_or_file); } - send_actions( { create_updateauth(account, permission, parent, auth) } ); + send_actions( { create_updateauth(name(account), name(permission), name(parent), auth) } ); }); } }; @@ -888,11 +929,13 @@ void try_local_port(uint32_t duration) { void ensure_keosd_running(CLI::App* app) { if (no_auto_keosd) return; - // get, version, net do not require keosd - if (tx_skip_sign || app->got_subcommand("get") || app->got_subcommand("version") || app->got_subcommand("net")) + // get, version, net, convert do not require keosd + if (tx_skip_sign || app->got_subcommand("get") || app->got_subcommand("version") || app->got_subcommand("net") || app->got_subcommand("convert")) return; if (app->get_subcommand("create")->got_subcommand("key")) // create key does not require wallet return; + if (app->get_subcommand("multisig")->got_subcommand("review")) // multisig review does not require wallet + return; if (auto* subapp = app->get_subcommand("system")) { if (subapp->got_subcommand("listproducers") || subapp->got_subcommand("listbw") || subapp->got_subcommand("bidnameinfo")) // system list* do not require wallet return; @@ -971,8 +1014,8 @@ struct register_producer_subcommand { producer_key = public_key_type(producer_key_str); } EOS_RETHROW_EXCEPTIONS(public_key_type_exception, "Invalid 
producer public key: ${public_key}", ("public_key", producer_key_str)) - auto regprod_var = regproducer_variant(producer_str, producer_key, url, loc ); - auto accountPermissions = get_account_permissions(tx_permission, {producer_str,config::active_name}); + auto regprod_var = regproducer_variant(name(producer_str), producer_key, url, loc ); + auto accountPermissions = get_account_permissions(tx_permission, {name(producer_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, N(regproducer), regprod_var)}); }); } @@ -1044,16 +1087,16 @@ struct create_account_subcommand { } EOS_RETHROW_EXCEPTIONS( public_key_type_exception, "Invalid active public key: ${public_key}", ("public_key", active_key_str) ); } - auto create = create_newaccount(creator, account_name, owner, active); + auto create = create_newaccount(name(creator), name(account_name), owner, active); if (!simple) { EOSC_ASSERT( buy_ram_eos.size() || buy_ram_bytes_in_kbytes || buy_ram_bytes, "ERROR: One of --buy-ram, --buy-ram-kbytes or --buy-ram-bytes should have non-zero value" ); EOSC_ASSERT( !buy_ram_bytes_in_kbytes || !buy_ram_bytes, "ERROR: --buy-ram-kbytes and --buy-ram-bytes cannot be set at the same time" ); - action buyram = !buy_ram_eos.empty() ? create_buyram(creator, account_name, to_asset(buy_ram_eos)) - : create_buyrambytes(creator, account_name, (buy_ram_bytes_in_kbytes) ? (buy_ram_bytes_in_kbytes * 1024) : buy_ram_bytes); + action buyram = !buy_ram_eos.empty() ? create_buyram(name(creator), name(account_name), to_asset(buy_ram_eos)) + : create_buyrambytes(name(creator), name(account_name), (buy_ram_bytes_in_kbytes) ? 
(buy_ram_bytes_in_kbytes * 1024) : buy_ram_bytes); auto net = to_asset(stake_net); auto cpu = to_asset(stake_cpu); if ( net.get_amount() != 0 || cpu.get_amount() != 0 ) { - action delegate = create_delegate( creator, account_name, net, cpu, transfer); + action delegate = create_delegate( name(creator), name(account_name), net, cpu, transfer); send_actions( { create, buyram, delegate } ); } else { send_actions( { create, buyram } ); @@ -1077,7 +1120,7 @@ struct unregister_producer_subcommand { fc::variant act_payload = fc::mutable_variant_object() ("producer", producer_str); - auto accountPermissions = get_account_permissions(tx_permission, {producer_str,config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(producer_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, N(unregprod), act_payload)}); }); } @@ -1098,7 +1141,7 @@ struct vote_producer_proxy_subcommand { ("voter", voter_str) ("proxy", proxy_str) ("producers", std::vector{}); - auto accountPermissions = get_account_permissions(tx_permission, {voter_str,config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(voter_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, N(voteproducer), act_payload)}); }); } @@ -1106,7 +1149,7 @@ struct vote_producer_proxy_subcommand { struct vote_producers_subcommand { string voter_str; - vector producer_names; + vector producer_names; vote_producers_subcommand(CLI::App* actionRoot) { auto vote_producers = actionRoot->add_subcommand("prods", localized("Vote for one or more producers")); @@ -1122,15 +1165,15 @@ struct vote_producers_subcommand { ("voter", voter_str) ("proxy", "") ("producers", producer_names); - auto accountPermissions = get_account_permissions(tx_permission, {voter_str,config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(voter_str), 
config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, N(voteproducer), act_payload)}); }); } }; struct approve_producer_subcommand { - eosio::name voter; - eosio::name producer_name; + string voter; + string producer_name; approve_producer_subcommand(CLI::App* actionRoot) { auto approve_producer = actionRoot->add_subcommand("approve", localized("Add one producer to list of voted producers")); @@ -1144,8 +1187,8 @@ struct approve_producer_subcommand { ("scope", name(config::system_account_name).to_string()) ("table", "voters") ("table_key", "owner") - ("lower_bound", voter.value) - ("upper_bound", voter.value + 1) + ("lower_bound", name(voter).to_uint64_t()) + ("upper_bound", name(voter).to_uint64_t() + 1) // Less than ideal upper_bound usage preserved so cleos can still work with old buggy nodeos versions // Change to voter.value when cleos no longer needs to support nodeos versions older than 1.5.0 ("limit", 1) @@ -1164,7 +1207,7 @@ struct approve_producer_subcommand { for ( auto& x : prod_vars ) { prods.push_back( name(x.as_string()) ); } - prods.push_back( producer_name ); + prods.push_back( name(producer_name) ); std::sort( prods.begin(), prods.end() ); auto it = std::unique( prods.begin(), prods.end() ); if (it != prods.end() ) { @@ -1175,15 +1218,15 @@ struct approve_producer_subcommand { ("voter", voter) ("proxy", "") ("producers", prods); - auto accountPermissions = get_account_permissions(tx_permission, {voter,config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(voter), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, N(voteproducer), act_payload)}); }); } }; struct unapprove_producer_subcommand { - eosio::name voter; - eosio::name producer_name; + string voter; + string producer_name; unapprove_producer_subcommand(CLI::App* actionRoot) { auto approve_producer = actionRoot->add_subcommand("unapprove", localized("Remove 
one producer from list of voted producers")); @@ -1197,8 +1240,8 @@ struct unapprove_producer_subcommand { ("scope", name(config::system_account_name).to_string()) ("table", "voters") ("table_key", "owner") - ("lower_bound", voter.value) - ("upper_bound", voter.value + 1) + ("lower_bound", name(voter).to_uint64_t()) + ("upper_bound", name(voter).to_uint64_t() + 1) // Less than ideal upper_bound usage preserved so cleos can still work with old buggy nodeos versions // Change to voter.value when cleos no longer needs to support nodeos versions older than 1.5.0 ("limit", 1) @@ -1217,7 +1260,7 @@ struct unapprove_producer_subcommand { for ( auto& x : prod_vars ) { prods.push_back( name(x.as_string()) ); } - auto it = std::remove( prods.begin(), prods.end(), producer_name ); + auto it = std::remove( prods.begin(), prods.end(), name(producer_name) ); if (it == prods.end() ) { std::cerr << "Cannot remove: producer \"" << producer_name << "\" is not on the list." << std::endl; return; @@ -1227,7 +1270,7 @@ struct unapprove_producer_subcommand { ("voter", voter) ("proxy", "") ("producers", prods); - auto accountPermissions = get_account_permissions(tx_permission, {voter,config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(voter), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, N(voteproducer), act_payload)}); }); } @@ -1280,10 +1323,21 @@ struct get_schedule_subcommand { return; } printf("%s schedule version %s\n", name, schedule["version"].as_string().c_str()); - printf(" %-13s %s\n", "Producer", "Producer key"); + printf(" %-13s %s\n", "Producer", "Producer Authority"); printf(" %-13s %s\n", "=============", "=================="); - for (auto& row: schedule["producers"].get_array()) - printf(" %-13s %s\n", row["producer_name"].as_string().c_str(), row["block_signing_key"].as_string().c_str()); + for( auto& row: schedule["producers"].get_array() ) { + if( 
row.get_object().contains("block_signing_key") ) { + // pre 2.0 + printf( " %-13s %s\n", row["producer_name"].as_string().c_str(), row["block_signing_key"].as_string().c_str() ); + } else { + printf( " %-13s ", row["producer_name"].as_string().c_str() ); + auto a = row["authority"].as(); + static_assert( std::is_same>::value, + "Updates maybe needed if block_signing_authority changes" ); + block_signing_authority_v0 auth = a.get(); + printf( "%s\n", fc::json::to_string( auth, fc::time_point::maximum() ).c_str() ); + } + } printf("\n"); } @@ -1389,13 +1443,13 @@ struct delegate_bandwidth_subcommand { ("stake_net_quantity", to_asset(stake_net_amount)) ("stake_cpu_quantity", to_asset(stake_cpu_amount)) ("transfer", transfer); - auto accountPermissions = get_account_permissions(tx_permission, {from_str,config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); std::vector acts{create_action(accountPermissions, config::system_account_name, N(delegatebw), act_payload)}; EOSC_ASSERT( !(buy_ram_amount.size()) || !buy_ram_bytes, "ERROR: --buyram and --buy-ram-bytes cannot be set at the same time" ); if (buy_ram_amount.size()) { - acts.push_back( create_buyram(from_str, receiver_str, to_asset(buy_ram_amount)) ); + acts.push_back( create_buyram(name(from_str), name(receiver_str), to_asset(buy_ram_amount)) ); } else if (buy_ram_bytes) { - acts.push_back( create_buyrambytes(from_str, receiver_str, buy_ram_bytes) ); + acts.push_back( create_buyrambytes(name(from_str), name(receiver_str), buy_ram_bytes) ); } send_actions(std::move(acts)); }); @@ -1423,7 +1477,7 @@ struct undelegate_bandwidth_subcommand { ("receiver", receiver_str) ("unstake_net_quantity", to_asset(unstake_net_amount)) ("unstake_cpu_quantity", to_asset(unstake_cpu_amount)); - auto accountPermissions = get_account_permissions(tx_permission, {from_str,config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, 
{name(from_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, N(undelegatebw), act_payload)}); }); } @@ -1444,7 +1498,7 @@ struct bidname_subcommand { ("bidder", bidder_str) ("newname", newname_str) ("bid", to_asset(bid_amount)); - auto accountPermissions = get_account_permissions(tx_permission, {bidder_str,config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(bidder_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, N(bidname), act_payload)}); }); } @@ -1452,7 +1506,7 @@ struct bidname_subcommand { struct bidname_info_subcommand { bool print_json = false; - name newname; + string newname; bidname_info_subcommand(CLI::App* actionRoot) { auto list_producers = actionRoot->add_subcommand("bidnameinfo", localized("Get bidname info")); list_producers->add_flag("--json,-j", print_json, localized("Output in JSON format")); @@ -1460,8 +1514,8 @@ struct bidname_info_subcommand { list_producers->set_callback([this] { auto rawResult = call(get_table_func, fc::mutable_variant_object("json", true) ("code", "eosio")("scope", "eosio")("table", "namebids") - ("lower_bound", newname.value) - ("upper_bound", newname.value + 1) + ("lower_bound", name(newname).to_uint64_t()) + ("upper_bound", name(newname).to_uint64_t() + 1) // Less than ideal upper_bound usage preserved so cleos can still work with old buggy nodeos versions // Change to newname.value when cleos no longer needs to support nodeos versions older than 1.5.0 ("limit", 1)); @@ -1471,7 +1525,7 @@ struct bidname_info_subcommand { } auto result = rawResult.as(); // Condition in if statement below can simply be res.rows.empty() when cleos no longer needs to support nodeos versions older than 1.5.0 - if( result.rows.empty() || result.rows[0].get_object()["newname"].as_string() != newname.to_string() ) { + if( result.rows.empty() || 
result.rows[0].get_object()["newname"].as_string() != name(newname).to_string() ) { std::cout << "No bidname record found" << std::endl; return; } @@ -1492,7 +1546,7 @@ struct bidname_info_subcommand { }; struct list_bw_subcommand { - eosio::name account; + string account; bool print_json = false; list_bw_subcommand(CLI::App* actionRoot) { @@ -1504,7 +1558,7 @@ struct list_bw_subcommand { //get entire table in scope of user account auto result = call(get_table_func, fc::mutable_variant_object("json", true) ("code", name(config::system_account_name).to_string()) - ("scope", account.to_string()) + ("scope", name(account).to_string()) ("table", "delband") ); if (!print_json) { @@ -1546,9 +1600,9 @@ struct buyram_subcommand { buyram->set_callback([this] { EOSC_ASSERT( !kbytes || !bytes, "ERROR: --kbytes and --bytes cannot be set at the same time" ); if (kbytes || bytes) { - send_actions( { create_buyrambytes(from_str, receiver_str, fc::to_uint64(amount) * ((kbytes) ? 1024ull : 1ull)) } ); + send_actions( { create_buyrambytes(name(from_str), name(receiver_str), fc::to_uint64(amount) * ((kbytes) ? 
1024ull : 1ull)) } ); } else { - send_actions( { create_buyram(from_str, receiver_str, to_asset(amount)) } ); + send_actions( { create_buyram(name(from_str), name(receiver_str), to_asset(amount)) } ); } }); } @@ -1569,7 +1623,7 @@ struct sellram_subcommand { fc::variant act_payload = fc::mutable_variant_object() ("account", receiver_str) ("bytes", amount); - auto accountPermissions = get_account_permissions(tx_permission, {receiver_str,config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(receiver_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, N(sellram), act_payload)}); }); } @@ -1586,7 +1640,7 @@ struct claimrewards_subcommand { claim_rewards->set_callback([this] { fc::variant act_payload = fc::mutable_variant_object() ("owner", owner); - auto accountPermissions = get_account_permissions(tx_permission, {owner,config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(owner), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, N(claimrewards), act_payload)}); }); } @@ -1604,7 +1658,7 @@ struct regproxy_subcommand { fc::variant act_payload = fc::mutable_variant_object() ("proxy", proxy) ("isproxy", true); - auto accountPermissions = get_account_permissions(tx_permission, {proxy,config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(proxy), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, N(regproxy), act_payload)}); }); } @@ -1622,7 +1676,7 @@ struct unregproxy_subcommand { fc::variant act_payload = fc::mutable_variant_object() ("proxy", proxy) ("isproxy", false); - auto accountPermissions = get_account_permissions(tx_permission, {proxy,config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(proxy), config::active_name}); 
send_actions({create_action(accountPermissions, config::system_account_name, N(regproxy), act_payload)}); }); } @@ -1641,7 +1695,7 @@ struct canceldelay_subcommand { add_standard_transaction_options(cancel_delay, "canceling_account@canceling_permission"); cancel_delay->set_callback([this] { - auto canceling_auth = permission_level{canceling_account, canceling_permission}; + auto canceling_auth = permission_level{name(canceling_account), name(canceling_permission)}; fc::variant act_payload = fc::mutable_variant_object() ("canceling_auth", canceling_auth) ("trx_id", trx_id); @@ -1665,7 +1719,7 @@ struct deposit_subcommand { fc::variant act_payload = fc::mutable_variant_object() ("owner", owner_str) ("amount", amount_str); - auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1685,7 +1739,7 @@ struct withdraw_subcommand { fc::variant act_payload = fc::mutable_variant_object() ("owner", owner_str) ("amount", amount_str); - auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1705,7 +1759,7 @@ struct buyrex_subcommand { fc::variant act_payload = fc::mutable_variant_object() ("from", from_str) ("amount", amount_str); - auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1729,7 
+1783,7 @@ struct lendrex_subcommand { fc::variant act_payload2 = fc::mutable_variant_object() ("from", from_str) ("amount", amount_str); - auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name1, act_payload1), create_action(accountPermissions, config::system_account_name, act_name2, act_payload2)}); }); @@ -1756,7 +1810,7 @@ struct unstaketorex_subcommand { ("receiver", receiver_str) ("from_net", from_net_str) ("from_cpu", from_cpu_str); - auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1776,7 +1830,7 @@ struct sellrex_subcommand { fc::variant act_payload = fc::mutable_variant_object() ("from", from_str) ("rex", rex_str); - auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1792,7 +1846,7 @@ struct cancelrexorder_subcommand { add_standard_transaction_options(cancelrexorder, "owner@active"); cancelrexorder->set_callback([this] { fc::variant act_payload = fc::mutable_variant_object()("owner", owner_str); - auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, 
act_payload)}); }); } @@ -1818,7 +1872,7 @@ struct rentcpu_subcommand { ("receiver", receiver_str) ("loan_payment", loan_payment_str) ("loan_fund", loan_fund_str); - auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1844,7 +1898,7 @@ struct rentnet_subcommand { ("receiver", receiver_str) ("loan_payment", loan_payment_str) ("loan_fund", loan_fund_str); - auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1867,7 +1921,7 @@ struct fundcpuloan_subcommand { ("from", from_str) ("loan_num", loan_num_str) ("payment", payment_str); - auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1890,7 +1944,7 @@ struct fundnetloan_subcommand { ("from", from_str) ("loan_num", loan_num_str) ("payment", payment_str); - auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1913,7 +1967,7 @@ struct defcpuloan_subcommand { ("from", from_str) ("loan_num", loan_num_str) ("amount", amount_str); - auto accountPermissions = 
get_account_permissions(tx_permission, {from_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1936,7 +1990,7 @@ struct defnetloan_subcommand { ("from", from_str) ("loan_num", loan_num_str) ("amount", amount_str); - auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(from_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1956,7 +2010,7 @@ struct mvtosavings_subcommand { fc::variant act_payload = fc::mutable_variant_object() ("owner", owner_str) ("rex", rex_str); - auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1976,7 +2030,7 @@ struct mvfrsavings_subcommand { fc::variant act_payload = fc::mutable_variant_object() ("owner", owner_str) ("rex", rex_str); - auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -1992,7 +2046,7 @@ struct updaterex_subcommand { add_standard_transaction_options(updaterex, "owner@active"); updaterex->set_callback([this] { fc::variant act_payload = fc::mutable_variant_object()("owner", owner_str); - auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + auto 
accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -2008,7 +2062,7 @@ struct consolidate_subcommand { add_standard_transaction_options(consolidate, "owner@active"); consolidate->set_callback([this] { fc::variant act_payload = fc::mutable_variant_object()("owner", owner_str); - auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -2028,7 +2082,7 @@ struct rexexec_subcommand { fc::variant act_payload = fc::mutable_variant_object() ("user", user_str) ("max", max_str); - auto accountPermissions = get_account_permissions(tx_permission, {user_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(user_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -2044,7 +2098,7 @@ struct closerex_subcommand { add_standard_transaction_options(closerex, "owner@active"); closerex->set_callback([this] { fc::variant act_payload = fc::mutable_variant_object()("owner", owner_str); - auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + auto accountPermissions = get_account_permissions(tx_permission, {name(owner_str), config::active_name}); send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); }); } @@ -2099,7 +2153,7 @@ void get_account( const string& accountName, const string& coresym, bool json_fo sep = ", "; } for ( auto& acc : p.required_auth.accounts ) { - std::cout << sep << acc.weight << ' ' << string(acc.permission.actor) << '@' << 
string(acc.permission.permission); + std::cout << sep << acc.weight << ' ' << acc.permission.actor.to_string() << '@' << acc.permission.permission.to_string(); sep = ", "; } std::cout << std::endl; @@ -2288,6 +2342,18 @@ void get_account( const string& accountName, const string& coresym, bool json_fo std::cout << std::endl; } + if( res.rex_info.is_object() ) { + auto& obj = res.rex_info.get_object(); + asset vote_stake = asset::from_string( obj["vote_stake"].as_string() ); + asset rex_balance = asset::from_string( obj["rex_balance"].as_string() ); + std::cout << rex_balance.get_symbol().name() << " balances: " << std::endl; + std::cout << indent << std::left << std::setw(11) + << "balance:" << std::right << std::setw(18) << rex_balance << std::endl; + std::cout << indent << std::left << std::setw(11) + << "staked:" << std::right << std::setw(18) << vote_stake << std::endl; + std::cout << std::endl; + } + if ( res.voter_info.is_object() ) { auto& obj = res.voter_info.get_object(); string proxy = obj["proxy"].as_string(); @@ -2355,8 +2421,12 @@ int main( int argc, char** argv ) { auto version = app.add_subcommand("version", localized("Retrieve version information"), false); version->require_subcommand(); - version->add_subcommand("client", localized("Retrieve version information of the client"))->set_callback([] { - std::cout << localized("Build version: ${ver}", ("ver", eosio::client::config::version_str)) << std::endl; + version->add_subcommand("client", localized("Retrieve basic version information of the client"))->set_callback([] { + std::cout << eosio::version::version_client() << '\n'; + }); + + version->add_subcommand("full", localized("Retrieve full version information of the client"))->set_callback([] { + std::cout << eosio::version::version_full() << '\n'; }); // Create subcommand @@ -2404,18 +2474,18 @@ int main( int argc, char** argv ) { pack_transaction->add_option("transaction", plain_signed_transaction_json, localized("The plain signed json 
(string)"))->required(); pack_transaction->add_flag("--pack-action-data", pack_action_data_flag, localized("Pack all action data within transaction, needs interaction with ${n}", ("n", node_executable_name))); pack_transaction->set_callback([&] { - fc::variant trx_var; - try { - trx_var = json_from_file_or_string( plain_signed_transaction_json ); - } EOS_RETHROW_EXCEPTIONS( transaction_type_exception, "Fail to parse plain transaction JSON '${data}'", ("data", plain_signed_transaction_json)) + fc::variant trx_var = json_from_file_or_string( plain_signed_transaction_json ); if( pack_action_data_flag ) { signed_transaction trx; - abi_serializer::from_variant( trx_var, trx, abi_serializer_resolver, abi_serializer_max_time ); - std::cout << fc::json::to_pretty_string( packed_transaction( trx, packed_transaction::none )) << std::endl; + try { + abi_serializer::from_variant( trx_var, trx, abi_serializer_resolver, abi_serializer_max_time ); + } EOS_RETHROW_EXCEPTIONS( transaction_type_exception, "Invalid transaction format: '${data}'", + ("data", fc::json::to_string(trx_var, fc::time_point::maximum()))) + std::cout << fc::json::to_pretty_string( packed_transaction( trx, packed_transaction::compression_type::none )) << std::endl; } else { try { signed_transaction trx = trx_var.as(); - std::cout << fc::json::to_pretty_string( fc::variant( packed_transaction( trx, packed_transaction::none ))) << std::endl; + std::cout << fc::json::to_pretty_string( fc::variant( packed_transaction( trx, packed_transaction::compression_type::none ))) << std::endl; } EOS_RETHROW_EXCEPTIONS( transaction_type_exception, "Fail to convert transaction, --pack-action-data likely needed" ) } }); @@ -2427,12 +2497,12 @@ int main( int argc, char** argv ) { unpack_transaction->add_option("transaction", packed_transaction_json, localized("The packed transaction json (string containing packed_trx and optionally compression fields)"))->required(); unpack_transaction->add_flag("--unpack-action-data", 
unpack_action_data_flag, localized("Unpack all action data within transaction, needs interaction with ${n}", ("n", node_executable_name))); unpack_transaction->set_callback([&] { - fc::variant packed_trx_var; + fc::variant packed_trx_var = json_from_file_or_string( packed_transaction_json ); packed_transaction packed_trx; try { - packed_trx_var = json_from_file_or_string( packed_transaction_json ); fc::from_variant( packed_trx_var, packed_trx ); - } EOS_RETHROW_EXCEPTIONS( transaction_type_exception, "Fail to parse packed transaction JSON '${data}'", ("data", packed_transaction_json)) + } EOS_RETHROW_EXCEPTIONS( transaction_type_exception, "Invalid packed transaction format: '${data}'", + ("data", fc::json::to_string(packed_trx_var, fc::time_point::maximum()))) signed_transaction strx = packed_trx.get_signed_transaction(); fc::variant trx_var; if( unpack_action_data_flag ) { @@ -2452,11 +2522,11 @@ int main( int argc, char** argv ) { pack_action_data->add_option("name", unpacked_action_data_name_string, localized("The name of the function that's called by this action"))->required(); pack_action_data->add_option("unpacked_action_data", unpacked_action_data_string, localized("The action data expressed as json"))->required(); pack_action_data->set_callback([&] { - fc::variant unpacked_action_data_json; + fc::variant unpacked_action_data_json = json_from_file_or_string(unpacked_action_data_string); + bytes packed_action_data_string; try { - unpacked_action_data_json = json_from_file_or_string(unpacked_action_data_string); + packed_action_data_string = variant_to_bin(name(unpacked_action_data_account_string), name(unpacked_action_data_name_string), unpacked_action_data_json); } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse unpacked action data JSON") - bytes packed_action_data_string = variant_to_bin(unpacked_action_data_account_string, unpacked_action_data_name_string, unpacked_action_data_json); std::cout << 
fc::to_hex(packed_action_data_string.data(), packed_action_data_string.size()) << std::endl; }); @@ -2472,7 +2542,7 @@ int main( int argc, char** argv ) { EOS_ASSERT( packed_action_data_string.size() >= 2, transaction_type_exception, "No packed_action_data found" ); vector packed_action_data_blob(packed_action_data_string.size()/2); fc::from_hex(packed_action_data_string, packed_action_data_blob.data(), packed_action_data_blob.size()); - fc::variant unpacked_action_data_json = bin_to_variant(packed_action_data_account_string, packed_action_data_name_string, packed_action_data_blob); + fc::variant unpacked_action_data_json = bin_to_variant(name(packed_action_data_account_string), name(packed_action_data_name_string), packed_action_data_blob); std::cout << fc::json::to_pretty_string(unpacked_action_data_json) << std::endl; }); @@ -2606,7 +2676,6 @@ int main( int argc, char** argv ) { getTable->add_option( "account", code, localized("The account who owns the table") )->required(); getTable->add_option( "scope", scope, localized("The scope within the contract in which the table is found") )->required(); getTable->add_option( "table", table, localized("The name of the table as specified by the contract abi") )->required(); - getTable->add_option( "-b,--binary", binary, localized("Return the value as BINARY rather than using abi to interpret as JSON") ); getTable->add_option( "-l,--limit", limit, localized("The maximum number of rows to return") ); getTable->add_option( "-k,--key", table_key, localized("Deprecated") ); getTable->add_option( "-L,--lower", lower, localized("JSON representation of lower bound value of key, defaults to first") ); @@ -2620,6 +2689,7 @@ int main( int argc, char** argv ) { getTable->add_option( "--encode-type", encode_type, localized("The encoding type of key_type (i64 , i128 , float64, float128) only support decimal encoding e.g. 
'dec'" "i256 - supports both 'dec' and 'hex', ripemd160 and sha256 is 'hex' only")); + getTable->add_flag("-b,--binary", binary, localized("Return the value as BINARY rather than using abi to interpret as JSON")); getTable->add_flag("-r,--reverse", reverse, localized("Iterate in reverse order")); getTable->add_flag("--show-payer", show_payer, localized("show RAM payer")); @@ -2666,23 +2736,28 @@ int main( int argc, char** argv ) { // currency accessors // get currency balance string symbol; + bool currency_balance_print_json = false; auto get_currency = get->add_subcommand( "currency", localized("Retrieve information related to standard currencies"), true); get_currency->require_subcommand(); auto get_balance = get_currency->add_subcommand( "balance", localized("Retrieve the balance of an account for a given currency"), false); get_balance->add_option( "contract", code, localized("The contract that operates the currency") )->required(); get_balance->add_option( "account", accountName, localized("The account to query balances for") )->required(); get_balance->add_option( "symbol", symbol, localized("The symbol for the currency if the contract operates multiple currencies") ); + get_balance->add_flag("--json,-j", currency_balance_print_json, localized("Output in JSON format") ); get_balance->set_callback([&] { auto result = call(get_currency_balance_func, fc::mutable_variant_object ("account", accountName) ("code", code) ("symbol", symbol.empty() ? 
fc::variant() : symbol) ); - - const auto& rows = result.get_array(); - for( const auto& r : rows ) { - std::cout << r.as_string() - << std::endl; + if (!currency_balance_print_json) { + const auto& rows = result.get_array(); + for( const auto& r : rows ) { + std::cout << r.as_string() + << std::endl; + } + } else { + std::cout << fc::json::to_pretty_string(result) << std::endl; } }); @@ -2904,13 +2979,13 @@ int main( int argc, char** argv ) { bool suppress_duplicate_check = false; auto codeSubcommand = setSubcommand->add_subcommand("code", localized("Create or update the code on an account")); codeSubcommand->add_option("account", account, localized("The account to set code for"))->required(); - codeSubcommand->add_option("code-file", wasmPath, localized("The fullpath containing the contract WASM"));//->required(); + codeSubcommand->add_option("code-file", wasmPath, localized("The path containing the contract WASM"));//->required(); codeSubcommand->add_flag( "-c,--clear", contract_clear, localized("Remove code on an account")); codeSubcommand->add_flag( "--suppress-duplicate-check", suppress_duplicate_check, localized("Don't check for duplicate")); auto abiSubcommand = setSubcommand->add_subcommand("abi", localized("Create or update the abi on an account")); abiSubcommand->add_option("account", account, localized("The account to set the ABI for"))->required(); - abiSubcommand->add_option("abi-file", abiPath, localized("The fullpath containing the contract ABI"));//->required(); + abiSubcommand->add_option("abi-file", abiPath, localized("The path containing the contract ABI"));//->required(); abiSubcommand->add_flag( "-c,--clear", contract_clear, localized("Remove abi on an account")); abiSubcommand->add_flag( "--suppress-duplicate-check", suppress_duplicate_check, localized("Don't check for duplicate")); @@ -2947,10 +3022,11 @@ int main( int argc, char** argv ) { std::string wasm; fc::path cpath = fc::canonical(fc::path(contractPath)); - if( wasmPath.empty() ) + 
if( wasmPath.empty() ) { wasmPath = (cpath / (cpath.filename().generic_string()+".wasm")).generic_string(); - else + } else if ( boost::filesystem::path(wasmPath).is_relative() ) { wasmPath = (cpath / wasmPath).generic_string(); + } std::cerr << localized(("Reading WASM from " + wasmPath + "...").c_str()) << std::endl; fc::read_file_contents(wasmPath, wasm); @@ -2972,10 +3048,10 @@ int main( int argc, char** argv ) { } if (!duplicate) { - actions.emplace_back( create_setcode(account, code_bytes ) ); + actions.emplace_back( create_setcode(name(account), code_bytes ) ); if ( shouldSend ) { std::cerr << localized("Setting Code...") << std::endl; - send_actions(std::move(actions), packed_transaction::zlib); + send_actions(std::move(actions), packed_transaction::compression_type::zlib); } } else { std::cerr << localized("Skipping set code because the new code is the same as the existing code") << std::endl; @@ -3002,7 +3078,7 @@ int main( int argc, char** argv ) { if( abiPath.empty() ) { abiPath = (cpath / (cpath.filename().generic_string()+".abi")).generic_string(); - } else { + } else if ( boost::filesystem::path(abiPath).is_relative() ) { abiPath = (cpath / abiPath).generic_string(); } @@ -3019,11 +3095,11 @@ int main( int argc, char** argv ) { if (!duplicate) { try { - actions.emplace_back( create_setabi(account, abi_bytes) ); + actions.emplace_back( create_setabi(name(account), abi_bytes) ); } EOS_RETHROW_EXCEPTIONS(abi_type_exception, "Fail to parse ABI JSON") if ( shouldSend ) { std::cerr << localized("Setting ABI...") << std::endl; - send_actions(std::move(actions), packed_transaction::zlib); + send_actions(std::move(actions), packed_transaction::compression_type::zlib); } } else { std::cerr << localized("Skipping set abi because the new abi is the same as the existing abi") << std::endl; @@ -3040,7 +3116,7 @@ int main( int argc, char** argv ) { set_abi_callback(); if (actions.size()) { std::cerr << localized("Publishing contract...") << std::endl; - 
send_actions(std::move(actions), packed_transaction::zlib); + send_actions(std::move(actions), packed_transaction::compression_type::zlib); } else { std::cout << "no transaction is sent" << std::endl; } @@ -3083,12 +3159,12 @@ int main( int argc, char** argv ) { tx_force_unique = false; } - auto transfer_amount = to_asset(con, amount); - auto transfer = create_transfer(con, sender, recipient, transfer_amount, memo); + auto transfer_amount = to_asset(name(con), amount); + auto transfer = create_transfer(con, name(sender), name(recipient), transfer_amount, memo); if (!pay_ram) { send_actions( { transfer }); } else { - auto open_ = create_open(con, recipient, transfer_amount.get_symbol(), sender); + auto open_ = create_open(con, name(recipient), transfer_amount.get_symbol(), name(sender)); send_actions( { open_, transfer } ); } }); @@ -3208,7 +3284,7 @@ int main( int argc, char** argv ) { try { wallet_key = private_key_type( wallet_key_str ); } catch (...) { - EOS_THROW(private_key_type_exception, "Invalid private key: ${private_key}", ("private_key", wallet_key_str)) + EOS_THROW(private_key_type_exception, "Invalid private key") } public_key_type pubkey = wallet_key.get_public_key(); @@ -3288,17 +3364,27 @@ int main( int argc, char** argv ) { string trx_json_to_sign; string str_private_key; string str_chain_id; + string str_private_key_file; + string str_public_key; bool push_trx = false; auto sign = app.add_subcommand("sign", localized("Sign a transaction"), false); sign->add_option("transaction", trx_json_to_sign, localized("The JSON string or filename defining the transaction to sign"), true)->required(); sign->add_option("-k,--private-key", str_private_key, localized("The private key that will be used to sign the transaction")); + sign->add_option("--public-key", str_public_key, localized("Ask ${exec} to sign with the corresponding private key of the given public key", ("exec", key_store_executable_name))); sign->add_option("-c,--chain-id", str_chain_id, 
localized("The chain id that will be used to sign the transaction")); - sign->add_flag( "-p,--push-transaction", push_trx, localized("Push transaction after signing")); + sign->add_flag("-p,--push-transaction", push_trx, localized("Push transaction after signing")); sign->set_callback([&] { - signed_transaction trx = json_from_file_or_string(trx_json_to_sign).as(); + + EOSC_ASSERT( str_private_key.empty() || str_public_key.empty(), "ERROR: Either -k/--private-key or --public-key or none of them can be set" ); + fc::variant trx_var = json_from_file_or_string(trx_json_to_sign); + signed_transaction trx; + try { + trx = trx_var.as(); + } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Invalid transaction format: '${data}'", + ("data", fc::json::to_string(trx_var, fc::time_point::maximum()))) fc::optional chain_id; @@ -3310,18 +3396,29 @@ int main( int argc, char** argv ) { chain_id = chain_id_type(str_chain_id); } - if( str_private_key.size() == 0 ) { - std::cerr << localized("private key: "); - fc::set_console_echo(false); - std::getline( std::cin, str_private_key, '\n' ); - fc::set_console_echo(true); + if( str_public_key.size() > 0 ) { + public_key_type pub_key; + try { + pub_key = public_key_type(str_public_key); + } EOS_RETHROW_EXCEPTIONS(public_key_type_exception, "Invalid public key: ${public_key}", ("public_key", str_public_key)) + fc::variant keys_var(flat_set{ pub_key }); + sign_transaction(trx, keys_var, *chain_id); + } else { + if( str_private_key.size() == 0 ) { + std::cerr << localized("private key: "); + fc::set_console_echo(false); + std::getline( std::cin, str_private_key, '\n' ); + fc::set_console_echo(true); + } + private_key_type priv_key; + try { + priv_key = private_key_type(str_private_key); + } EOS_RETHROW_EXCEPTIONS(private_key_type_exception, "Invalid private key") + trx.sign(priv_key, *chain_id); } - auto priv_key = private_key_type(str_private_key); - trx.sign(priv_key, *chain_id); - if(push_trx) { - auto trx_result = 
call(push_txn_func, packed_transaction(trx, packed_transaction::none)); + auto trx_result = call(push_txn_func, packed_transaction(trx, packed_transaction::compression_type::none)); std::cout << fc::json::to_pretty_string(trx_result) << std::endl; } else { std::cout << fc::json::to_pretty_string(trx) << std::endl; @@ -3349,13 +3446,12 @@ int main( int argc, char** argv ) { actionsSubcommand->set_callback([&] { fc::variant action_args_var; if( !data.empty() ) { - try { - action_args_var = json_from_file_or_string(data, fc::json::relaxed_parser); - } EOS_RETHROW_EXCEPTIONS(action_type_exception, "Fail to parse action JSON data='${data}'", ("data", data)) + action_args_var = json_from_file_or_string(data, fc::json::relaxed_parser); } auto accountPermissions = get_account_permissions(tx_permission); - send_actions({chain::action{accountPermissions, contract_account, action, variant_to_bin( contract_account, action, action_args_var ) }}); + send_actions({chain::action{accountPermissions, name(contract_account), name(action), + variant_to_bin( name(contract_account), name(action), action_args_var ) }}); }); // push transaction @@ -3365,10 +3461,7 @@ int main( int argc, char** argv ) { add_standard_transaction_options(trxSubcommand); trxSubcommand->set_callback([&] { - fc::variant trx_var; - try { - trx_var = json_from_file_or_string(trx_to_push); - } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse transaction JSON '${data}'", ("data",trx_to_push)) + fc::variant trx_var = json_from_file_or_string(trx_to_push); try { signed_transaction trx = trx_var.as(); std::cout << fc::json::to_pretty_string( push_transaction( trx )) << std::endl; @@ -3380,15 +3473,12 @@ int main( int argc, char** argv ) { } }); - + // push transactions string trxsJson; auto trxsSubcommand = push->add_subcommand("transactions", localized("Push an array of arbitrary JSON transactions")); trxsSubcommand->add_option("transactions", trxsJson, localized("The JSON string or filename defining 
the array of the transactions to push"))->required(); trxsSubcommand->set_callback([&] { - fc::variant trx_var; - try { - trx_var = json_from_file_or_string(trxsJson); - } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse transaction JSON '${data}'", ("data",trxsJson)) + fc::variant trx_var = json_from_file_or_string(trxsJson); auto trxs_result = call(push_txns_func, trx_var); std::cout << fc::json::to_pretty_string(trxs_result) << std::endl; }); @@ -3429,20 +3519,15 @@ int main( int argc, char** argv ) { propose_action->add_option("proposal_expiration", parse_expiration_hours, localized("Proposal expiration interval in hours")); propose_action->set_callback([&] { - fc::variant requested_perm_var; - try { - requested_perm_var = json_from_file_or_string(requested_perm); - } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse permissions JSON '${data}'", ("data",requested_perm)) - fc::variant transaction_perm_var; - try { - transaction_perm_var = json_from_file_or_string(transaction_perm); - } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse permissions JSON '${data}'", ("data",transaction_perm)) - fc::variant trx_var; + fc::variant requested_perm_var = json_from_file_or_string(requested_perm); + fc::variant transaction_perm_var = json_from_file_or_string(transaction_perm); + fc::variant trx_var = json_from_file_or_string(proposed_transaction); + transaction proposed_trx; try { - trx_var = json_from_file_or_string(proposed_transaction); - } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse transaction JSON '${data}'", ("data",proposed_transaction)) - transaction proposed_trx = trx_var.as(); - bytes proposed_trx_serialized = variant_to_bin( proposed_contract, proposed_action, trx_var ); + proposed_trx = trx_var.as(); + } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Invalid transaction format: '${data}'", + ("data", fc::json::to_string(trx_var, fc::time_point::maximum()))) + bytes 
proposed_trx_serialized = variant_to_bin( name(proposed_contract), name(proposed_action), trx_var ); vector reqperm; try { @@ -3457,7 +3542,7 @@ int main( int argc, char** argv ) { auto accountPermissions = get_account_permissions(tx_permission); if (accountPermissions.empty()) { if (!proposer.empty()) { - accountPermissions = vector{{proposer, config::active_name}}; + accountPermissions = vector{{name(proposer), config::active_name}}; } else { EOS_THROW(missing_auth_exception, "Authority is not provided (either by multisig parameter or -p)"); } @@ -3484,7 +3569,7 @@ int main( int argc, char** argv ) { ("requested", requested_perm_var) ("trx", trx_var); - send_actions({chain::action{accountPermissions, "eosio.msig", "propose", variant_to_bin( N(eosio.msig), N(propose), args ) }}); + send_actions({chain::action{accountPermissions, N(eosio.msig), N(propose), variant_to_bin( N(eosio.msig), N(propose), args ) }}); }); //multisig propose transaction @@ -3496,20 +3581,13 @@ int main( int argc, char** argv ) { propose_trx->add_option("proposer", proposer, localized("Account proposing the transaction")); propose_trx->set_callback([&] { - fc::variant requested_perm_var; - try { - requested_perm_var = json_from_file_or_string(requested_perm); - } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse permissions JSON '${data}'", ("data",requested_perm)) - - fc::variant trx_var; - try { - trx_var = json_from_file_or_string(trx_to_push); - } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse transaction JSON '${data}'", ("data",trx_to_push)) + fc::variant requested_perm_var = json_from_file_or_string(requested_perm); + fc::variant trx_var = json_from_file_or_string(trx_to_push); auto accountPermissions = get_account_permissions(tx_permission); if (accountPermissions.empty()) { if (!proposer.empty()) { - accountPermissions = vector{{proposer, config::active_name}}; + accountPermissions = vector{{name(proposer), config::active_name}}; } else { 
EOS_THROW(missing_auth_exception, "Authority is not provided (either by multisig parameter or -p)"); } @@ -3524,7 +3602,7 @@ int main( int argc, char** argv ) { ("requested", requested_perm_var) ("trx", trx_var); - send_actions({chain::action{accountPermissions, "eosio.msig", "propose", variant_to_bin( N(eosio.msig), N(propose), args ) }}); + send_actions({chain::action{accountPermissions, N(eosio.msig), N(propose), variant_to_bin( N(eosio.msig), N(propose), args ) }}); }); @@ -3541,8 +3619,8 @@ int main( int argc, char** argv ) { ("scope", proposer) ("table", "proposal") ("table_key", "") - ("lower_bound", name(proposal_name).value) - ("upper_bound", name(proposal_name).value + 1) + ("lower_bound", name(proposal_name).to_uint64_t()) + ("upper_bound", name(proposal_name).to_uint64_t() + 1) // Less than ideal upper_bound usage preserved so cleos can still work with old buggy nodeos versions // Change to name(proposal_name).value when cleos no longer needs to support nodeos versions older than 1.5.0 ("limit", 1) @@ -3577,8 +3655,8 @@ int main( int argc, char** argv ) { ("scope", proposer) ("table", "approvals2") ("table_key", "") - ("lower_bound", name(proposal_name).value) - ("upper_bound", name(proposal_name).value + 1) + ("lower_bound", name(proposal_name).to_uint64_t()) + ("upper_bound", name(proposal_name).to_uint64_t() + 1) // Less than ideal upper_bound usage preserved so cleos can still work with old buggy nodeos versions // Change to name(proposal_name).value when cleos no longer needs to support nodeos versions older than 1.5.0 ("limit", 1) @@ -3609,8 +3687,8 @@ int main( int argc, char** argv ) { ("scope", proposer) ("table", "approvals") ("table_key", "") - ("lower_bound", name(proposal_name).value) - ("upper_bound", name(proposal_name).value + 1) + ("lower_bound", name(proposal_name).to_uint64_t()) + ("upper_bound", name(proposal_name).to_uint64_t() + 1) // Less than ideal upper_bound usage preserved so cleos can still work with old buggy nodeos versions 
// Change to name(proposal_name).value when cleos no longer needs to support nodeos versions older than 1.5.0 ("limit", 1) @@ -3642,8 +3720,8 @@ int main( int argc, char** argv ) { ("scope", "eosio.msig") ("table", "invals") ("table_key", "") - ("lower_bound", a.first.value) - ("upper_bound", a.first.value + 1) + ("lower_bound", a.first.to_uint64_t()) + ("upper_bound", a.first.to_uint64_t() + 1) // Less than ideal upper_bound usage preserved so cleos can still work with old buggy nodeos versions // Change to name(proposal_name).value when cleos no longer needs to support nodeos versions older than 1.5.0 ("limit", 1) @@ -3729,10 +3807,7 @@ int main( int argc, char** argv ) { string perm; string proposal_hash; auto approve_or_unapprove = [&](const string& action) { - fc::variant perm_var; - try { - perm_var = json_from_file_or_string(perm); - } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse permissions JSON '${data}'", ("data",perm)) + fc::variant perm_var = json_from_file_or_string(perm); auto args = fc::mutable_variant_object() ("proposer", proposer) @@ -3743,8 +3818,8 @@ int main( int argc, char** argv ) { args("proposal_hash", proposal_hash); } - auto accountPermissions = get_account_permissions(tx_permission, {proposer,config::active_name}); - send_actions({chain::action{accountPermissions, "eosio.msig", action, variant_to_bin( N(eosio.msig), action, args ) }}); + auto accountPermissions = get_account_permissions(tx_permission, {name(proposer), config::active_name}); + send_actions({chain::action{accountPermissions, N(eosio.msig), name(action), variant_to_bin( N(eosio.msig), name(action), args ) }}); }; // multisig approve @@ -3773,8 +3848,8 @@ int main( int argc, char** argv ) { auto args = fc::mutable_variant_object() ("account", invalidator); - auto accountPermissions = get_account_permissions(tx_permission, {invalidator,config::active_name}); - send_actions({chain::action{accountPermissions, "eosio.msig", "invalidate", variant_to_bin( 
N(eosio.msig), "invalidate", args ) }}); + auto accountPermissions = get_account_permissions(tx_permission, {name(invalidator), config::active_name}); + send_actions({chain::action{accountPermissions, N(eosio.msig), N(invalidate), variant_to_bin( N(eosio.msig), N(invalidate), args ) }}); }); // multisig cancel @@ -3788,7 +3863,7 @@ int main( int argc, char** argv ) { auto accountPermissions = get_account_permissions(tx_permission); if (accountPermissions.empty()) { if (!canceler.empty()) { - accountPermissions = vector{{canceler, config::active_name}}; + accountPermissions = vector{{name(canceler), config::active_name}}; } else { EOS_THROW(missing_auth_exception, "Authority is not provided (either by multisig parameter or -p)"); } @@ -3801,7 +3876,7 @@ int main( int argc, char** argv ) { ("proposal_name", proposal_name) ("canceler", canceler); - send_actions({chain::action{accountPermissions, "eosio.msig", "cancel", variant_to_bin( N(eosio.msig), N(cancel), args ) }}); + send_actions({chain::action{accountPermissions, N(eosio.msig), N(cancel), variant_to_bin( N(eosio.msig), N(cancel), args ) }}); } ); @@ -3816,7 +3891,7 @@ int main( int argc, char** argv ) { auto accountPermissions = get_account_permissions(tx_permission); if (accountPermissions.empty()) { if (!executer.empty()) { - accountPermissions = vector{{executer, config::active_name}}; + accountPermissions = vector{{name(executer), config::active_name}}; } else { EOS_THROW(missing_auth_exception, "Authority is not provided (either by multisig parameter or -p)"); } @@ -3830,7 +3905,7 @@ int main( int argc, char** argv ) { ("proposal_name", proposal_name) ("executer", executer); - send_actions({chain::action{accountPermissions, "eosio.msig", "exec", variant_to_bin( N(eosio.msig), N(exec), args ) }}); + send_actions({chain::action{accountPermissions, N(eosio.msig), N(exec), variant_to_bin( N(eosio.msig), N(exec), args ) }}); } ); @@ -3849,21 +3924,18 @@ int main( int argc, char** argv ) { 
wrap_exec->add_option("--contract,-c", wrap_con, localized("The account which controls the wrap contract")); wrap_exec->set_callback([&] { - fc::variant trx_var; - try { - trx_var = json_from_file_or_string(trx_to_exec); - } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse transaction JSON '${data}'", ("data",trx_to_exec)) + fc::variant trx_var = json_from_file_or_string(trx_to_exec); auto accountPermissions = get_account_permissions(tx_permission); if( accountPermissions.empty() ) { - accountPermissions = vector{{executer, config::active_name}, {wrap_con, config::active_name}}; + accountPermissions = vector{{name(executer), config::active_name}, {name(wrap_con), config::active_name}}; } auto args = fc::mutable_variant_object() ("executer", executer ) ("trx", trx_var); - send_actions({chain::action{accountPermissions, wrap_con, "exec", variant_to_bin( wrap_con, N(exec), args ) }}); + send_actions({chain::action{accountPermissions, name(wrap_con), N(exec), variant_to_bin( name(wrap_con), N(exec), args ) }}); }); // system subcommand diff --git a/programs/eosio-blocklog/CMakeLists.txt b/programs/eosio-blocklog/CMakeLists.txt index b883e493f85..2e050924977 100644 --- a/programs/eosio-blocklog/CMakeLists.txt +++ b/programs/eosio-blocklog/CMakeLists.txt @@ -16,9 +16,12 @@ target_link_libraries( eosio-blocklog PRIVATE appbase PRIVATE eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) +copy_bin( eosio-blocklog ) install( TARGETS eosio-blocklog + COMPONENT base + RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR} diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index 8e4d78723d6..0220a2f23cb 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -1,7 +1,4 @@ -/** - * @file - * @copyright defined in eosio/LICENSE.txt - */ +#include #include #include #include @@ -10,12 +7,23 @@ #include 
#include #include +#include #include #include #include #include +#include + +#ifndef _WIN32 +#define FOPEN(p, m) fopen(p, m) +#else +#define CAT(s1, s2) s1 ## s2 +#define PREL(s) CAT(L, s) +#define FOPEN(p, m) _wfopen(p, PREL(m)) +#endif + using namespace eosio::chain; namespace bfs = boost::filesystem; namespace bpo = boost::program_options; @@ -32,19 +40,44 @@ struct blocklog { bfs::path blocks_dir; bfs::path output_file; - uint32_t first_block; - uint32_t last_block; - bool no_pretty_print; - bool as_json_array; + uint32_t first_block = 0; + uint32_t last_block = std::numeric_limits::max(); + bool no_pretty_print = false; + bool as_json_array = false; + bool make_index = false; + bool trim_log = false; + bool smoke_test = false; + bool help = false; +}; + +struct report_time { + report_time(std::string desc) + : _start(std::chrono::high_resolution_clock::now()) + , _desc(desc) { + } + + void report() { + const auto duration = std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - _start).count() / 1000; + ilog("eosio-blocklog - ${desc} took ${t} msec", ("desc", _desc)("t", duration)); + } + + const std::chrono::high_resolution_clock::time_point _start; + const std::string _desc; }; void blocklog::read_log() { + report_time rt("reading log"); block_log block_logger(blocks_dir); const auto end = block_logger.read_head(); EOS_ASSERT( end, block_log_exception, "No blocks found in block log" ); EOS_ASSERT( end->block_num() > 1, block_log_exception, "Only one block found in block log" ); - ilog( "existing block log contains block num 1 through block num ${n}", ("n",end->block_num()) ); + //fix message below, first block might not be 1, first_block_num is not set yet + ilog( "existing block log contains block num ${first} through block num ${n}", + ("first",block_logger.first_block_num())("n",end->block_num()) ); + if (first_block < block_logger.first_block_num()) { + first_block = block_logger.first_block_num(); + } optional reversible_blocks; try { @@ 
-62,8 +95,6 @@ void blocklog::read_log() { } catch( const std::runtime_error& e ) { if( std::string(e.what()).find("database dirty flag set") != std::string::npos ) { elog( "database dirty flag set (likely due to unclean shutdown): only block_log blocks are available" ); - } else if( std::string(e.what()) == "database metadata dirty flag set" ) { - elog( "database metadata dirty flag set (likely due to unclean shutdown): only block_log blocks are available" ); } else { throw; } @@ -102,10 +133,10 @@ void blocklog::read_log() { ("ref_block_prefix", ref_block_prefix) (pretty_output.get_object()); fc::variant v(std::move(enhanced_object)); - if (no_pretty_print) - fc::json::to_stream(*out, v, fc::time_point::maximum(), fc::json::stringify_large_ints_and_doubles); - else - *out << fc::json::to_pretty_string(v) << "\n"; + if (no_pretty_print) + fc::json::to_stream(*out, v, fc::time_point::maximum(), fc::json::stringify_large_ints_and_doubles); + else + *out << fc::json::to_pretty_string(v) << "\n"; }; bool contains_obj = false; while((block_num <= last_block) && (next = block_logger.read_block_by_num( block_num ))) { @@ -115,6 +146,7 @@ void blocklog::read_log() { ++block_num; contains_obj = true; } + if (reversible_blocks) { const reversible_block_object* obj = nullptr; while( (block_num <= last_block) && (obj = reversible_blocks->find(block_num)) ) { @@ -126,8 +158,10 @@ void blocklog::read_log() { contains_obj = true; } } + if (as_json_array) *out << "]"; + rt.report(); } void blocklog::set_program_options(options_description& cli) @@ -136,18 +170,23 @@ void blocklog::set_program_options(options_description& cli) ("blocks-dir", bpo::value()->default_value("blocks"), "the location of the blocks directory (absolute path or relative to the current directory)") ("output-file,o", bpo::value(), - "the file to write the block log output to (absolute or relative path). 
If not specified then output is to stdout.") - ("first", bpo::value(&first_block)->default_value(1), - "the first block number to log") - ("last", bpo::value(&last_block)->default_value(std::numeric_limits::max()), - "the last block number (inclusive) to log") + "the file to write the output to (absolute or relative path). If not specified then output is to stdout.") + ("first,f", bpo::value(&first_block)->default_value(0), + "the first block number to log or the first to keep if trim-blocklog") + ("last,l", bpo::value(&last_block)->default_value(std::numeric_limits::max()), + "the last block number to log or the last to keep if trim-blocklog") ("no-pretty-print", bpo::bool_switch(&no_pretty_print)->default_value(false), "Do not pretty print the output. Useful if piping to jq to improve performance.") ("as-json-array", bpo::bool_switch(&as_json_array)->default_value(false), "Print out json blocks wrapped in json array (otherwise the output is free-standing json objects).") - ("help", "Print this help message and exit.") + ("make-index", bpo::bool_switch(&make_index)->default_value(false), + "Create blocks.index from blocks.log. Must give 'blocks-dir'. Give 'output-file' relative to current directory or absolute path (default is /blocks.index).") + ("trim-blocklog", bpo::bool_switch(&trim_log)->default_value(false), + "Trim blocks.log and blocks.index. 
Must give 'blocks-dir' and 'first and/or 'last'.") + ("smoke-test", bpo::bool_switch(&smoke_test)->default_value(false), + "Quick test that blocks.log and blocks.index are well formed and agree with each other.") + ("help,h", bpo::bool_switch(&help)->default_value(false), "Print this help message and exit.") ; - } void blocklog::initialize(const variables_map& options) { @@ -169,9 +208,68 @@ void blocklog::initialize(const variables_map& options) { } +int trim_blocklog_end(bfs::path block_dir, uint32_t n) { //n is last block to keep (remove later blocks) + report_time rt("trimming blocklog end"); + using namespace std; + trim_data td(block_dir); + cout << "\nIn directory " << block_dir << " will trim all blocks after block " << n << " from " + << td.block_file_name.generic_string() << " and " << td.index_file_name.generic_string() << ".\n"; + if (n < td.first_block) { + cerr << "All blocks are after block " << n << " so do nothing (trim_end would delete entire blocks.log)\n"; + return 1; + } + if (n >= td.last_block) { + cerr << "There are no blocks after block " << n << " so do nothing\n"; + return 2; + } + const uint64_t end_of_new_file = td.block_pos(n + 1); + bfs::resize_file(td.block_file_name, end_of_new_file); + const uint64_t index_end= td.block_index(n) + sizeof(uint64_t); //advance past record for block n + bfs::resize_file(td.index_file_name, index_end); + cout << "blocks.index has been trimmed to " << index_end << " bytes\n"; + rt.report(); + return 0; +} -int main(int argc, char** argv) -{ +bool trim_blocklog_front(bfs::path block_dir, uint32_t n) { //n is first block to keep (remove prior blocks) + report_time rt("trimming blocklog start"); + const bool status = block_log::trim_blocklog_front(block_dir, block_dir / "old", n); + rt.report(); + return status; +} + + +void smoke_test(bfs::path block_dir) { + using namespace std; + cout << "\nSmoke test of blocks.log and blocks.index in directory " << block_dir << '\n'; + trim_data td(block_dir); + auto 
status = fseek(td.blk_in, -sizeof(uint64_t), SEEK_END); //get last_block from blocks.log, compare to from blocks.index + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", td.block_file_name.string())("pos", sizeof(uint64_t)) ); + uint64_t file_pos; + auto size = fread((void*)&file_pos, sizeof(uint64_t), 1, td.blk_in); + EOS_ASSERT( size == 1, block_log_exception, "${file} read fails", ("file", td.block_file_name.string()) ); + status = fseek(td.blk_in, file_pos + trim_data::blknum_offset, SEEK_SET); + EOS_ASSERT( status == 0, block_log_exception, "cannot seek to ${file} ${pos} from beginning of file", ("file", td.block_file_name.string())("pos", file_pos + trim_data::blknum_offset) ); + uint32_t bnum; + size = fread((void*)&bnum, sizeof(uint32_t), 1, td.blk_in); + EOS_ASSERT( size == 1, block_log_exception, "${file} read fails", ("file", td.block_file_name.string()) ); + bnum = endian_reverse_u32(bnum) + 1; //convert from big endian to little endian and add 1 + EOS_ASSERT( td.last_block == bnum, block_log_exception, "blocks.log says last block is ${lb} which disagrees with blocks.index", ("lb", bnum) ); + cout << "blocks.log and blocks.index agree on number of blocks\n"; + uint32_t delta = (td.last_block + 8 - td.first_block) >> 3; + if (delta < 1) + delta = 1; + for (uint32_t n = td.first_block; ; n += delta) { + if (n > td.last_block) + n = td.last_block; + td.block_pos(n); //check block 'n' is where blocks.index says + if (n == td.last_block) + break; + } + cout << "\nno problems found\n"; //if get here there were no exceptions +} + +int main(int argc, char** argv) { std::ios::sync_with_stdio(false); // for potential performance boost for large block log files options_description cli ("eosio-blocklog command line options"); try { @@ -180,10 +278,46 @@ int main(int argc, char** argv) variables_map vmap; bpo::store(bpo::parse_command_line(argc, argv, cli), vmap); bpo::notify(vmap); - if 
(vmap.count("help") > 0) { - cli.print(std::cerr); - return 0; + if (blog.help) { + cli.print(std::cerr); + return 0; + } + if (blog.smoke_test) { + smoke_test(vmap.at("blocks-dir").as()); + return 0; + } + if (blog.trim_log) { + if (blog.first_block == 0 && blog.last_block == std::numeric_limits::max()) { + std::cerr << "trim-blocklog does nothing unless specify first and/or last block."; + return -1; + } + if (blog.last_block != std::numeric_limits::max()) { + if (trim_blocklog_end(vmap.at("blocks-dir").as(), blog.last_block) != 0) + return -1; + } + if (blog.first_block != 0) { + if (!trim_blocklog_front(vmap.at("blocks-dir").as(), blog.first_block)) + return -1; + } + return 0; + } + if (blog.make_index) { + const bfs::path blocks_dir = vmap.at("blocks-dir").as(); + bfs::path out_file = blocks_dir / "blocks.index"; + const bfs::path block_file = blocks_dir / "blocks.log"; + + if (vmap.count("output-file") > 0) + out_file = vmap.at("output-file").as(); + + report_time rt("making index"); + const auto log_level = fc::logger::get(DEFAULT_LOGGER).get_log_level(); + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + block_log::construct_index(block_file.generic_string(), out_file.generic_string()); + fc::logger::get(DEFAULT_LOGGER).set_log_level(log_level); + rt.report(); + return 0; } + //else print blocks.log as JSON blog.initialize(vmap); blog.read_log(); } catch( const fc::exception& e ) { diff --git a/programs/eosio-launcher/CMakeLists.txt b/programs/eosio-launcher/CMakeLists.txt index c0c19262e09..9e09a02d804 100644 --- a/programs/eosio-launcher/CMakeLists.txt +++ b/programs/eosio-launcher/CMakeLists.txt @@ -29,7 +29,7 @@ configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) target_include_directories(eosio-launcher PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) target_link_libraries(eosio-launcher - PRIVATE eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + PRIVATE eosio_chain fc Boost::program_options ${CMAKE_DL_LIBS} 
${PLATFORM_SPECIFIC_LIBS} ) install( TARGETS eosio-launcher diff --git a/programs/eosio-launcher/config.hpp.in b/programs/eosio-launcher/config.hpp.in index f733308dc1b..e3681d96b1a 100644 --- a/programs/eosio-launcher/config.hpp.in +++ b/programs/eosio-launcher/config.hpp.in @@ -1,7 +1,4 @@ /** - * @file - * @copyright defined in eos/LICENSE - * * \warning This file is machine generated. DO NOT EDIT. See config.hpp.in for changes. */ #pragma once diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 93cb9b1af64..74e3ac3340d 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - * @brief launch testnet nodes - **/ #include #include #include @@ -505,7 +500,7 @@ launcher_def::set_options (bpo::options_description &cfg) { ("servers",bpo::value(),"a file containing ip addresses and names of individual servers to deploy as producers or non-producers ") ("per-host",bpo::value(&per_host)->default_value(0),("specifies how many " + string(node_executable_name) + " instances will run on a single host. 
Use 0 to indicate all on one.").c_str()) ("network-name",bpo::value(&network.name)->default_value("testnet_"),"network name prefix used in GELF logging source") - ("enable-gelf-logging",bpo::value(&gelf_enabled)->default_value(true),"enable gelf logging appender in logging configuration file") + ("enable-gelf-logging",bpo::value(&gelf_enabled)->default_value(false),"enable gelf logging appender in logging configuration file") ("gelf-endpoint",bpo::value(&gelf_endpoint)->default_value("10.160.11.21:12201"),"hostname:port or ip:port of GELF endpoint") ("template",bpo::value(&start_temp)->default_value("testnet.template"),"the startup script template") ("script",bpo::value(&start_script)->default_value("bios_boot.sh"),"the generated startup script name") diff --git a/programs/keosd/CMakeLists.txt b/programs/keosd/CMakeLists.txt index 3c806fbed39..f3903cd648c 100644 --- a/programs/keosd/CMakeLists.txt +++ b/programs/keosd/CMakeLists.txt @@ -12,7 +12,7 @@ endif() configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) target_link_libraries( ${KEY_STORE_EXECUTABLE_NAME} - PRIVATE appbase + PRIVATE appbase version PRIVATE wallet_api_plugin wallet_plugin PRIVATE http_plugin PRIVATE eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) diff --git a/programs/keosd/config.hpp.in b/programs/keosd/config.hpp.in index 370f88db1a9..41c520e235c 100644 --- a/programs/keosd/config.hpp.in +++ b/programs/keosd/config.hpp.in @@ -1,7 +1,4 @@ /** - * @file - * @copyright defined in eos/LICENSE - * * \warning This file is machine generated. DO NOT EDIT. See config.hpp.in for changes. 
*/ #pragma once diff --git a/programs/keosd/main.cpp b/programs/keosd/main.cpp index 626ef6bd0f8..ce1138014a8 100644 --- a/programs/keosd/main.cpp +++ b/programs/keosd/main.cpp @@ -1,12 +1,9 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include #include +#include #include #include @@ -37,6 +34,8 @@ bfs::path determine_home_directory() int main(int argc, char** argv) { try { + app().set_version_string(eosio::version::version_client()); + app().set_full_version_string(eosio::version::version_full()); bfs::path home = determine_home_directory(); app().set_default_data_dir(home / "eosio-wallet"); app().set_default_config_dir(home / "eosio-wallet"); @@ -48,7 +47,7 @@ int main(int argc, char** argv) if(!app().initialize(argc, argv)) return -1; auto& http = app().get_plugin(); - http.add_handler("/v1/" + keosd::config::key_store_executable_name + "/stop", [](string, string, url_response_callback cb) { cb(200, fc::variant(fc::variant_object())); std::raise(SIGTERM); } ); + http.add_handler("/v1/" + keosd::config::key_store_executable_name + "/stop", [&a=app()](string, string, url_response_callback cb) { cb(200, fc::variant(fc::variant_object())); a.quit(); } ); app().startup(); app().exec(); } catch (const fc::exception& e) { diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index d5fe8273eb5..0b1b5fed649 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -48,7 +48,7 @@ else() endif() target_link_libraries( ${NODE_EXECUTABLE_NAME} - PRIVATE appbase + PRIVATE appbase version PRIVATE -Wl,${whole_archive_flag} login_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} history_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} state_history_plugin -Wl,${no_whole_archive_flag} diff --git a/programs/nodeos/config.hpp.in b/programs/nodeos/config.hpp.in index 821477e3270..9e79e718fa5 100644 --- a/programs/nodeos/config.hpp.in +++ 
b/programs/nodeos/config.hpp.in @@ -1,7 +1,4 @@ /** - * @file - * @copyright defined in eos/LICENSE - * * \warning This file is machine generated. DO NOT EDIT. See config.hpp.in for changes. */ #pragma once diff --git a/programs/nodeos/logging.json b/programs/nodeos/logging.json index 07771457d72..54947425291 100644 --- a/programs/nodeos/logging.json +++ b/programs/nodeos/logging.json @@ -64,6 +64,15 @@ "stderr", "net" ] + },{ + "name": "http_plugin", + "level": "debug", + "enabled": true, + "additivity": false, + "appenders": [ + "stderr", + "net" + ] },{ "name": "producer_plugin", "level": "debug", @@ -73,6 +82,15 @@ "stderr", "net" ] + },{ + "name": "transaction_tracing", + "level": "info", + "enabled": true, + "additivity": false, + "appenders": [ + "stderr", + "net" + ] } ] } diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 17a83c0150c..64ab9c3be44 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -1,13 +1,10 @@ -/** - * @file - * @copyright defined in eosio/LICENSE.txt - */ #include #include #include #include #include +#include #include #include @@ -27,7 +24,11 @@ void configure_logging(const bfs::path& config_path) { try { try { - fc::configure_logging(config_path); + if( fc::exists( config_path ) ) { + fc::configure_logging( config_path ); + } else { + fc::configure_logging( fc::logging_config::default_config() ); + } } catch (...) { elog("Error reloading logging.json"); throw; @@ -48,9 +49,12 @@ void configure_logging(const bfs::path& config_path) void logging_conf_handler() { auto config_path = app().get_logging_conf(); - ilog("Received HUP. Reloading logging configuration from ${p}.", ("p", config_path.string())); - if(fc::exists(config_path)) - ::detail::configure_logging(config_path); + if( fc::exists( config_path ) ) { + ilog( "Received HUP. Reloading logging configuration from ${p}.", ("p", config_path.string()) ); + } else { + ilog( "Received HUP. 
No log config found at ${p}, setting to default.", ("p", config_path.string()) ); + } + ::detail::configure_logging( config_path ); fc::log_config::initialize_appenders( app().get_io_service() ); } @@ -70,8 +74,8 @@ enum return_codes { SUCCESS = 0, BAD_ALLOC = 1, DATABASE_DIRTY = 2, - FIXED_REVERSIBLE = 3, - EXTRACTED_GENESIS = 4, + FIXED_REVERSIBLE = SUCCESS, + EXTRACTED_GENESIS = SUCCESS, NODE_MANAGEMENT_SUCCESS = 5 }; @@ -79,6 +83,8 @@ int main(int argc, char** argv) { try { app().set_version(eosio::nodeos::config::version); + app().set_version_string(eosio::version::version_client()); + app().set_full_version_string(eosio::version::version_full()); auto root = fc::app_path(); app().set_default_data_dir(root / "eosio" / nodeos::config::node_executable_name / "data" ); @@ -87,13 +93,21 @@ int main(int argc, char** argv) .default_unix_socket_path = "", .default_http_port = 8888 }); - if(!app().initialize(argc, argv)) + if(!app().initialize(argc, argv)) { + const auto& opts = app().get_options(); + if( opts.count("help") || opts.count("version") || opts.count("full-version") || opts.count("print-default-config") ) { + return SUCCESS; + } return INITIALIZE_FAIL; + } initialize_logging(); - ilog("${name} version ${ver}", ("name", nodeos::config::node_executable_name)("ver", app().version_string())); + ilog( "${name} version ${ver} ${fv}", + ("name", nodeos::config::node_executable_name)("ver", app().version_string()) + ("fv", app().version_string() == app().full_version_string() ? 
"" : app().full_version_string()) ); ilog("${name} using configuration file ${c}", ("name", nodeos::config::node_executable_name)("c", app().full_config_file_path().string())); ilog("${name} data directory is ${d}", ("name", nodeos::config::node_executable_name)("d", app().data_dir().string())); app().startup(); + app().set_thread_priority_max(); app().exec(); } catch( const extract_genesis_state_exception& e ) { return EXTRACTED_GENESIS; @@ -106,9 +120,6 @@ int main(int argc, char** argv) if( e.top_message().find( "database dirty flag set" ) != std::string::npos ) { elog( "database dirty flag set (likely due to unclean shutdown): replay required" ); return DATABASE_DIRTY; - } else if( e.top_message().find( "database metadata dirty flag set" ) != std::string::npos ) { - elog( "database metadata dirty flag set (likely due to unclean shutdown): replay required" ); - return DATABASE_DIRTY; } } elog( "${e}", ("e", e.to_detail_string())); @@ -123,9 +134,6 @@ int main(int argc, char** argv) if( std::string(e.what()).find("database dirty flag set") != std::string::npos ) { elog( "database dirty flag set (likely due to unclean shutdown): replay required" ); return DATABASE_DIRTY; - } else if( std::string(e.what()) == "database metadata dirty flag set" ) { - elog( "database metadata dirty flag set (likely due to unclean shutdown): replay required" ); - return DATABASE_DIRTY; } else { elog( "${e}", ("e",e.what())); } diff --git a/scripts/.build_vars b/scripts/.build_vars index 6663f9d86fc..75dc9a38c10 100644 --- a/scripts/.build_vars +++ b/scripts/.build_vars @@ -30,14 +30,14 @@ export INSTALL_MONGO=${INSTALL_MONGO:-false} # BOOST export BOOST_VERSION_MAJOR=1 -export BOOST_VERSION_MINOR=70 +export BOOST_VERSION_MINOR=71 export BOOST_VERSION_PATCH=0 export BOOST_VERSION=${BOOST_VERSION_MAJOR}_${BOOST_VERSION_MINOR}_${BOOST_VERSION_PATCH} export BOOST_ROOT=${BOOST_LOCATION:-${SRC_DIR}/boost_${BOOST_VERSION}} export BOOST_LINK_LOCATION=${OPT_DIR}/boost # LLVM -export 
LLVM_VERSION=release_40 +export LLVM_VERSION=release_80 export LLVM_ROOT=${OPT_DIR}/llvm export LLVM_DIR=${LLVM_ROOT}/lib/cmake/llvm diff --git a/scripts/.environment b/scripts/.environment index 11246ff1147..b5968b1db8b 100644 --- a/scripts/.environment +++ b/scripts/.environment @@ -19,5 +19,4 @@ export EOSIO_INSTALL_DIR="${HOME}/eosio/${EOSIO_VERSION}" export TEMP_DIR="${TEMP_DIR:-${HOME}/tmp}" [[ -f ${BUILD_DIR}/CMakeCache.txt ]] && export CACHED_INSTALL_PATH=$(grep "CMAKE_INSTALL_PREFIX:PATH" ${BUILD_DIR}/CMakeCache.txt | cut -d= -f2) - -. ./scripts/.build_vars +. ./scripts/.build_vars \ No newline at end of file diff --git a/scripts/complete.sh b/scripts/complete.sh new file mode 100755 index 00000000000..608433af43e --- /dev/null +++ b/scripts/complete.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +_complete() +{ + COMPREPLY=() + + local cur=${COMP_WORDS[COMP_CWORD]} + local cmd=${COMP_WORDS[0]} + + choices=$( ${cmd} -- "${COMP_WORDS[@]:1}" 2>/dev/null ) + ret=$? + + if [[ "$ret" -ne 0 ]]; then + return 0 + fi + + local DISPLAY_HELP=1 + if [ "${__COMPLETE_PREV_LINE:-}" != "$COMP_LINE" ] || + [ "${__COMPLETE_PREV_POINT:-}" != "$COMP_POINT" ]; then + __COMPLETE_PREV_LINE=$COMP_LINE + __COMPLETE_PREV_POINT=$COMP_POINT + DISPLAY_HELP= + fi + + EXPANDED_PS1="$(bash --rcfile <(echo "PS1='$PS1'") -i <<<'' 2>&1 | head -n 1)" + + if [[ ! -z "${choices[@]}" ]]; then + COMPREPLY=( $( compgen -W '${choices}' -- ${cur} ) ) + + if [[ ! 
-z "${cur}" ]]; then + return 0 + fi + fi + + if [ -n "$DISPLAY_HELP" ]; then + ${cmd} -- "${COMP_WORDS[@]:1}" 2>&1 > /dev/null + + if [[ -z "${choices[@]}" ]]; then + echo -n "${EXPANDED_PS1}" + echo -n "${COMP_WORDS[@]}" + fi + fi + + return 0 +} + +complete -F _complete $@ diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 6353bfa4512..32de8ec0f2e 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -49,7 +49,7 @@ function usage() { TIME_BEGIN=$( date -u +%s ) if [ $# -ne 0 ]; then - while getopts "o:s:b:i:ycdhmPf" opt; do + while getopts "o:s:b:i:ycdhmP" opt; do case "${opt}" in o ) options=( "Debug" "Release" "RelWithDebInfo" "MinSizeRel" ) @@ -78,9 +78,6 @@ if [ $# -ne 0 ]; then NONINTERACTIVE=true PROCEED=true ;; - f ) - echo "DEPRECATION NOTICE: -f will be removed in the next release..." - ;; # Needs to be removed in 1.9 c ) ENABLE_COVERAGE_TESTING=true ;; @@ -134,7 +131,7 @@ ensure-sudo ensure-which # Prevent a non-git clone from running ensure-git-clone -# Prompt user for installation path. +# Prompt user for installation path (Set EOSIO_INSTALL_DIR) install-directory-prompt # If the same version has already been installed... 
previous-install-prompt @@ -246,7 +243,7 @@ if $ENABLE_MONGO; then echo "${BIN_DIR}/mongod --dbpath ${MONGODB_DATA_DIR} -f ${MONGODB_CONF} --logpath ${MONGODB_LOG_DIR}/mongod.log &" PATH_TO_USE=" PATH=\$PATH:$OPT_DIR/mongodb/bin" fi -echo "cd ${BUILD_DIR} && ${PATH_TO_USE} make test" # PATH is set as currently 'mongo' binary is required for the mongodb test +echo "cd ${BUILD_DIR} &&${PATH_TO_USE} make test" # PATH is set as currently 'mongo' binary is required for the mongodb test echo "" resources diff --git a/scripts/eosio_build_amazonlinux2_deps b/scripts/eosio_build_amazonlinux2_deps index 4017586125e..5e059a7bfce 100755 --- a/scripts/eosio_build_amazonlinux2_deps +++ b/scripts/eosio_build_amazonlinux2_deps @@ -20,4 +20,6 @@ libedit-devel,rpm -qa doxygen,rpm -qa graphviz,rpm -qa clang,rpm -qa -patch,rpm -qa \ No newline at end of file +patch,rpm -qa +llvm-devel,rpm -qa +llvm-static,rpm -qa \ No newline at end of file diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 9e20e86df55..a0fa87c3250 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -12,7 +12,7 @@ echo "Disk space available: ${DISK_AVAIL}G" echo "" -# Repo necessary for rh-python3 and devtoolset-8 +# Repo necessary for rh-python3, devtoolset-8 and llvm-toolset-7.0 ensure-scl # GCC8 for Centos / Needed for CMAKE install even if we're pinning ensure-devtoolset diff --git a/scripts/eosio_build_centos7_deps b/scripts/eosio_build_centos7_deps index 60452c66252..7d40915d770 100644 --- a/scripts/eosio_build_centos7_deps +++ b/scripts/eosio_build_centos7_deps @@ -18,4 +18,6 @@ gettext-devel,rpm -qa file,rpm -qa libusbx-devel,rpm -qa libcurl-devel,rpm -qa -patch,rpm -qa \ No newline at end of file +patch,rpm -qa +llvm-toolset-7.0-llvm-devel,rpm -qa +llvm-toolset-7.0-llvm-static,rpm -qa \ No newline at end of file diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index d06afd8bb42..d6c8d11389e 100755 --- 
a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -36,8 +36,6 @@ ensure-brew-packages "${REPO_ROOT}/scripts/eosio_build_darwin_deps" [[ -z "${CMAKE}" ]] && export CMAKE="/usr/local/bin/cmake" # CLANG Installation build-clang -# LLVM Installation -ensure-llvm # BOOST Installation ensure-boost # MONGO Installation diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 6912919cc1e..a447d59fa48 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -11,11 +11,9 @@ echo "Disk space available: ${DISK_AVAIL}G" [[ "${DISK_AVAIL}" -lt "${DISK_MIN}" ]] && echo " - You must have at least ${DISK_MIN}GB of available storage to install EOSIO." && exit 1 # system clang and build essential for Ubuntu 18 (16 too old) -( [[ $PIN_COMPILER == false ]] && [[ $VERSION_ID == "18.04" ]] ) && EXTRA_DEPS=(clang,dpkg\ -s) +( [[ $PIN_COMPILER == false ]] && [[ $VERSION_ID == "18.04" ]] ) && EXTRA_DEPS=(clang,dpkg\ -s llvm-7-dev,dpkg\ -s) # We install clang8 for Ubuntu 16, but we still need something to compile cmake, boost, etc + pinned 18 still needs something to build source ( [[ $VERSION_ID == "16.04" ]] || ( $PIN_COMPILER && [[ $VERSION_ID == "18.04" ]] ) ) && ensure-build-essential -# Ensure packages exist -([[ $PIN_COMPILER == false ]] && [[ $BUILD_CLANG == false ]]) && EXTRA_DEPS+=(llvm-4.0,dpkg\ -s) $ENABLE_COVERAGE_TESTING && EXTRA_DEPS+=(lcov,dpkg\ -s) ensure-apt-packages "${REPO_ROOT}/scripts/eosio_build_ubuntu_deps" $(echo ${EXTRA_DEPS[@]}) echo "" diff --git a/scripts/eosio_install.sh b/scripts/eosio_install.sh index 63b3038450e..e2f8f0fdb33 100755 --- a/scripts/eosio_install.sh +++ b/scripts/eosio_install.sh @@ -64,4 +64,3 @@ printf "${COLOR_GREEN}EOSIO has been installed into ${CACHED_INSTALL_PATH}/bin${ printf "\\n${COLOR_YELLOW}Uninstall with: ${SCRIPT_DIR}/eosio_uninstall.sh${COLOR_NC}\\n" printf "==============================================================================================\\n\\n" 
resources - diff --git a/scripts/helpers/eosio.sh b/scripts/helpers/eosio.sh index e700765827e..5e92d19513f 100755 --- a/scripts/helpers/eosio.sh +++ b/scripts/helpers/eosio.sh @@ -31,7 +31,6 @@ function setup() { echo "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}" echo "CORE_SYMBOL_NAME: ${CORE_SYMBOL_NAME}" echo "BOOST_LOCATION: ${BOOST_LOCATION}" - echo "INSTALL_LOCATION: ${INSTALL_LOCATION}" echo "BUILD_DIR: ${BUILD_DIR}" echo "EOSIO_INSTALL_DIR: ${EOSIO_INSTALL_DIR}" echo "NONINTERACTIVE: ${NONINTERACTIVE}" @@ -249,15 +248,17 @@ function ensure-boost() { B2_FLAGS="-q -j${JOBS} --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test install" BOOTSTRAP_FLAGS="" if [[ $ARCH == "Linux" ]] && $PIN_COMPILER; then - B2_FLAGS="toolset=clang cxxflags='-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I${CLANG_ROOT}/include/c++/v1' linkflags='-stdlib=libc++' link=static threading=multi --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j${JOBS} install" + B2_FLAGS="toolset=clang cxxflags='-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I${CLANG_ROOT}/include/c++/v1 -D_FORTIFY_SOURCE=2 -fstack-protector-strong -fpie' linkflags='-stdlib=libc++ -pie' link=static threading=multi --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j${JOBS} install" BOOTSTRAP_FLAGS="--with-toolset=clang" + elif $PIN_COMPILER; then + local SDKROOT="$(xcrun --sdk macosx --show-sdk-path)" fi - execute bash -c "cd $SRC_DIR && \ + execute bash -c "cd $SRC_DIR && \ curl -LO https://dl.bintray.com/boostorg/release/$BOOST_VERSION_MAJOR.$BOOST_VERSION_MINOR.$BOOST_VERSION_PATCH/source/boost_$BOOST_VERSION.tar.bz2 \ && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ && cd $BOOST_ROOT \ - && ./bootstrap.sh ${BOOTSTRAP_FLAGS} --prefix=$BOOST_ROOT \ - && ./b2 ${B2_FLAGS} \ + && SDKROOT="$SDKROOT" ./bootstrap.sh ${BOOTSTRAP_FLAGS} 
--prefix=$BOOST_ROOT \ + && SDKROOT="$SDKROOT" ./b2 ${B2_FLAGS} \ && cd .. \ && rm -f boost_$BOOST_VERSION.tar.bz2 \ && rm -rf $BOOST_LINK_LOCATION" @@ -270,30 +271,27 @@ function ensure-boost() { } function ensure-llvm() { - echo "${COLOR_CYAN}[Ensuring LLVM 4 support]${COLOR_NC}" - if [[ ! -d $LLVM_ROOT ]]; then + if $PIN_COMPILER || $BUILD_CLANG; then + if [[ -d $LLVM_ROOT ]]; then + return + fi + LLVM_TEMP_DIR=$(mktemp -d) if $PIN_COMPILER || $BUILD_CLANG; then - CMAKE_FLAGS="-DCMAKE_INSTALL_PREFIX='${LLVM_ROOT}' -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE='${BUILD_DIR}/pinned_toolchain.cmake' .." - else - if [[ $NAME == "Ubuntu" ]]; then - execute ln -s /usr/lib/llvm-4.0 $LLVM_ROOT - echo " - LLVM successfully linked from /usr/lib/llvm-4.0 to ${LLVM_ROOT}" - return 0 - fi - CMAKE_FLAGS="-G 'Unix Makefiles' -DCMAKE_INSTALL_PREFIX=${LLVM_ROOT} -DLLVM_TARGETS_TO_BUILD='host' -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release .." + local LLVM_PINNED_CMAKE_ARGS="-DCMAKE_TOOLCHAIN_FILE='${BUILD_DIR}/pinned_toolchain.cmake' -DCMAKE_EXE_LINKER_FLAGS=-pthread -DCMAKE_SHARED_LINKER_FLAGS=-pthread -DLLVM_ENABLE_PIC=NO" fi - execute bash -c "cd ${OPT_DIR} \ + trap "rm -rf '$LLVM_TEMP_DIR'" EXIT + execute bash -c "cd '$LLVM_TEMP_DIR' \ && git clone --depth 1 --single-branch --branch $LLVM_VERSION https://github.com/llvm-mirror/llvm.git llvm && cd llvm \ - && mkdir build \ - && cd build \ - && ${CMAKE} ${CMAKE_FLAGS} \ - && make -j${JOBS} \ - && make install" + && mkdir build && cd build \ + && ${CMAKE} -DCMAKE_INSTALL_PREFIX='${LLVM_ROOT}' -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release $LLVM_PINNED_CMAKE_ARGS .. \ + && make -j${JOBS} install" echo " - LLVM successfully installed @ ${LLVM_ROOT}" - echo "" - else - echo " - LLVM found @ ${LLVM_ROOT}." 
- echo "" + elif [[ $NAME == "Ubuntu" ]]; then + execute ln -snf /usr/lib/llvm-7 $LLVM_ROOT + elif [[ $NAME == "Amazon Linux" ]]; then + execute unlink $LLVM_ROOT || true + elif [[ $NAME == "CentOS Linux" ]]; then + execute ln -snf /opt/rh/llvm-toolset-7.0/root $LLVM_ROOT fi } diff --git a/scripts/helpers/general.sh b/scripts/helpers/general.sh index b8bbcf5d4ba..37244e42415 100755 --- a/scripts/helpers/general.sh +++ b/scripts/helpers/general.sh @@ -87,6 +87,7 @@ function install-package() { EXECUTION_FUNCTION="execute" [[ $2 == "WETRUN" ]] && EXECUTION_FUNCTION="execute-always" ( [[ $2 =~ "--" ]] || [[ $3 =~ "--" ]] ) && OPTIONS="${2}${3}" + # Can't use $SUDO_COMMAND: https://askubuntu.com/questions/953485/where-do-i-find-the-sudo-command-environment-variable [[ $CURRENT_USER != "root" ]] && [[ ! -z $SUDO_LOCATION ]] && NEW_SUDO_COMMAND="$SUDO_LOCATION -E" ( [[ $NAME =~ "Amazon Linux" ]] || [[ $NAME == "CentOS Linux" ]] ) && eval $EXECUTION_FUNCTION $NEW_SUDO_COMMAND $YUM $OPTIONS install -y $1 ( [[ $NAME =~ "Ubuntu" ]] ) && eval $EXECUTION_FUNCTION $NEW_SUDO_COMMAND $APTGET $OPTIONS install -y $1 diff --git a/scripts/pinned_toolchain.cmake b/scripts/pinned_toolchain.cmake index cdb517c5e1f..73b0a43bf62 100644 --- a/scripts/pinned_toolchain.cmake +++ b/scripts/pinned_toolchain.cmake @@ -6,9 +6,14 @@ set(CMAKE_CXX_COMPILER ${OPT_PATH}/clang8/bin/clang++) set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${OPT_PATH}/clang8/include/c++/v1 /usr/local/include /usr/include) -set(CMAKE_CXX_FLAGS_INIT "-nostdinc++") +set(CMAKE_C_FLAGS_INIT "-D_FORTIFY_SOURCE=2 -fstack-protector-strong -fpie") +set(CMAKE_CXX_FLAGS_INIT "-nostdinc++ -D_FORTIFY_SOURCE=2 -fstack-protector-strong -fpie") + +set(CMAKE_EXE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++ -pie") +if(NOT APPLE) + string(APPEND CMAKE_EXE_LINKER_FLAGS_INIT " -Wl,-z,relro,-z,now") +endif() -set(CMAKE_EXE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") set(CMAKE_SHARED_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") 
set(CMAKE_MODULE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") diff --git a/scripts/wasm-spec-test.sh b/scripts/wasm-spec-test.sh new file mode 100755 index 00000000000..06814e8d068 --- /dev/null +++ b/scripts/wasm-spec-test.sh @@ -0,0 +1,29 @@ +#!/bin/bash +set -eo pipefail +# variables +echo "+++ $([[ "$BUILDKITE" == 'true' ]] && echo ':evergreen_tree: ')Configuring Environment" +[[ -z "$JOBS" ]] && export JOBS=$(getconf _NPROCESSORS_ONLN) +GIT_ROOT="$(dirname $BASH_SOURCE[0])/.." +if [[ "$(uname)" == 'Linux' ]]; then + . /etc/os-release + if [[ "$ID" == 'centos' ]]; then + [[ -f /opt/rh/rh-python36/enable ]] && source /opt/rh/rh-python36/enable + fi +fi +cd $GIT_ROOT/build +# count tests +echo "+++ $([[ "$BUILDKITE" == 'true' ]] && echo ':microscope: ')Running WASM Spec Tests" +TEST_COUNT=$(ctest -N -L wasm_spec_tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') +if [[ $TEST_COUNT > 0 ]]; then + echo "$TEST_COUNT tests found." +else + echo "+++ $([[ "$BUILDKITE" == 'true' ]] && echo ':no_entry: ')ERROR: No tests registered with ctest! Exiting..." + exit 1 +fi +# run tests +set +e # defer ctest error handling to end +echo "$ ctest -j $JOBS -L wasm_spec_tests --output-on-failure -T Test" +ctest -j $JOBS -L wasm_spec_tests --output-on-failure -T Test +EXIT_STATUS=$? +echo 'Done running WASM spec tests.' 
+exit $EXIT_STATUS \ No newline at end of file diff --git a/testnet.template b/testnet.template old mode 100644 new mode 100755 index 574d7ec9795..a087666bc50 --- a/testnet.template +++ b/testnet.template @@ -14,7 +14,14 @@ fi bioscontractpath=$BIOS_CONTRACT_PATH if [ -z "$bioscontractpath" ]; then - bioscontractpath="unittests/contracts/eosio.bios" + # this is defaulted to the version of bios that only requires the preactivate_feature + # newer versions may require + bioscontractpath="unittests/contracts/old_versions/v1.7.0-develop-preactivate_feature/eosio.bios" +fi + +bioscurrencysymbol=$BIOS_CURRENCY_SYMBOL +if [ -z "$bioscurrencysymbol" ]; then + bioscurrencysymbol="SYS" fi wddir=eosio-ignition-wd @@ -63,7 +70,7 @@ wcmd () { } cacmd () { - programs/cleos/cleos --wallet-url $wdurl --url http://$bioshost:$biosport system newaccount --transfer --stake-net "10000000.0000 SYS" --stake-cpu "10000000.0000 SYS" --buy-ram "10000000.0000 SYS" eosio $* >> $logfile 2>&1 + programs/cleos/cleos --wallet-url $wdurl --url http://$bioshost:$biosport system newaccount --transfer --stake-net "10000000.0000 "$bioscurrencysymbol --stake-cpu "10000000.0000 "$bioscurrencysymbol --buy-ram "10000000.0000 "$bioscurrencysymbol eosio $* >> $logfile 2>&1 ecmd system regproducer $1 $2 ecmd system voteproducer prods $1 $1 } @@ -81,7 +88,8 @@ wcmd create --to-console -n ignition # where $BIOSKEY is replaced by the private key for the bios node # ------ DO NOT ALTER THE NEXT LINE ------- ###INSERT prodkeys - +echo "Activated Features Check:" >> $logfile +curl http://$bioshost:$biosport/v1/chain/get_activated_protocol_features >> $logfile ecmd set contract eosio $bioscontractpath eosio.bios.wasm eosio.bios.abi # Preactivate all digests @@ -112,16 +120,16 @@ ecmd set contract eosio.msig unittests/contracts/eosio.msig eosio.msig.wasm eosi ecmd set contract eosio.wrap unittests/contracts/eosio.wrap eosio.wrap.wasm eosio.wrap.abi echo ===== Start: $step ============ >> $logfile -echo executing: 
cleos --wallet-url $wdurl --url http://$bioshost:$biosport push action eosio.token create '[ "eosio", "10000000000.0000 SYS" ]' -p eosio.token | tee -a $logfile -echo executing: cleos --wallet-url $wdurl --url http://$bioshost:$biosport push action eosio.token issue '[ "eosio", "1000000000.0000 SYS", "memo" ]' -p eosio | tee -a $logfile +echo executing: cleos --wallet-url $wdurl --url http://$bioshost:$biosport push action eosio.token create '[ "eosio", "10000000000.0000 '$bioscurrencysymbol'" ]' -p eosio.token | tee -a $logfile +echo executing: cleos --wallet-url $wdurl --url http://$bioshost:$biosport push action eosio.token issue '[ "eosio", "1000000000.0000 '$bioscurrencysymbol'", "memo" ]' -p eosio | tee -a $logfile echo ----------------------- >> $logfile -programs/cleos/cleos --wallet-url $wdurl --url http://$bioshost:$biosport push action eosio.token create '[ "eosio", "10000000000.0000 SYS" ]' -p eosio.token >> $logfile 2>&1 -programs/cleos/cleos --wallet-url $wdurl --url http://$bioshost:$biosport push action eosio.token issue '[ "eosio", "1000000000.0000 SYS", "memo" ]' -p eosio >> $logfile 2>&1 +programs/cleos/cleos --wallet-url $wdurl --url http://$bioshost:$biosport push action eosio.token create '[ "eosio", "10000000000.0000 '$bioscurrencysymbol'" ]' -p eosio.token >> $logfile 2>&1 +programs/cleos/cleos --wallet-url $wdurl --url http://$bioshost:$biosport push action eosio.token issue '[ "eosio", "1000000000.0000 '$bioscurrencysymbol'", "memo" ]' -p eosio >> $logfile 2>&1 echo ==== End: $step ============== >> $logfile step=$(($step + 1)) ecmd set contract eosio unittests/contracts/eosio.system eosio.system.wasm eosio.system.abi -programs/cleos/cleos --wallet-url $wdurl --url http://$bioshost:$biosport push action eosio init '[0, "4,SYS"]' -p eosio >> $logfile 2>&1 +programs/cleos/cleos --wallet-url $wdurl --url http://$bioshost:$biosport push action eosio init '[0, "4,'$bioscurrencysymbol'"]' -p eosio >> $logfile 2>&1 # Manual deployers, add a 
series of lines below this block that looks like: # cacmd $PRODNAME[0] $OWNERKEY[0] $ACTIVEKEY[0] diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 3a6b1ea7bf8..e3ba85a6c86 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,15 +1,12 @@ + find_package( Gperftools QUIET ) if( GPERFTOOLS_FOUND ) message( STATUS "Found gperftools; compiling tests with TCMalloc") list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) endif() -find_package(LLVM 4.0 REQUIRED CONFIG) - -link_directories(${LLVM_LIBRARY_DIR}) - -include_directories("${CMAKE_SOURCE_DIR}/plugins/wallet_plugin/include") +include_directories( "${CMAKE_SOURCE_DIR}/plugins/wallet_plugin/include" ) file(GLOB UNIT_TESTS "*.cpp") @@ -29,6 +26,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Cluster.py ${CMAKE_CURRENT_BINARY_DIR configure_file(${CMAKE_CURRENT_SOURCE_DIR}/TestHelper.py ${CMAKE_CURRENT_BINARY_DIR}/TestHelper.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_tests/dawn_515/test.sh ${CMAKE_CURRENT_BINARY_DIR}/p2p_tests/dawn_515/test.sh COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/block_log_util_test.py ${CMAKE_CURRENT_BINARY_DIR}/block_log_util_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-test.py ${CMAKE_CURRENT_BINARY_DIR}/distributed-transactions-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-remote-test.py ${CMAKE_CURRENT_BINARY_DIR}/distributed-transactions-remote-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/sample-cluster-map.json ${CMAKE_CURRENT_BINARY_DIR}/sample-cluster-map.json COPYONLY) @@ -41,6 +39,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_remote_test.py ${CMAKE_CUR configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_under_min_avail_ram.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_under_min_avail_ram.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_voting_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_voting_test.py COPYONLY) 
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_irreversible_mode_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_irreversible_mode_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_chainbase_allocation_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_chainbase_allocation_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_protocol_feature_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_protocol_feature_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_multiple_version_protocol_feature_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_multiple_version_protocol_feature_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensus-validation-malicious-producers.py ${CMAKE_CURRENT_BINARY_DIR}/consensus-validation-malicious-producers.py COPYONLY) @@ -59,6 +58,8 @@ add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-te set_property(TEST nodeos_sanity_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME block_log_util_test COMMAND tests/block_log_util_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST block_log_util_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) @@ -88,7 +89,6 @@ set_property(TEST launcher_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME db_modes_test COMMAND tests/db_modes_test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_tests_properties(db_modes_test PROPERTIES COST 6000) add_test(NAME release-build-test COMMAND tests/release-build.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST release-build-test PROPERTY LABELS 
nonparallelizable_tests) add_test(NAME version-label-test COMMAND tests/version-label.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) # Long running tests @@ -109,11 +109,15 @@ add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wal set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_tests_properties(nodeos_under_min_avail_ram_lr_test PROPERTIES TIMEOUT 3000) set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_irreversible_mode_lr_test COMMAND tests/nodeos_irreversible_mode_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_irreversible_mode_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_chainbase_allocation_lr_test COMMAND tests/nodeos_chainbase_allocation_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_chainbase_allocation_lr_test PROPERTY LABELS long_running_tests) + add_test(NAME nodeos_startup_catchup_lr_test COMMAND tests/nodeos_startup_catchup.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_tests_properties(nodeos_startup_catchup_lr_test PROPERTIES TIMEOUT 3000) set_property(TEST nodeos_startup_catchup_lr_test PROPERTY LABELS long_running_tests) diff --git a/tests/Cluster.py b/tests/Cluster.py index 699858e355f..4102e3343f9 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -15,6 +15,7 @@ from core_symbol import CORE_SYMBOL from testUtils import Utils from testUtils import Account +from testUtils import BlockLogAction from Node import BlockType from Node import Node from WalletMgr import WalletMgr @@ -386,8 +387,8 @@ def connectGroup(group, producerNodes, bridgeNodes) : Cluster.__LauncherCmdArr = 
cmdArr.copy() - s=" ".join(cmdArr) - Utils.Print("cmd: %s" % (s)) + s=" ".join([("'{0}'".format(element) if (' ' in element) else element) for element in cmdArr.copy()]) + if Utils.Debug: Utils.Print("cmd: %s" % (s)) if 0 != subprocess.call(cmdArr): Utils.Print("ERROR: Launcher failed to launch. failed cmd: %s" % (s)) return False @@ -953,7 +954,7 @@ def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False): "FEATURE_DIGESTS": "" } if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy): - env["BIOS_CONTRACT_PATH"] = "unittests/contracts/eosio.bios" + env["BIOS_CONTRACT_PATH"] = "unittests/contracts/old_versions/v1.7.0-develop-preactivate_feature/eosio.bios" if pfSetupPolicy == PFSetupPolicy.FULL: allBuiltinProtocolFeatureDigests = biosNode.getAllBuiltinFeatureDigestsToPreactivate() @@ -964,7 +965,7 @@ def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False): if not silent: Utils.Print("Launcher failed to shut down eos cluster.") return None - p = re.compile('error', re.IGNORECASE) + p = re.compile(r"\berror\b", re.IGNORECASE) with open(Cluster.__bootlog) as bootFile: for line in bootFile: if p.search(line): @@ -1071,7 +1072,7 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli contract="eosio.bios" contractDir="unittests/contracts/%s" % (contract) if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy): - contractDir="unittests/contracts/%s" % (contract) + contractDir="unittests/contracts/old_versions/v1.7.0-develop-preactivate_feature/%s" % (contract) else: contractDir="unittests/contracts/old_versions/v1.6.0-rc3/%s" % (contract) wasmFile="%s.wasm" % (contract) @@ -1606,9 +1607,9 @@ def printBlockLogIfNeeded(self): self.printBlockLog() - def getBlockLog(self, nodeExtension): + def getBlockLog(self, nodeExtension, blockLogAction=BlockLogAction.return_blocks, outputFile=None, first=None, last=None, throwException=False, silentErrors=False, exitOnError=False): 
blockLogDir=Utils.getNodeDataDir(nodeExtension, "blocks") - return Utils.getBlockLog(blockLogDir, exitOnError=False) + return Utils.getBlockLog(blockLogDir, blockLogAction=blockLogAction, outputFile=outputFile, first=first, last=last, throwException=throwException, silentErrors=silentErrors, exitOnError=exitOnError) def printBlockLog(self): blockLogBios=self.getBlockLog("bios") diff --git a/tests/Node.py b/tests/Node.py index f1a39dffc64..49251710d4b 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -14,12 +14,7 @@ from testUtils import EnumType from testUtils import addEnum from testUtils import unhandledEnumType - -class ReturnType(EnumType): - pass - -addEnum(ReturnType, "raw") -addEnum(ReturnType, "json") +from testUtils import ReturnType class BlockType(EnumType): pass @@ -171,6 +166,7 @@ def normalizeJsonObject(extJStr): tmpStr=re.sub(r'ObjectId\("(\w+)"\)', r'"ObjectId-\1"', tmpStr) tmpStr=re.sub(r'ISODate\("([\w|\-|\:|\.]+)"\)', r'"ISODate-\1"', tmpStr) tmpStr=re.sub(r'NumberLong\("(\w+)"\)', r'"NumberLong-\1"', tmpStr) + tmpStr=re.sub(r'NumberLong\((\w+)\)', r'\1', tmpStr) return tmpStr @staticmethod @@ -1207,6 +1203,7 @@ def getHeadBlockNum(self): return info[headBlockNumTag] else: # Either this implementation or the one in getIrreversibleBlockNum are likely wrong. + time.sleep(1) block=self.getBlockFromDb(-1) if block is not None: blockNum=block["block_num"] @@ -1217,6 +1214,7 @@ def getIrreversibleBlockNum(self): if not self.enableMongo: info=self.getInfo(exitOnError=True) if info is not None: + Utils.Print("current lib: %d" % (info["last_irreversible_block_num"])) return info["last_irreversible_block_num"] else: # Either this implementation or the one in getHeadBlockNum are likely wrong. 
@@ -1265,7 +1263,7 @@ def myFunc(): self.killed=True return True - def interruptAndVerifyExitStatus(self, timeout=15): + def interruptAndVerifyExitStatus(self, timeout=60): if Utils.Debug: Utils.Print("terminating node: %s" % (self.cmd)) assert self.popenProc is not None, "node: \"%s\" does not have a popenProc, this may be because it is only set after a relaunch." % (self.cmd) self.popenProc.send_signal(signal.SIGINT) @@ -1353,7 +1351,7 @@ def getNextCleanProductionCycle(self, trans): # TBD: make nodeId an internal property # pylint: disable=too-many-locals # If nodeosPath is equal to None, it will use the existing nodeos path - def relaunch(self, nodeId, chainArg=None, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False, nodeosPath=None): + def relaunch(self, nodeId, chainArg=None, newChain=False, timeout=Utils.systemWaitTimeout, addSwapFlags=None, cachePopen=False, nodeosPath=None): assert(self.pid is None) assert(self.killed) @@ -1365,7 +1363,7 @@ def relaunch(self, nodeId, chainArg=None, newChain=False, timeout=Utils.systemWa splittedCmd=self.cmd.split() if nodeosPath: splittedCmd[0] = nodeosPath myCmd=" ".join(splittedCmd) - toAddOrSwap=copy.deepcopy(addOrSwapFlags) if addOrSwapFlags is not None else {} + toAddOrSwap=copy.deepcopy(addSwapFlags) if addSwapFlags is not None else {} if not newChain: skip=False swapValue=None @@ -1529,7 +1527,7 @@ def activatePreactivateFeature(self): self.scheduleProtocolFeatureActivations([preactivateFeatureDigest]) # Wait for the next block to be produced so the scheduled protocol feature is activated - self.waitForHeadToAdvance() + assert self.waitForHeadToAdvance(), print("ERROR: TIMEOUT WAITING FOR PREACTIVATE") # Return an array of feature digests to be preactivated in a correct order respecting dependencies # Require producer_api_plugin @@ -1583,3 +1581,8 @@ def modifyBuiltinPFSubjRestrictions(self, nodeId, featureCodename, subjectiveRes 
protocolFeatureJson["subjective_restrictions"].update(subjectiveRestriction) with open(jsonPath, "w") as f: json.dump(protocolFeatureJson, f, indent=2) + + # Require producer_api_plugin + def createSnapshot(self): + param = { } + return self.processCurlCmd("producer", "create_snapshot", json.dumps(param)) diff --git a/tests/bash-bats/eosio_build.sh b/tests/bash-bats/eosio_build.sh index c343397a35e..53ca8dbe94f 100644 --- a/tests/bash-bats/eosio_build.sh +++ b/tests/bash-bats/eosio_build.sh @@ -24,7 +24,7 @@ TEST_LABEL="[eosio_build]" run bash -c "printf \"y\ny\ny\nn\nn\n\" | ./${SCRIPT_LOCATION}" [[ ! -z $(echo "${output}" | grep "Unable to find .* compiler") ]] || exit fi - fi + fi cd ./scripts # Also test that we can run the script from a directory other than the root run bash -c "./eosio_build.sh -y -P" @@ -77,4 +77,4 @@ TEST_LABEL="[eosio_build]" [[ ! -z $(echo "${output}" | grep "Invalid Option!") ]] || exit run bash -c "./$SCRIPT_LOCATION -h" [[ ! -z $(echo "${output}" | grep "Usage:") ]] || exit -} +} \ No newline at end of file diff --git a/tests/bash-bats/eosio_build_darwin.sh b/tests/bash-bats/eosio_build_darwin.sh index 0fc231950dc..e5c063ca4e2 100644 --- a/tests/bash-bats/eosio_build_darwin.sh +++ b/tests/bash-bats/eosio_build_darwin.sh @@ -30,7 +30,7 @@ export TEST_LABEL="[eosio_build_darwin]" [[ ! -z $(echo "${output}" | grep "Starting EOSIO Dependency Install") ]] || exit [[ ! -z $(echo "${output}" | grep "Executing: /usr/bin/xcode-select --install") ]] || exit [[ -z $(echo "${output}" | grep " - NOT found") ]] || exit - rm -f $CMAKE + # rm -f $CMAKE [[ ! -z $(echo "${output}" | grep "[Updating HomeBrew]") ]] || exit [[ ! -z $(echo "${output}" | grep "brew tap eosio/eosio") ]] || exit [[ ! -z $(echo "${output}" | grep "brew install.*llvm@4.*") ]] || exit @@ -39,4 +39,4 @@ export TEST_LABEL="[eosio_build_darwin]" [[ ! -z $(echo "${output}" | grep "Starting EOSIO Build") ]] || exit [[ ! 
-z $(echo "${output}" | grep " --with-iostreams --with-date_time") ]] || exit # BOOST [[ ! -z $(echo "${output}" | grep "EOSIO has been successfully built") ]] || exit -} +} \ No newline at end of file diff --git a/tests/bash-bats/eosio_uninstall.sh b/tests/bash-bats/eosio_uninstall.sh index 4942693cff9..753832aee33 100644 --- a/tests/bash-bats/eosio_uninstall.sh +++ b/tests/bash-bats/eosio_uninstall.sh @@ -70,4 +70,4 @@ rm -rf $VAR_DIR/log rm -rf $ETC_DIR rm -rf $LIB_DIR rm -rf $MONGODB_LOG_DIR -rm -rf $MONGODB_DATA_DIR +rm -rf $MONGODB_DATA_DIR \ No newline at end of file diff --git a/tests/bash-bats/helpers/general.sh b/tests/bash-bats/helpers/general.sh index 1a03c29ead3..e553e07bd32 100644 --- a/tests/bash-bats/helpers/general.sh +++ b/tests/bash-bats/helpers/general.sh @@ -21,4 +21,4 @@ fi if [[ ! -d "tests" ]] && [[ ! -f "README.md" ]]; then echo "You must navigate into the root directory to execute tests..." >&3 exit 1 -fi +fi \ No newline at end of file diff --git a/tests/bash-bats/modules/cmake.sh b/tests/bash-bats/modules/cmake.sh index e9be5319ce9..e726fce9dc3 100755 --- a/tests/bash-bats/modules/cmake.sh +++ b/tests/bash-bats/modules/cmake.sh @@ -40,4 +40,4 @@ load ../helpers/functions [[ ! -z $(echo "${output}" | grep "Executing: bash -c ${BIN_DIR}/cmake") ]] || exit [[ ! 
-z $(echo "${output}" | grep "CMAKE successfully installed") ]] || exit fi -} +} \ No newline at end of file diff --git a/tests/block_log_util_test.py b/tests/block_log_util_test.py new file mode 100755 index 00000000000..1278808c50b --- /dev/null +++ b/tests/block_log_util_test.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +from testUtils import BlockLogAction +import time +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import BlockType +import os +import signal +import subprocess +from TestHelper import AppArgs +from TestHelper import TestHelper + +############################################################### +# block_log_util_test +# Test verifies that the blockLogUtil is still compatible with nodeos +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit + +from core_symbol import CORE_SYMBOL + +def verifyBlockLog(expected_block_num, trimmedBlockLog): + firstBlockNum = expected_block_num + for block in trimmedBlockLog: + assert 'block_num' in block, print("ERROR: eosio-blocklog didn't return block output") + block_num = block['block_num'] + assert block_num == expected_block_num + expected_block_num += 1 + Print("Block_log contiguous from block number %d to %d" % (firstBlockNum, expected_block_num - 1)) + + +appArgs=AppArgs() +args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run"}) +Utils.Debug=args.v +pnodes=2 +cluster=Cluster(walletd=True) +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontKill=args.leave_running +prodCount=2 +killAll=args.clean_run +walletPort=TestHelper.DEFAULT_WALLET_PORT +totalNodes=pnodes+1 + +walletMgr=WalletMgr(True, port=walletPort) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill + +WalletdName=Utils.EosWalletName +ClientName="cleos" + +try: + TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) + + 
cluster.killall(allInstances=killAll) + cluster.cleanup() + Print("Stand up cluster") + if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, useBiosBootFile=False) is False: + Utils.errorExit("Failed to stand up eos cluster.") + + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + biosNode=cluster.biosNode + node0=cluster.getNode(0) + node1=cluster.getNode(1) + + blockNum=100 + Print("Wait till we at least get to block %d" % (blockNum)) + node0.waitForBlock(blockNum, blockType=BlockType.lib) + info=node0.getInfo(exitOnError=True) + headBlockNum=info["head_block_num"] + lib=info["last_irreversible_block_num"] + + Print("Kill the node we want to verify its block log") + node0.kill(signal.SIGTERM) + + Print("Wait for node0's head block to become irreversible") + node1.waitForBlock(headBlockNum, blockType=BlockType.lib) + infoAfter=node1.getInfo(exitOnError=True) + headBlockNumAfter=infoAfter["head_block_num"] + + def checkBlockLog(blockLog, blockNumsToFind, firstBlockNum=1): + foundBlockNums=[] + nextBlockNum=firstBlockNum + previous=0 + nextIndex=0 + for block in blockLog: + blockNum=block["block_num"] + if nextBlockNum!=blockNum: + Utils.errorExit("BlockLog should progress to the next block number, expected block number %d but got %d" % (nextBlockNum, blockNum)) + if nextIndex blockNumsToFind[nextIndex], "expects passed in array, blockNumsToFind to increase from smallest to largest, %d is less than or equal to %d" % (next, previous) + nextIndex+=1 + + return foundBlockNums + + Print("Retrieve the whole blocklog for node 0") + blockLog=cluster.getBlockLog(0) + foundBlockNums=checkBlockLog(blockLog, [headBlockNum, headBlockNumAfter]) + assert foundBlockNums[0], "Couldn't find \"%d\" in blocklog:\n\"%s\"\n" % (foundBlockNums[0], output) + assert not foundBlockNums[1], "Should not find \"%d\" in blocklog:\n\"%s\"\n" % (foundBlockNums[1], blockLog) + + 
output=cluster.getBlockLog(0, blockLogAction=BlockLogAction.smoke_test) + expectedStr="no problems found" + assert output.find(expectedStr) != -1, "Couldn't find \"%s\" in:\n\"%s\"\n" % (expectedStr, output) + + blockLogDir=Utils.getNodeDataDir(0, "blocks") + duplicateIndexFileName=os.path.join(blockLogDir, "duplicate.index") + output=cluster.getBlockLog(0, blockLogAction=BlockLogAction.make_index, outputFile=duplicateIndexFileName) + assert output is not None, "Couldn't make new index file \"%s\"\n" % (duplicateIndexFileName) + + blockIndexFileName=os.path.join(blockLogDir, "blocks.index") + blockIndexFile=open(blockIndexFileName,"rb") + duplicateIndexFile=open(duplicateIndexFileName,"rb") + blockIndexStr=blockIndexFile.read() + duplicateIndexStr=duplicateIndexFile.read() + assert blockIndexStr==duplicateIndexStr, "Generated file \%%s\" didn't match original \"%s\"" % (duplicateIndexFileName, blockIndexFileName) + + try: + Print("Head block num %d will not be in block log (it will be in reversible DB), so --trim will throw an exception" % (headBlockNum)) + output=cluster.getBlockLog(0, blockLogAction=BlockLogAction.trim, last=headBlockNum, throwException=True) + Utils.errorExit("BlockLogUtil --trim should have indicated error for last value set to lib (%d) " + + "which should not do anything since only trimming blocklog and not irreversible blocks" % (lib)) + except subprocess.CalledProcessError as ex: + pass + + beforeEndOfBlockLog=lib-20 + Print("Block num %d will definitely be at least one block behind the most recent entry in block log, so --trim will work" % (beforeEndOfBlockLog)) + output=cluster.getBlockLog(0, blockLogAction=BlockLogAction.trim, last=beforeEndOfBlockLog, throwException=True) + + Print("Kill the non production node, we want to verify its block log") + cluster.getNode(2).kill(signal.SIGTERM) + + Print("Trim off block num 1 to remove genesis block from block log.") + output=cluster.getBlockLog(2, blockLogAction=BlockLogAction.trim, first=2, 
throwException=True) + + Print("Smoke test the trimmed block log.") + output=cluster.getBlockLog(2, blockLogAction=BlockLogAction.smoke_test) + + Print("Analyze block log.") + trimmedBlockLog=cluster.getBlockLog(2, blockLogAction=BlockLogAction.return_blocks) + + verifyBlockLog(2, trimmedBlockLog) + + # relaunch the node with the truncated block log and ensure it catches back up with the producers + current_head_block_num = node1.getInfo()["head_block_num"] + cluster.getNode(2).relaunch(2, cachePopen=True) + assert cluster.getNode(2).waitForBlock(current_head_block_num, timeout=60, reportInterval=15) + + # ensure it continues to advance + current_head_block_num = node1.getInfo()["head_block_num"] + assert cluster.getNode(2).waitForBlock(current_head_block_num, timeout=60, reportInterval=15) + info = cluster.getNode(2).getInfo() + block = cluster.getNode(2).getBlock(2) + assert block is not None + block = cluster.getNode(2).getBlock(1, silentErrors=True) + assert block is None + + # verify it shuts down cleanly + cluster.getNode(2).interruptAndVerifyExitStatus() + + firstBlock = info["last_irreversible_block_num"] + Print("Trim off block num %s." 
% (firstBlock)) + output=cluster.getBlockLog(2, blockLogAction=BlockLogAction.trim, first=firstBlock, throwException=True) + + Print("Smoke test the trimmed block log.") + output=cluster.getBlockLog(2, blockLogAction=BlockLogAction.smoke_test) + + Print("Analyze block log.") + trimmedBlockLog=cluster.getBlockLog(2, blockLogAction=BlockLogAction.return_blocks) + + verifyBlockLog(firstBlock, trimmedBlockLog) + + # relaunch the node with the truncated block log and ensure it catches back up with the producers + current_head_block_num = node1.getInfo()["head_block_num"] + assert current_head_block_num >= info["head_block_num"] + cluster.getNode(2).relaunch(2, cachePopen=True) + assert cluster.getNode(2).waitForBlock(current_head_block_num, timeout=60, reportInterval=15) + + # ensure it continues to advance + current_head_block_num = node1.getInfo()["head_block_num"] + assert cluster.getNode(2).waitForBlock(current_head_block_num, timeout=60, reportInterval=15) + info = cluster.getNode(2).getInfo() + block = cluster.getNode(2).getBlock(firstBlock) + assert block is not None + block = cluster.getNode(2).getBlock(firstBlock - 1, silentErrors=True) + assert block is None + block = cluster.getNode(2).getBlock(1, silentErrors=True) + assert block is None + + # verify it shuts down cleanly + cluster.getNode(2).interruptAndVerifyExitStatus() + + testSuccessful=True + +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + +exit(0) diff --git a/tests/chain_plugin_tests.cpp b/tests/chain_plugin_tests.cpp index a6e119a5b06..5336baab7f4 100644 --- a/tests/chain_plugin_tests.cpp +++ b/tests/chain_plugin_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #include #include diff --git a/tests/distributed-transactions-remote-test.py b/tests/distributed-transactions-remote-test.py index 
9e79b352329..8e98d976e24 100755 --- a/tests/distributed-transactions-remote-test.py +++ b/tests/distributed-transactions-remote-test.py @@ -10,8 +10,10 @@ ############################################################### # distributed-transactions-remote-test +# # Tests remote capability of the distributed-transactions-test. Test will setup cluster and pass nodes info to distributed-transactions-test. E.g. # distributed-transactions-remote-test.py -v --clean-run --dump-error-detail +# ############################################################### Print=Utils.Print diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index 9a02d5e6de4..dac7a64b50b 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -7,6 +7,18 @@ import random +############################################################### +# distributed-transactions-test +# +# Performs currency transfers between N accounts sent to http endpoints of +# N nodes and verifies, after a steady state is reached, that the accounts +# balances are correct +# if called with --nodes-file it will will load a json description of nodes +# that are already running and run distributed test against them (not +# currently testing this feature) +# +############################################################### + Print=Utils.Print errorExit=Utils.errorExit diff --git a/tests/get_table_tests.cpp b/tests/get_table_tests.cpp index 91fca59ef3b..f6d66de136a 100644 --- a/tests/get_table_tests.cpp +++ b/tests/get_table_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #include #include @@ -452,4 +448,226 @@ BOOST_FIXTURE_TEST_CASE( get_table_by_seckey_test, TESTER ) try { } FC_LOG_AND_RETHROW() + +BOOST_FIXTURE_TEST_CASE( get_table_next_key_test, TESTER ) try { + create_account(N(test)); + + // setup contract and abi + set_code( N(test), contracts::get_table_test_wasm() ); + set_abi( N(test), 
contracts::get_table_test_abi().data() ); + produce_block(); + + // Init some data + push_action(N(test), N(addnumobj), N(test), mutable_variant_object()("input", 2)); + push_action(N(test), N(addnumobj), N(test), mutable_variant_object()("input", 5)); + push_action(N(test), N(addnumobj), N(test), mutable_variant_object()("input", 7)); + push_action(N(test), N(addhashobj), N(test), mutable_variant_object()("hashinput", "firstinput")); + push_action(N(test), N(addhashobj), N(test), mutable_variant_object()("hashinput", "secondinput")); + push_action(N(test), N(addhashobj), N(test), mutable_variant_object()("hashinput", "thirdinput")); + produce_block(); + + // The result of the init will populate + // For numobjs table (secondary index is on sec64, sec128, secdouble, secldouble) + // { + // "rows": [{ + // "key": 0, + // "sec64": 2, + // "sec128": "0x02000000000000000000000000000000", + // "secdouble": "2.00000000000000000", + // "secldouble": "0x00000000000000000000000000000040" + // },{ + // "key": 1, + // "sec64": 5, + // "sec128": "0x05000000000000000000000000000000", + // "secdouble": "5.00000000000000000", + // "secldouble": "0x00000000000000000000000000400140" + // },{ + // "key": 2, + // "sec64": 7, + // "sec128": "0x07000000000000000000000000000000", + // "secdouble": "7.00000000000000000", + // "secldouble": "0x00000000000000000000000000c00140" + // } + // "more": false, + // "next_key": "" + // } + // For hashobjs table (secondary index is on sec256 and sec160): + // { + // "rows": [{ + // "key": 0, + // "hash_input": "firstinput", + // "sec256": "05f5aa6b6c5568c53e886591daa9d9f636fa8e77873581ba67ca46a0f96c226e", + // "sec160": "2a9baa59f1e376eda2e963c140d13c7e77c2f1fb" + // },{ + // "key": 1, + // "hash_input": "secondinput", + // "sec256": "3cb93a80b47b9d70c5296be3817d34b48568893b31468e3a76337bb7d3d0c264", + // "sec160": "fb9d03d3012dc2a6c7b319f914542e3423550c2a" + // },{ + // "key": 2, + // "hash_input": "thirdinput", + // "sec256": 
"2652d68fbbf6000c703b35fdc607b09cd8218cbeea1d108b5c9e84842cdd5ea5", + // "sec160": "ab4314638b573fdc39e5a7b107938ad1b5a16414" + // } + // ], + // "more": false, + // "next_key": "" + // } + + + chain_apis::read_only plugin(*(this->control), fc::microseconds::maximum()); + chain_apis::read_only::get_table_rows_params params{ + .json=true, + .code=N(test), + .scope="test", + .limit=1 + }; + + params.table = N(numobjs); + + // i64 primary key type + params.key_type = "i64"; + params.index_position = "1"; + params.lower_bound = "0"; + + auto res_1 = plugin.get_table_rows(params); + BOOST_REQUIRE(res_1.rows.size() > 0); + BOOST_TEST(res_1.rows[0].get_object()["key"].as() == 0); + BOOST_TEST(res_1.next_key == "1"); + params.lower_bound = res_1.next_key; + auto more2_res_1 = plugin.get_table_rows(params); + BOOST_REQUIRE(more2_res_1.rows.size() > 0); + BOOST_TEST(more2_res_1.rows[0].get_object()["key"].as() == 1); + + + // i64 secondary key type + params.key_type = "i64"; + params.index_position = "2"; + params.lower_bound = "5"; + + auto res_2 = plugin.get_table_rows(params); + BOOST_REQUIRE(res_2.rows.size() > 0); + BOOST_TEST(res_2.rows[0].get_object()["sec64"].as() == 5); + BOOST_TEST(res_2.next_key == "7"); + params.lower_bound = res_2.next_key; + auto more2_res_2 = plugin.get_table_rows(params); + BOOST_REQUIRE(more2_res_2.rows.size() > 0); + BOOST_TEST(more2_res_2.rows[0].get_object()["sec64"].as() == 7); + + // i128 secondary key type + params.key_type = "i128"; + params.index_position = "3"; + params.lower_bound = "5"; + + auto res_3 = plugin.get_table_rows(params); + chain::uint128_t sec128_expected_value = 5; + BOOST_REQUIRE(res_3.rows.size() > 0); + BOOST_CHECK(res_3.rows[0].get_object()["sec128"].as() == sec128_expected_value); + BOOST_TEST(res_3.next_key == "7"); + params.lower_bound = res_3.next_key; + auto more2_res_3 = plugin.get_table_rows(params); + chain::uint128_t more2_sec128_expected_value = 7; + BOOST_REQUIRE(more2_res_3.rows.size() > 0); + 
BOOST_CHECK(more2_res_3.rows[0].get_object()["sec128"].as() == more2_sec128_expected_value); + + // float64 secondary key type + params.key_type = "float64"; + params.index_position = "4"; + params.lower_bound = "5.0"; + + auto res_4 = plugin.get_table_rows(params); + float64_t secdouble_expected_value = ui64_to_f64(5); + BOOST_REQUIRE(res_4.rows.size() > 0); + double secdouble_res_value = res_4.rows[0].get_object()["secdouble"].as(); + BOOST_CHECK(*reinterpret_cast(&secdouble_res_value) == secdouble_expected_value); + BOOST_TEST(res_4.next_key == "7.00000000000000000"); + params.lower_bound = res_4.next_key; + auto more2_res_4 = plugin.get_table_rows(params); + float64_t more2_secdouble_expected_value = ui64_to_f64(7); + BOOST_REQUIRE(more2_res_4.rows.size() > 0); + double more2_secdouble_res_value = more2_res_4.rows[0].get_object()["secdouble"].as(); + BOOST_CHECK(*reinterpret_cast(&more2_secdouble_res_value) == more2_secdouble_expected_value); + + // float128 secondary key type + params.key_type = "float128"; + params.index_position = "5"; + params.lower_bound = "5.0"; + + auto res_5 = plugin.get_table_rows(params); + float128_t secldouble_expected_value = ui64_to_f128(5); + BOOST_REQUIRE(res_5.rows.size() > 0); + float128_t secldouble_res_value = res_5.rows[0].get_object()["secldouble"].as(); + BOOST_TEST(secldouble_res_value == secldouble_expected_value); + BOOST_TEST(res_5.next_key == "7.00000000000000000"); + params.lower_bound = res_5.next_key; + auto more2_res_5 = plugin.get_table_rows(params); + float128_t more2_secldouble_expected_value = ui64_to_f128(7); + BOOST_REQUIRE(more2_res_5.rows.size() > 0); + float128_t more2_secldouble_res_value = more2_res_5.rows[0].get_object()["secldouble"].as(); + BOOST_TEST(more2_secldouble_res_value == more2_secldouble_expected_value); + + params.table = N(hashobjs); + + // sha256 secondary key type + params.key_type = "sha256"; + params.index_position = "2"; + params.lower_bound = 
"2652d68fbbf6000c703b35fdc607b09cd8218cbeea1d108b5c9e84842cdd5ea5"; // This is hash of "thirdinput" + + auto res_6 = plugin.get_table_rows(params); + checksum256_type sec256_expected_value = checksum256_type::hash(std::string("thirdinput")); + BOOST_REQUIRE(res_6.rows.size() > 0); + checksum256_type sec256_res_value = res_6.rows[0].get_object()["sec256"].as(); + BOOST_TEST(sec256_res_value == sec256_expected_value); + BOOST_TEST(res_6.rows[0].get_object()["hash_input"].as() == std::string("thirdinput")); + BOOST_TEST(res_6.next_key == "3cb93a80b47b9d70c5296be3817d34b48568893b31468e3a76337bb7d3d0c264"); + params.lower_bound = res_6.next_key; + auto more2_res_6 = plugin.get_table_rows(params); + checksum256_type more2_sec256_expected_value = checksum256_type::hash(std::string("secondinput")); + BOOST_REQUIRE(more2_res_6.rows.size() > 0); + checksum256_type more2_sec256_res_value = more2_res_6.rows[0].get_object()["sec256"].as(); + BOOST_TEST(more2_sec256_res_value == more2_sec256_expected_value); + BOOST_TEST(more2_res_6.rows[0].get_object()["hash_input"].as() == std::string("secondinput")); + + // i256 secondary key type + params.key_type = "i256"; + params.index_position = "2"; + params.lower_bound = "0x2652d68fbbf6000c703b35fdc607b09cd8218cbeea1d108b5c9e84842cdd5ea5"; // This is sha256 hash of "thirdinput" as number + + auto res_7 = plugin.get_table_rows(params); + checksum256_type i256_expected_value = checksum256_type::hash(std::string("thirdinput")); + BOOST_REQUIRE(res_7.rows.size() > 0); + checksum256_type i256_res_value = res_7.rows[0].get_object()["sec256"].as(); + BOOST_TEST(i256_res_value == i256_expected_value); + BOOST_TEST(res_7.rows[0].get_object()["hash_input"].as() == "thirdinput"); + BOOST_TEST(res_7.next_key == "0x3cb93a80b47b9d70c5296be3817d34b48568893b31468e3a76337bb7d3d0c264"); + params.lower_bound = res_7.next_key; + auto more2_res_7 = plugin.get_table_rows(params); + checksum256_type more2_i256_expected_value = 
checksum256_type::hash(std::string("secondinput")); + BOOST_REQUIRE(more2_res_7.rows.size() > 0); + checksum256_type more2_i256_res_value = more2_res_7.rows[0].get_object()["sec256"].as(); + BOOST_TEST(more2_i256_res_value == more2_i256_expected_value); + BOOST_TEST(more2_res_7.rows[0].get_object()["hash_input"].as() == "secondinput"); + + // ripemd160 secondary key type + params.key_type = "ripemd160"; + params.index_position = "3"; + params.lower_bound = "ab4314638b573fdc39e5a7b107938ad1b5a16414"; // This is ripemd160 hash of "thirdinput" + + auto res_8 = plugin.get_table_rows(params); + ripemd160 sec160_expected_value = ripemd160::hash(std::string("thirdinput")); + BOOST_REQUIRE(res_8.rows.size() > 0); + ripemd160 sec160_res_value = res_8.rows[0].get_object()["sec160"].as(); + BOOST_TEST(sec160_res_value == sec160_expected_value); + BOOST_TEST(res_8.rows[0].get_object()["hash_input"].as() == "thirdinput"); + BOOST_TEST(res_8.next_key == "fb9d03d3012dc2a6c7b319f914542e3423550c2a"); + params.lower_bound = res_8.next_key; + auto more2_res_8 = plugin.get_table_rows(params); + ripemd160 more2_sec160_expected_value = ripemd160::hash(std::string("secondinput")); + BOOST_REQUIRE(more2_res_8.rows.size() > 0); + ripemd160 more2_sec160_res_value = more2_res_8.rows[0].get_object()["sec160"].as(); + BOOST_TEST(more2_sec160_res_value == more2_sec160_expected_value); + BOOST_TEST(more2_res_8.rows[0].get_object()["hash_input"].as() == "secondinput"); + +} FC_LOG_AND_RETHROW() /// get_table_next_key_test + BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/launcher_test.py b/tests/launcher_test.py index bac5ed447a2..19ef3fddc92 100755 --- a/tests/launcher_test.py +++ b/tests/launcher_test.py @@ -10,9 +10,10 @@ import re ############################################################### -# nodeos_run_test -# --dump-error-details -# --keep-logs +# launcher-test +# +# Specifically tests using the bios bootstrap script that is created by eosio-launcher +# 
############################################################### Print=Utils.Print diff --git a/tests/main.cpp b/tests/main.cpp index 0644ce80545..eb7cbf7cae7 100644 --- a/tests/main.cpp +++ b/tests/main.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/tests/nodeos_chainbase_allocation_test.py b/tests/nodeos_chainbase_allocation_test.py new file mode 100755 index 00000000000..697ada3551e --- /dev/null +++ b/tests/nodeos_chainbase_allocation_test.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 + +from testUtils import Utils, Account +from Cluster import Cluster +from TestHelper import TestHelper +from WalletMgr import WalletMgr +from Node import Node + +import signal +import json +import time +import os +import filecmp + +############################################################### +# nodeos_chainbase_allocation_test +# +# Test snapshot creation and restarting from snapshot +# +############################################################### + +# Parse command line arguments +args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs"}) +Utils.Debug = args.v +killAll=args.clean_run +dumpErrorDetails=args.dump_error_details +dontKill=args.leave_running +killEosInstances=not dontKill +killWallet=not dontKill +keepLogs=args.keep_logs + +walletMgr=WalletMgr(True) +cluster=Cluster(walletd=True) +cluster.setWalletMgr(walletMgr) + +testSuccessful = False +try: + TestHelper.printSystemInfo("BEGIN") + cluster.killall(allInstances=killAll) + cluster.cleanup() + + # The following is the list of chainbase objects that need to be verified: + # - account_object (bootstrap) + # - code_object (bootstrap) + # - generated_transaction_object + # - global_property_object + # - key_value_object (bootstrap) + # - protocol_state_object (bootstrap) + # - permission_object (bootstrap) + # The bootstrap process has created account_object and code_object (by uploading the bios 
contract), + # key_value_object (token creation), protocol_state_object (preactivation feature), and permission_object + # (automatically taken care by the automatically generated eosio account) + assert cluster.launch( + pnodes=1, + prodCount=1, + totalProducers=1, + totalNodes=2, + useBiosBootFile=False, + loadSystemContract=False, + specificExtraNodeosArgs={ + 1:"--read-mode irreversible --plugin eosio::producer_api_plugin"}) + + producerNodeId = 0 + irrNodeId = 1 + producerNode = cluster.getNode(producerNodeId) + irrNode = cluster.getNode(irrNodeId) + + # Create delayed transaction to create "generated_transaction_object" + cmd = "create account -j eosio sample EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV\ + EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV --delay-sec 600 -p eosio" + trans = producerNode.processCleosCmd(cmd, cmd, silentErrors=False) + assert trans + + # Schedule a new producer to trigger new producer schedule for "global_property_object" + newProducerAcc = Account("newprod") + newProducerAcc.ownerPublicKey = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + newProducerAcc.activePublicKey = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + producerNode.createAccount(newProducerAcc, cluster.eosioAccount) + + setProdsStr = '{"schedule": [' + setProdsStr += '{"producer_name":' + newProducerAcc.name + ',"block_signing_key":' + newProducerAcc.activePublicKey + '}' + setProdsStr += ']}' + cmd="push action -j eosio setprods '{}' -p eosio".format(setProdsStr) + trans = producerNode.processCleosCmd(cmd, cmd, silentErrors=False) + assert trans + setProdsBlockNum = int(trans["processed"]["block_num"]) + + # Wait until the block where set prods is executed become irreversible so the producer schedule + def isSetProdsBlockNumIrr(): + return producerNode.getIrreversibleBlockNum() >= setProdsBlockNum + Utils.waitForBool(isSetProdsBlockNumIrr, timeout=30, sleepTime=0.1) + # Once it is irreversible, immediately pause the producer so 
the promoted producer schedule is not cleared + producerNode.processCurlCmd("producer", "pause", "") + + producerNode.kill(signal.SIGTERM) + + # Create the snapshot and rename it to avoid name conflict later on + res = irrNode.createSnapshot() + beforeShutdownSnapshotPath = res["snapshot_name"] + snapshotPathWithoutExt, snapshotExt = os.path.splitext(beforeShutdownSnapshotPath) + os.rename(beforeShutdownSnapshotPath, snapshotPathWithoutExt + "_before_shutdown" + snapshotExt) + + # Restart irr node and ensure the snapshot is still identical + irrNode.kill(signal.SIGTERM) + isRelaunchSuccess = irrNode.relaunch(irrNodeId, "", timeout=5, cachePopen=True) + assert isRelaunchSuccess, "Fail to relaunch" + res = irrNode.createSnapshot() + afterShutdownSnapshotPath = res["snapshot_name"] + assert filecmp.cmp(beforeShutdownSnapshotPath, afterShutdownSnapshotPath), "snapshot is not identical" + + testSuccessful = True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index d8b9553b7d8..745f18c2f51 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -16,9 +16,26 @@ ############################################################### # nodeos_forked_chain_test -# --dump-error-details -# --keep-logs +# +# This test sets up 2 producing nodes and one "bridge" node using test_control_api_plugin. +# One producing node has 11 of the elected producers and the other has 10 of the elected producers. +# All the producers are named in alphabetical order, so that the 11 producers, in the one production node, are +# scheduled first, followed by the 10 producers in the other producer node. Each producing node is only connected +# to the other producing node via the "bridge" node. 
+# The bridge node has the test_control_api_plugin, which exposes a restful interface that the test script uses to kill +# the "bridge" node at a specific producer in the production cycle. This is used to fork the producer network +# precisely when the 11 producer node has finished producing and the other producing node is about to produce. +# The fork in the producer network results in one fork of the block chain that advances with 10 producers with a LIB +# that has advanced, since all of the previous blocks were confirmed and the producer that was scheduled for that +# slot produced it, and one with 11 producers with a LIB that has not advanced. This situation is validated by +# the test script. +# After both chains are allowed to produce, the "bridge" node is turned back on. +# Time is allowed to progress so that the "bridge" node can catchup and both producer nodes to come to consensus +# The block log is then checked for both producer nodes to verify that the 10 producer fork is selected and that +# both nodes are in agreement on the block log. 
+# ############################################################### + Print=Utils.Print from core_symbol import CORE_SYMBOL @@ -249,7 +266,7 @@ def getMinHeadAndLib(prodNodes): blockProducer=node.getBlockProducerByNum(blockNum) - # *** Identify what the production cycel is *** + # *** Identify what the production cycle is *** productionCycle=[] producerToSlot={} diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py index dd7c836e98f..f511132ab1d 100755 --- a/tests/nodeos_irreversible_mode_test.py +++ b/tests/nodeos_irreversible_mode_test.py @@ -8,7 +8,6 @@ from TestHelper import TestHelper from testUtils import Account -import urllib.request import re import os import time @@ -16,12 +15,11 @@ import subprocess import shutil - ############################################################### # nodeos_irreversible_mode_test -# --dump-error-details -# --keep-logs -# -v --leave-running --clean-run +# +# Many smaller tests centered around irreversible mode +# ############################################################### Print = Utils.Print @@ -46,10 +44,6 @@ cluster=Cluster(walletd=True) cluster.setWalletMgr(walletMgr) -def makeSnapshot(nodeId): - req = urllib.request.Request("http://127.0.0.1:{}/v1/producer/create_snapshot".format(8888 + int(nodeId))) - urllib.request.urlopen(req) - def backupBlksDir(nodeId): dataDir = Utils.getNodeDataDir(nodeId) sourceDir = os.path.join(dataDir, "blocks") @@ -144,8 +138,8 @@ def confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBefore assert forkDbHead == forkDbHeadBeforeSwitchMode, \ "Fork db head ({}) should be equal to fork db head before switch mode ({}) ".format(forkDbHead, forkDbHeadBeforeSwitchMode) -def relaunchNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None, relaunchAssertMessage="Fail to relaunch"): - isRelaunchSuccess = node.relaunch(nodeId, chainArg=chainArg, addOrSwapFlags=addOrSwapFlags, timeout=relaunchTimeout, cachePopen=True) +def relaunchNode(node: 
Node, nodeId, chainArg="", addSwapFlags=None, relaunchAssertMessage="Fail to relaunch"): + isRelaunchSuccess = node.relaunch(nodeId, chainArg=chainArg, addSwapFlags=addSwapFlags, timeout=relaunchTimeout, cachePopen=True) time.sleep(1) # Give a second to replay or resync if needed assert isRelaunchSuccess, relaunchAssertMessage return isRelaunchSuccess @@ -258,7 +252,7 @@ def switchIrrToSpecMode(nodeIdOfNodeToTest, nodeToTest): # Kill and relaunch in speculative mode nodeToTest.kill(signal.SIGTERM) - relaunchNode(nodeToTest, nodeIdOfNodeToTest, addOrSwapFlags={"--read-mode": "speculative"}) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, addSwapFlags={"--read-mode": "speculative"}) # Ensure the node condition is as expected after relaunch confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) @@ -293,7 +287,7 @@ def switchIrrToSpecModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): # Kill and relaunch in irreversible mode nodeToTest.kill(signal.SIGTERM) waitForBlksProducedAndLibAdvanced() # Wait for some blks to be produced and lib advance) - relaunchNode(nodeToTest, nodeIdOfNodeToTest, addOrSwapFlags={"--read-mode": "speculative"}) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, addSwapFlags={"--read-mode": "speculative"}) # Ensure the node condition is as expected after relaunch ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) @@ -353,13 +347,13 @@ def switchToSpecModeWithIrrModeSnapshot(nodeIdOfNodeToTest, nodeToTest): # Relaunch in irreversible mode and create the snapshot relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible") confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) - makeSnapshot(nodeIdOfNodeToTest) + nodeToTest.createSnapshot() nodeToTest.kill(signal.SIGTERM) # Start from clean data dir, recover back up blocks, and then relaunch with irreversible snapshot removeState(nodeIdOfNodeToTest) recoverBackedupBlksDir(nodeIdOfNodeToTest) # this function will delete the existing blocks dir 
first - relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --snapshot {}".format(getLatestSnapshot(nodeIdOfNodeToTest)), addOrSwapFlags={"--read-mode": "speculative"}) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --snapshot {}".format(getLatestSnapshot(nodeIdOfNodeToTest)), addSwapFlags={"--read-mode": "speculative"}) confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest) # Ensure it automatically replays "reversible blocks", i.e. head lib and fork db should be the same headLibAndForkDbHeadAfterRelaunch = getHeadLibAndForkDbHead(nodeToTest) diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py index be3324f969e..fdc0c3785bc 100755 --- a/tests/nodeos_multiple_version_protocol_feature_test.py +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -13,6 +13,13 @@ from os.path import join, exists from datetime import datetime +############################################################### +# nodeos_multiple_version_protocol_feature_test +# +# Test for verifying that older versions of nodeos can work with newer versions of nodeos. 
+# +############################################################### + # Parse command line arguments args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running", "--keep-logs", "--alternate-version-labels-file"}) @@ -29,10 +36,10 @@ cluster=Cluster(walletd=True) cluster.setWalletMgr(walletMgr) -def restartNode(node: Node, nodeId, chainArg=None, addOrSwapFlags=None, nodeosPath=None): +def restartNode(node: Node, nodeId, chainArg=None, addSwapFlags=None, nodeosPath=None): if not node.killed: node.kill(signal.SIGTERM) - isRelaunchSuccess = node.relaunch(nodeId, chainArg, addOrSwapFlags=addOrSwapFlags, + isRelaunchSuccess = node.relaunch(nodeId, chainArg, addSwapFlags=addSwapFlags, timeout=5, cachePopen=True, nodeosPath=nodeosPath) assert isRelaunchSuccess, "Fail to relaunch" diff --git a/tests/nodeos_protocol_feature_test.py b/tests/nodeos_protocol_feature_test.py index e42f934b2f4..369068494ef 100755 --- a/tests/nodeos_protocol_feature_test.py +++ b/tests/nodeos_protocol_feature_test.py @@ -11,6 +11,13 @@ from os.path import join from datetime import datetime +############################################################### +# nodeos_protocol_feature_test +# +# Many smaller tests centered around irreversible mode +# +############################################################### + # Parse command line arguments args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs"}) Utils.Debug = args.v @@ -23,17 +30,16 @@ # The following test case will test the Protocol Feature JSON reader of the blockchain -def restartNode(node: Node, nodeId, chainArg=None, addOrSwapFlags=None): +def restartNode(node: Node, nodeId, chainArg=None, addSwapFlags=None): if not node.killed: node.kill(signal.SIGTERM) - isRelaunchSuccess = node.relaunch(nodeId, chainArg, addOrSwapFlags=addOrSwapFlags, timeout=5, cachePopen=True) + isRelaunchSuccess = node.relaunch(nodeId, chainArg, addSwapFlags=addSwapFlags, timeout=5, 
cachePopen=True) assert isRelaunchSuccess, "Fail to relaunch" walletMgr=WalletMgr(True) cluster=Cluster(walletd=True) cluster.setWalletMgr(walletMgr) -# List to contain the test result message testSuccessful = False try: TestHelper.printSystemInfo("BEGIN") diff --git a/tests/nodeos_run_remote_test.py b/tests/nodeos_run_remote_test.py index 21e15bb9c72..8b02cabb3e5 100755 --- a/tests/nodeos_run_remote_test.py +++ b/tests/nodeos_run_remote_test.py @@ -8,8 +8,10 @@ ############################################################### # nodeos_run_remote_test +# # Tests remote capability of the nodeos_run_test. Test will setup cluster and pass nodes info to nodeos_run_test. E.g. # nodeos_run_remote_test.py -v --clean-run --dump-error-detail +# ############################################################### Print=Utils.Print diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index fbe7f8f6d05..90c41dc3485 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -12,8 +12,9 @@ ############################################################### # nodeos_run_test -# --dump-error-details -# --keep-logs +# +# General test that tests a wide range of general use actions around nodeos and keosd +# ############################################################### Print=Utils.Print @@ -244,7 +245,7 @@ cmdError("FAILURE - transfer failed") errorExit("Transfer verification failed. 
Excepted %s, actual: %s" % (expectedAmount, actualAmount)) - Print("Validating accounts after some user trasactions") + Print("Validating accounts after some user transactions") accounts=[testeraAccount, currencyAccount, exchangeAccount] cluster.validateAccounts(accounts) diff --git a/tests/nodeos_short_fork_take_over_test.py b/tests/nodeos_short_fork_take_over_test.py index b21172b8766..22b1c1ec7c4 100755 --- a/tests/nodeos_short_fork_take_over_test.py +++ b/tests/nodeos_short_fork_take_over_test.py @@ -16,8 +16,11 @@ ############################################################### # nodeos_short_fork_take_over_test -# --dump-error-details -# --keep-logs +# +# Similar scenario to nodeos_forked_chain_test, except that there are only 3 producers and, after the "bridge" node is +# shutdown, the second producer node is also shutdown. Then the "bridge" node is re-started followed by the producer +# node being started. +# ############################################################### Print=Utils.Print diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index 5934750d3a9..47cfc2e7b31 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -17,6 +17,7 @@ ############################################################### # nodeos_startup_catchup +# # Test configures a producing node and <--txn-plugins count> non-producing nodes with the # txn_test_gen_plugin. Each non-producing node starts generating transactions and sending them # to the producing node. 
@@ -26,6 +27,7 @@ # 4) restart the node # 5) the node is allowed to catch up to the producing node # 3) Repeat steps 2-5, <--catchup-count - 1> more times +# ############################################################### Print=Utils.Print @@ -141,12 +143,13 @@ def waitForNodeStarted(node): Print("Cycle through catchup scenarios") twoRounds=21*2*12 + twoRoundsTimeout=(twoRounds/2 + 10) #2 rounds in seconds + some leeway for catchup_num in range(0, catchupCount): Print("Start catchup node") cluster.launchUnstarted(cachePopen=True) lastLibNum=lib(node0) # verify producer lib is still advancing - waitForBlock(node0, lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + waitForBlock(node0, lastLibNum+1, timeout=twoRoundsTimeout, blockType=BlockType.lib) catchupNode=cluster.getNodes()[-1] catchupNodeNum=cluster.getNodes().index(catchupNode) @@ -155,11 +158,11 @@ def waitForNodeStarted(node): Print("Verify catchup node %s's LIB is advancing" % (catchupNodeNum)) # verify lib is advancing (before we wait for it to have to catchup with producer) - waitForBlock(catchupNode, lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + waitForBlock(catchupNode, lastCatchupLibNum+1, timeout=twoRoundsTimeout, blockType=BlockType.lib) Print("Verify catchup node is advancing to producer") numBlocksToCatchup=(lastLibNum-lastCatchupLibNum-1)+twoRounds - waitForBlock(catchupNode, lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) + waitForBlock(catchupNode, lastLibNum, timeout=twoRoundsTimeout, blockType=BlockType.lib) Print("Shutdown catchup node and validate exit code") catchupNode.interruptAndVerifyExitStatus(60) @@ -171,16 +174,16 @@ def waitForNodeStarted(node): Print("Verify catchup node is advancing") # verify catchup node is advancing to producer - waitForBlock(catchupNode, lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + waitForBlock(catchupNode, lastCatchupLibNum+1, timeout=twoRoundsTimeout, blockType=BlockType.lib) 
Print("Verify producer is still advancing LIB") lastLibNum=lib(node0) # verify producer lib is still advancing - node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + node0.waitForBlock(lastLibNum+1, timeout=twoRoundsTimeout, blockType=BlockType.lib) Print("Verify catchup node is advancing to producer") # verify catchup node is advancing to producer - waitForBlock(catchupNode, lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) + waitForBlock(catchupNode, lastLibNum, timeout=(numBlocksToCatchup/2 + 60), blockType=BlockType.lib) catchupNode.interruptAndVerifyExitStatus(60) catchupNode.popenProc=None diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index 70c7e428c9b..6c9d6c7fc00 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -13,6 +13,16 @@ import math import re +############################################################### +# nodeos_under_min_avail_ram +# +# Sets up 4 producing nodes using --chain-state-db-guard-size-mb and --chain-state-db-size-mb to verify that nodeos will +# shutdown safely when --chain-state-db-guard-size-mb is reached and restarts the shutdown nodes, with a higher +# --chain-state-db-size-mb size, to verify that the node can restart and continue till the guard is reached again. The +# test both verifies all nodes going down and 1 node at a time. 
+# +############################################################### + Print=Utils.Print errorExit=Utils.errorExit @@ -49,12 +59,6 @@ def setName(self, num): return retStr -############################################################### -# nodeos_voting_test -# --dump-error-details -# --keep-logs -############################################################### - args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--wallet-port"}) Utils.Debug=args.v totalNodes=4 @@ -111,6 +115,7 @@ def setName(self, num): nodes.append(cluster.getNode(1)) nodes.append(cluster.getNode(2)) nodes.append(cluster.getNode(3)) + numNodes=len(nodes) for account in accounts: @@ -163,7 +168,7 @@ def setName(self, num): data="{\"from\":\"%s\",\"to\":\"%s\",\"num\":%d}" % (fromAccount.name, toAccount.name, numAmount) opts="--permission %s@active --permission %s@active --expiration 90" % (contract, fromAccount.name) try: - trans=nodes[0].pushMessage(contract, action, data, opts) + trans=nodes[count % numNodes].pushMessage(contract, action, data, opts) if trans is None or not trans[0]: timeOutCount+=1 if timeOutCount>=3: @@ -206,23 +211,30 @@ def setName(self, num): errorExit("Failure - All Nodes should have died") time.sleep(5) + for i in range(numNodes): + f = open(Utils.getNodeDataDir(i) + "/stderr.txt") + contents = f.read() + if contents.find("database chain::guard_exception") == -1: + errorExit("Node%d is expected to exit because of database guard_exception, but was not." 
% (i)) + + Print("all nodes exited with expected reason database_guard_exception") + Print("relaunch nodes with new capacity") - addOrSwapFlags={} - numNodes=len(nodes) + addSwapFlags={} maxRAMValue+=2 currentMinimumMaxRAM=maxRAMValue enabledStaleProduction=False for i in range(numNodes): - addOrSwapFlags[maxRAMFlag]=str(maxRAMValue) - #addOrSwapFlags["--max-irreversible-block-age"]=str(-1) + addSwapFlags[maxRAMFlag]=str(maxRAMValue) + #addSwapFlags["--max-irreversible-block-age"]=str(-1) nodeIndex=numNodes-i-1 if not enabledStaleProduction: - addOrSwapFlags["--enable-stale-production"]="" # just enable stale production for the first node + addSwapFlags["--enable-stale-production"]="" # just enable stale production for the first node enabledStaleProduction=True - if not nodes[nodeIndex].relaunch(nodeIndex, "", newChain=False, addOrSwapFlags=addOrSwapFlags): + if not nodes[nodeIndex].relaunch(nodeIndex, "", newChain=False, addSwapFlags=addSwapFlags): Utils.cmdError("Failed to restart node0 with new capacity %s" % (maxRAMValue)) errorExit("Failure - Node should have restarted") - addOrSwapFlags={} + addSwapFlags={} maxRAMValue=currentMinimumMaxRAM+30 time.sleep(20) @@ -253,7 +265,7 @@ def setName(self, num): data="{\"from\":\"%s\",\"to\":\"%s\",\"num\":%d}" % (fromAccount.name, toAccount.name, numAmount) opts="--permission %s@active --permission %s@active --expiration 90" % (contract, fromAccount.name) try: - trans=nodes[0].pushMessage(contract, action, data, opts) + trans=nodes[count % numNodes].pushMessage(contract, action, data, opts) if trans is None or not trans[0]: Print("Failed to push create action to eosio contract. 
sleep for 60 seconds") time.sleep(60) @@ -276,16 +288,16 @@ def setName(self, num): errorExit("Failure - Node should be alive") Print("relaunch node with even more capacity") - addOrSwapFlags={} + addSwapFlags={} time.sleep(10) maxRAMValue=currentMinimumMaxRAM+5 currentMinimumMaxRAM=maxRAMValue - addOrSwapFlags[maxRAMFlag]=str(maxRAMValue) - if not nodes[len(nodes)-1].relaunch(nodeIndex, "", newChain=False, addOrSwapFlags=addOrSwapFlags): + addSwapFlags[maxRAMFlag]=str(maxRAMValue) + if not nodes[len(nodes)-1].relaunch(nodeIndex, "", newChain=False, addSwapFlags=addSwapFlags): Utils.cmdError("Failed to restart node %d with new capacity %s" % (numNodes-1, maxRAMValue)) errorExit("Failure - Node should have restarted") - addOrSwapFlags={} + addSwapFlags={} time.sleep(10) for node in nodes: @@ -312,7 +324,7 @@ def setName(self, num): data="{\"from\":\"%s\",\"to\":\"%s\",\"num\":%d}" % (fromAccount.name, toAccount.name, numAmount) opts="--permission %s@active --permission %s@active --expiration 90" % (contract, fromAccount.name) try: - trans=nodes[0].pushMessage(contract, action, data, opts) + trans=node.pushMessage(contract, action, data, opts) if trans is None or not trans[0]: Print("Failed to push create action to eosio contract. sleep for 60 seconds") time.sleep(60) diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index fad398a860a..a3c157e8027 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -13,9 +13,13 @@ ############################################################### # nodeos_voting_test -# --dump-error-details -# --keep-logs +# +# This test sets up multiple producing nodes, each with multiple producers per node. Different combinations of producers +# are voted into the production schedule and the block production is analyzed to determine if the correct producers are +# producing blocks and in the right number and order. 
+# ############################################################### + class ProducerToNode: map={} diff --git a/tests/prod_preactivation_test.py b/tests/prod_preactivation_test.py index 200477c7356..dbc96c2c457 100755 --- a/tests/prod_preactivation_test.py +++ b/tests/prod_preactivation_test.py @@ -108,7 +108,7 @@ node0 = cluster.getNode(0) contract="eosio.bios" - contractDir="unittests/contracts/%s" % (contract) + contractDir="unittests/contracts/old_versions/v1.7.0-develop-preactivate_feature/%s" % (contract) wasmFile="%s.wasm" % (contract) abiFile="%s.abi" % (contract) diff --git a/tests/release-build.sh b/tests/release-build.sh index abcfaa0ecae..4238df85e3b 100755 --- a/tests/release-build.sh +++ b/tests/release-build.sh @@ -10,16 +10,15 @@ echo 'asserts. This test checks that debug flag. Anyone intending to build and i echo 'nodeos from source should perform a "release build" which excludes asserts and' echo 'debugging symbols, and performs compiler optimizations.' echo '' -# check for xxd -if ! $(xxd --version 2>/dev/null); then - echo 'ERROR: Test requires xxd, but xxd was not found in your PATH!' +# check for jq +if ! $(jq --version 1>/dev/null); then + echo 'ERROR: Test requires jq, but jq was not found in your PATH!' echo '' - echo 'The xxd hex dump tool can be installed as part of the vim-common package on most operating systems.' exit 1 fi # find nodeos -[[ $(git --version) ]] && cd "$(git rev-parse --show-toplevel)/build" || cd "$(dirname "${BASH_SOURCE[0]}")/.." -if [[ ! -f programs/nodeos/nodeos ]]; then +[[ $(git --version) ]] && cd "$(git rev-parse --show-toplevel)/build/programs/nodeos" || cd "$(dirname "${BASH_SOURCE[0]}")/../programs/nodeos" +if [[ ! -f nodeos ]]; then echo 'ERROR: nodeos binary not found!' echo '' echo 'I looked here...' @@ -31,36 +30,29 @@ if [[ ! -f programs/nodeos/nodeos ]]; then echo '$ echo "$(dirname "${BASH_SOURCE[0]}")/.."' echo "$(dirname "${BASH_SOURCE[0]}")/.." echo 'Release build test not run.' 
- exit 2 + exit 1 fi # run nodeos to generate state files -mkdir release-build-test -programs/nodeos/nodeos --config-dir "$(pwd)/release-build-test/config" --data-dir "$(pwd)/release-build-test/data" 1>/dev/null 2>/dev/null & -sleep 10 -kill $! # kill nodeos gracefully, by PID -if [[ ! -f release-build-test/data/state/shared_memory.bin ]]; then - echo 'ERROR: nodeos state not found!' +./nodeos --extract-build-info build-info.json 1>/dev/null 2>/dev/null +if [[ ! -f build-info.json ]]; then + echo 'ERROR: Build info JSON file not found!' echo '' - echo 'Looked for shared_memory.bin in the following places:' - echo "$ ls -la \"$(pwd)/release-build-test/data/state\"" - ls -la "$(pwd)/release-build-test/data/state" + echo 'Looked in the following places:' + echo "$ ls -la \"$(pwd)\"" + ls -la "$(pwd)" echo 'Release build test not run.' - rm -rf release-build-test - exit 3 + exit 2 fi # test state files for debug flag -export DEBUG_BYTE="$(xxd -seek 9 -l 1 release-build-test/data/state/shared_memory.bin | awk '{print $2}')" -if [[ "$DEBUG_BYTE" == '00' ]]; then +if [[ "$(cat build-info.json | jq .debug)" == 'false' ]]; then echo 'PASS: Debug flag is not set.' echo '' - rm -rf release-build-test + rm build-info.json exit 0 fi echo 'FAIL: Debug flag is set!' -echo "Debug Byte = 0x$DEBUG_BYTE" echo '' -echo 'First kilobyte of shared_memory.bin:' -echo '$ xxd -l 1024 shared_memory.bin' -xxd -l 1024 release-build-test/data/state/shared_memory.bin -rm -rf release-build-test -exit 4 \ No newline at end of file +echo '$ cat build-info.json | jq .' +cat build-info.json | jq . +rm build-info.json +exit 3 \ No newline at end of file diff --git a/tests/restart-scenarios-test.py b/tests/restart-scenarios-test.py index 8d0f8721c10..c8425824d2d 100755 --- a/tests/restart-scenarios-test.py +++ b/tests/restart-scenarios-test.py @@ -8,18 +8,12 @@ import random ############################################################### -# Test for different nodes restart scenarios. 
-# Nodes can be producing or non-producing. -# -p -# -c -# -s -# -d -# -v -# --kill-sig -# --kill-count -# --dont-kill -# --dump-error-details -# --keep-logs +# restart-scenarios-test +# +# Tests restart scenarios for nodeos. Uses "-c" flag to indicate "replay" (--replay-blockchain), "resync" +# (--delete-all-blocks), "hardReplay"(--hard-replay-blockchain), and "none" to indicate what kind of restart flag should +# be used. This is one of the only test that actually verify that nodeos terminates with a good exit status. +# ############################################################### diff --git a/tests/testUtils.py b/tests/testUtils.py index 310237960c3..69d3899eef9 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -10,10 +10,44 @@ import json import shlex import socket +from datetime import datetime from sys import stdout from sys import exit import traceback +########################################################################################### + +def addEnum(enumClassType, type): + setattr(enumClassType, type, enumClassType(type)) + +def unhandledEnumType(type): + raise RuntimeError("No case defined for type=%s" % (type.type)) + +class EnumType: + + def __init__(self, type): + self.type=type + + def __str__(self): + return self.type + + +class ReturnType(EnumType): + pass + +addEnum(ReturnType, "raw") +addEnum(ReturnType, "json") + +########################################################################################### + +class BlockLogAction(EnumType): + pass + +addEnum(BlockLogAction, "make_index") +addEnum(BlockLogAction, "trim") +addEnum(BlockLogAction, "smoke_test") +addEnum(BlockLogAction, "return_blocks") + ########################################################################################### class Utils: Debug=False @@ -43,6 +77,7 @@ class Utils: def Print(*args, **kwargs): stackDepth=len(inspect.stack())-2 s=' '*stackDepth + stdout.write(datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f ")) stdout.write(s) print(*args, **kwargs) 
@@ -218,6 +253,11 @@ def runCmdArrReturnJson(cmdArr, trace=False, silentErrors=True): @staticmethod def runCmdReturnStr(cmd, trace=False): cmdArr=shlex.split(cmd) + return Utils.runCmdArrReturnStr(cmdArr) + + + @staticmethod + def runCmdArrReturnStr(cmdArr, trace=False): retStr=Utils.checkOutput(cmdArr) if trace: Utils.Print ("RAW > %s" % (retStr)) return retStr @@ -271,14 +311,37 @@ def pgrepCmd(serverName): return "pgrep %s %s" % (pgrepOpts, serverName) @staticmethod - def getBlockLog(blockLogLocation, silentErrors=False, exitOnError=False): + def getBlockLog(blockLogLocation, blockLogAction=BlockLogAction.return_blocks, outputFile=None, first=None, last=None, throwException=False, silentErrors=False, exitOnError=False): assert(isinstance(blockLogLocation, str)) - cmd="%s --blocks-dir %s --as-json-array" % (Utils.EosBlockLogPath, blockLogLocation) + outputFileStr=" --output-file %s " % (outputFile) if outputFile is not None else "" + firstStr=" --first %s " % (first) if first is not None else "" + lastStr=" --last %s " % (last) if last is not None else "" + + blockLogActionStr=None + returnType=ReturnType.raw + if blockLogAction==BlockLogAction.return_blocks: + blockLogActionStr="" + returnType=ReturnType.json + elif blockLogAction==BlockLogAction.make_index: + blockLogActionStr=" --make-index " + elif blockLogAction==BlockLogAction.trim: + blockLogActionStr=" --trim " + elif blockLogAction==BlockLogAction.smoke_test: + blockLogActionStr=" --smoke-test " + else: + unhandledEnumType(blockLogAction) + + cmd="%s --blocks-dir %s --as-json-array %s%s%s%s" % (Utils.EosBlockLogPath, blockLogLocation, outputFileStr, firstStr, lastStr, blockLogActionStr) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) rtn=None try: - rtn=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) + if returnType==ReturnType.json: + rtn=Utils.runCmdReturnJson(cmd, silentErrors=silentErrors) + else: + rtn=Utils.runCmdReturnStr(cmd) except subprocess.CalledProcessError as ex: + if throwException: 
+ raise if not silentErrors: msg=ex.output.decode("utf-8") errorMsg="Exception during \"%s\". %s" % (cmd, msg) @@ -373,18 +436,3 @@ def __init__(self, name): def __str__(self): return "Name: %s" % (self.name) -########################################################################################### - -def addEnum(enumClassType, type): - setattr(enumClassType, type, enumClassType(type)) - -def unhandledEnumType(type): - raise RuntimeError("No case defined for type=%s" % (type.type)) - -class EnumType: - - def __init__(self, type): - self.type=type - - def __str__(self): - return self.type diff --git a/tests/validate-dirty-db.py b/tests/validate-dirty-db.py index afcf2767b73..624ed4e87f6 100755 --- a/tests/validate-dirty-db.py +++ b/tests/validate-dirty-db.py @@ -9,7 +9,10 @@ import signal ############################################################### +# validate-dirty-db +# # Test for validating the dirty db flag sticks repeated nodeos restart attempts +# ############################################################### diff --git a/tests/wallet_tests.cpp b/tests/wallet_tests.cpp index 2f5956cf126..64a9fe0db4a 100644 --- a/tests/wallet_tests.cpp +++ b/tests/wallet_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/tutorials/bios-boot-tutorial/README.md b/tutorials/bios-boot-tutorial/README.md index 9b320d6697b..6e6aba98957 100644 --- a/tutorials/bios-boot-tutorial/README.md +++ b/tutorials/bios-boot-tutorial/README.md @@ -19,34 +19,57 @@ The `bios-boot-tutorial.py` script simulates the EOSIO bios boot sequence. ``Steps``: 1. Install eosio binaries by following the steps outlined in below tutorial -[Install eosio binaries](https://github.com/EOSIO/eos#mac-os-x-brew-install) +[Install eosio binaries](https://github.com/EOSIO/eos/tree/release/2.0.x#mac-os-x-brew-install). -2. 
Install eosio.cdt binaries by following the steps outlined in below tutorial -[Install eosio.cdt binaries](https://github.com/EOSIO/eosio.cdt#binary-releases) +2. Install eosio.cdt version 1.6.3 binaries by following the steps outlined in below tutorial +[Install eosio.cdt binaries](https://github.com/EOSIO/eosio.cdt/tree/release/1.6.x#binary-releases). -3. Compile eosio.contracts +3. Compile `eosio.contracts` version 1.8.x. + +```bash +$ cd ~ +$ git clone https://github.com/EOSIO/eosio.contracts.git eosio.contracts-1.8.x +$ cd ./eosio.contracts-1.8.x/ +$ git checkout release/1.8.x +$ ./build.sh +$ cd ./build/contracts/ +$ pwd + +``` + +4. Make note of the directory where the contracts were compiled. +The last command in the previous step printed on the bash console the contracts' directory, make note of it, we'll reference it from now on as `EOSIO_OLD_CONTRACTS_DIRECTORY`. + +5. Install eosio.cdt version 1.7.0 binaries by following the steps outlined in below tutorial, make sure you uninstall the previous one first. +[Install eosio.cdt binaries](https://github.com/EOSIO/eosio.cdt/tree/release/1.7.x#binary-releases) + +6. Compile `eosio.contracts` sources version 1.9.0 ```bash $ cd ~ $ git clone https://github.com/EOSIO/eosio.contracts.git $ cd ./eosio.contracts/ +$ git checkout release/1.9.x $ ./build.sh $ cd ./build/contracts/ $ pwd ``` -4. Make note of the directory where the contracts were compiled +7. Make note of the directory where the contracts were compiled The last command in the previous step printed on the bash console the contracts' directory, make note of it, we'll reference it from now on as `EOSIO_CONTRACTS_DIRECTORY` -5. Launch the `bios-boot-tutorial.py` script -Minimal command line to launch the script below, make sure you replace `EOSIO_CONTRACTS_DIRECTORY` with actual directory + +8. Launch the `bios-boot-tutorial.py` script. 
+The command line to launch the script, make sure you replace `EOSIO_OLD_CONTRACTS_DIRECTORY` and `EOSIO_CONTRACTS_DIRECTORY` with actual directory paths. ```bash $ cd ~ $ git clone https://github.com/EOSIO/eos.git $ cd ./eos/tutorials/bios-boot-tutorial/ -$ python3 bios-boot-tutorial.py --cleos="cleos --wallet-url http://127.0.0.1:6666 " --nodeos=nodeos --keosd=keosd --contracts-dir="EOSIO_CONTRACTS_DIRECTORY" -w -a +$ python3 bios-boot-tutorial.py --cleos="cleos --wallet-url http://127.0.0.1:6666 " --nodeos=nodeos --keosd=keosd --contracts-dir="EOSIO_CONTRACTS_DIRECTORY" --old-contracts-dir="EOSIO_OLD_CONTRACTS_DIRECTORY" -w -a ``` +9. At this point, when the script has finished running without error, you have a functional EOSIO based blockchain running locally with the latest version of `eosio.system` contract, 31 block producers out of which 21 active, `eosio` account resigned, 200k+ accounts with staked tokens, and votes allocated to each block producer. Enjoy exploring your freshly booted blockchain. + See [EOSIO Documentation Wiki: Tutorial - Bios Boot](https://github.com/EOSIO/eos/wiki/Tutorial-Bios-Boot-Sequence) for additional information. 
\ No newline at end of file diff --git a/tutorials/bios-boot-tutorial/bios-boot-tutorial.py b/tutorials/bios-boot-tutorial/bios-boot-tutorial.py index caf314d3cfb..c0e51459075 100755 --- a/tutorials/bios-boot-tutorial/bios-boot-tutorial.py +++ b/tutorials/bios-boot-tutorial/bios-boot-tutorial.py @@ -304,11 +304,11 @@ def stepSetSystemContract(): retry('curl -X POST http://127.0.0.1:%d' % args.http_port + '/v1/producer/schedule_protocol_feature_activations ' + '-d \'{"protocol_features_to_activate": ["0ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd"]}\'') - sleep(5) + sleep(3) - # install eosio.system - retry(args.cleos + 'set contract eosio ' + args.contracts_dir + '/eosio.system/') - sleep(1) + # install eosio.system the older version first + retry(args.cleos + 'set contract eosio ' + args.old_contracts_dir + '/eosio.system/') + sleep(3) # activate remaining features # GET_SENDER @@ -318,21 +318,31 @@ def stepSetSystemContract(): # ONLY_BILL_FIRST_AUTHORIZER retry(args.cleos + 'push action eosio activate \'["8ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a405"]\' -p eosio') # RESTRICT_ACTION_TO_SELF - retry(args.cleos + 'push action eosio activate \'["ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c43"]\' -p eosio') + retry(args.cleos + 'push action eosio activate \'["ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c43"]\' -p eosio@active') # DISALLOW_EMPTY_PRODUCER_SCHEDULE - retry(args.cleos + 'push action eosio activate \'["68dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428"]\' -p eosio') + retry(args.cleos + 'push action eosio activate \'["68dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428"]\' -p eosio@active') # FIX_LINKAUTH_RESTRICTION - retry(args.cleos + 'push action eosio activate \'["e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff526"]\' -p eosio') + retry(args.cleos + 'push action eosio activate 
\'["e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff526"]\' -p eosio@active') # REPLACE_DEFERRED - retry(args.cleos + 'push action eosio activate \'["ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea99"]\' -p eosio') + retry(args.cleos + 'push action eosio activate \'["ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea99"]\' -p eosio@active') # NO_DUPLICATE_DEFERRED_ID - retry(args.cleos + 'push action eosio activate \'["4a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f"]\' -p eosio') + retry(args.cleos + 'push action eosio activate \'["4a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f"]\' -p eosio@active') # ONLY_LINK_TO_EXISTING_PERMISSION - retry(args.cleos + 'push action eosio activate \'["1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241"]\' -p eosio') + retry(args.cleos + 'push action eosio activate \'["1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241"]\' -p eosio@active') # RAM_RESTRICTIONS - retry(args.cleos + 'push action eosio activate \'["4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d67"]\' -p eosio') + retry(args.cleos + 'push action eosio activate \'["4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d67"]\' -p eosio@active') + # WEBAUTHN_KEY + retry(args.cleos + 'push action eosio activate \'["4fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2"]\' -p eosio@active') + # WTMSIG_BLOCK_SIGNATURES + retry(args.cleos + 'push action eosio activate \'["299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707"]\' -p eosio@active') + sleep(1) + run(args.cleos + 'push action eosio setpriv' + jsonArg(['eosio.msig', 1]) + '-p eosio@active') + # install eosio.system latest version + retry(args.cleos + 'set contract eosio ' + args.contracts_dir + '/eosio.system/') + sleep(3) + def stepInitSystemContract(): run(args.cleos + 'push action eosio init' + jsonArg(['0', '4,' + args.symbol]) + '-p eosio@active') sleep(1) @@ 
-392,7 +402,8 @@ def stepLog(): parser.add_argument('--cleos', metavar='', help="Cleos command", default='../../build/programs/cleos/cleos --wallet-url http://127.0.0.1:6666 ') parser.add_argument('--nodeos', metavar='', help="Path to nodeos binary", default='../../build/programs/nodeos/nodeos') parser.add_argument('--keosd', metavar='', help="Path to keosd binary", default='../../build/programs/keosd/keosd') -parser.add_argument('--contracts-dir', metavar='', help="Path to contracts directory", default='../../build/contracts/') +parser.add_argument('--contracts-dir', metavar='', help="Path to latest contracts directory", default='../../build/contracts/') +parser.add_argument('--old-contracts-dir', metavar='', help="Path to 1.8.x contracts directory", default='../../build/contracts/') parser.add_argument('--nodes-dir', metavar='', help="Path to nodes directory", default='./nodes/') parser.add_argument('--genesis', metavar='', help="Path to genesis.json", default="./genesis.json") parser.add_argument('--wallet-dir', metavar='', help="Path to wallet directory", default='./wallet/') diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index 82e723e5563..26b859b5a53 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -29,14 +29,12 @@ if( GPERFTOOLS_FOUND ) list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) endif() -find_package(LLVM 4.0 REQUIRED CONFIG) - -link_directories(${LLVM_LIBRARY_DIR}) - add_subdirectory(contracts) - configure_file(${CMAKE_CURRENT_SOURCE_DIR}/contracts.hpp.in ${CMAKE_CURRENT_BINARY_DIR}/include/contracts.hpp ESCAPE_QUOTES) +add_subdirectory(snapshots) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/snapshots.hpp.in ${CMAKE_CURRENT_BINARY_DIR}/include/snapshots.hpp ESCAPE_QUOTES) + ### BUILD UNIT TEST EXECUTABLE ### file(GLOB UNIT_TESTS "*.cpp") # find all unit test suites add_executable( unit_test ${UNIT_TESTS}) # build unit tests as one executable @@ -58,18 +56,26 @@ foreach(TEST_SUITE ${UNIT_TESTS}) # create an 
independent target for each test s if (NOT "" STREQUAL "${SUITE_NAME}") # ignore empty lines execute_process(COMMAND bash -c "echo ${SUITE_NAME} | sed -e 's/s$//' | sed -e 's/_test$//'" OUTPUT_VARIABLE TRIMMED_SUITE_NAME OUTPUT_STRIP_TRAILING_WHITESPACE) # trim "_test" or "_tests" from the end of ${SUITE_NAME} # to run unit_test with all log from blockchain displayed, put "--verbose" after "--", i.e. "unit_test -- --verbose" - add_test(NAME ${TRIMMED_SUITE_NAME}_unit_test_wavm COMMAND unit_test --run_test=${SUITE_NAME} --report_level=detailed --color_output --catch_system_errors=no -- --wavm) - add_test(NAME ${TRIMMED_SUITE_NAME}_unit_test_wabt COMMAND unit_test --run_test=${SUITE_NAME} --report_level=detailed --color_output -- --wabt) - # build list of tests to run during coverage testing - if(NOT "" STREQUAL "${ctest_tests}") - set(ctest_tests "${ctest_tests}|${TRIMMED_SUITE_NAME}_unit_test_wavm|${TRIMMED_SUITE_NAME}_unit_test_wabt") - else() - set(ctest_tests "${TRIMMED_SUITE_NAME}_unit_test_wavm|${TRIMMED_SUITE_NAME}_unit_test_wabt") - endif() + foreach(RUNTIME ${EOSIO_WASM_RUNTIMES}) + add_test(NAME ${TRIMMED_SUITE_NAME}_unit_test_${RUNTIME} COMMAND unit_test --run_test=${SUITE_NAME} --report_level=detailed --color_output --catch_system_errors=no -- --${RUNTIME}) + # build list of tests to run during coverage testing + if(ctest_tests) + string(APPEND ctest_tests "|") + endif() + string(APPEND ctest_tests ${TRIMMED_SUITE_NAME}_unit_test_${RUNTIME}) + endforeach() endif() endforeach(TEST_SUITE) set(ctest_tests "'${ctest_tests}' -j8") # surround test list string in apostrophies +# The following tests are known to take the longest, bump up their cost (priority) so that they'll run first +# even on fresh first time test runs before ctest auto-detects costs +foreach(RUNTIME ${EOSIO_WASM_RUNTIMES}) + set_tests_properties(api_unit_test_${RUNTIME} PROPERTIES COST 5000) + set_tests_properties(wasm_unit_test_${RUNTIME} PROPERTIES COST 4000) + 
set_tests_properties(delay_unit_test_${RUNTIME} PROPERTIES COST 3000) +endforeach() + ### COVERAGE TESTING ### if(ENABLE_COVERAGE_TESTING) set(Coverage_NAME ${PROJECT_NAME}_ut_coverage) diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index dab7532b28c..9c7a2b2d6ed 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include @@ -818,10 +814,10 @@ BOOST_AUTO_TEST_CASE(linkauth_test) auto var = fc::json::from_string(test_data); auto lauth = var.as(); - BOOST_TEST("lnkauth.acct" == lauth.account); - BOOST_TEST("lnkauth.code" == lauth.code); - BOOST_TEST("lnkauth.type" == lauth.type); - BOOST_TEST("lnkauth.rqm" == lauth.requirement); + BOOST_TEST(name("lnkauth.acct") == lauth.account); + BOOST_TEST(name("lnkauth.code") == lauth.code); + BOOST_TEST(name("lnkauth.type") == lauth.type); + BOOST_TEST(name("lnkauth.rqm") == lauth.requirement); auto var2 = verify_byte_round_trip_conversion( abis, "linkauth", var ); auto linkauth2 = var2.as(); @@ -851,9 +847,9 @@ BOOST_AUTO_TEST_CASE(unlinkauth_test) auto var = fc::json::from_string(test_data); auto unlauth = var.as(); - BOOST_TEST("lnkauth.acct" == unlauth.account); - BOOST_TEST("lnkauth.code" == unlauth.code); - BOOST_TEST("lnkauth.type" == unlauth.type); + BOOST_TEST(name("lnkauth.acct") == unlauth.account); + BOOST_TEST(name("lnkauth.code") == unlauth.code); + BOOST_TEST(name("lnkauth.type") == unlauth.type); auto var2 = verify_byte_round_trip_conversion( abis, "unlinkauth", var ); auto unlinkauth2 = var2.as(); @@ -890,9 +886,9 @@ BOOST_AUTO_TEST_CASE(updateauth_test) auto var = fc::json::from_string(test_data); auto updauth = var.as(); - BOOST_TEST("updauth.acct" == updauth.account); - BOOST_TEST("updauth.prm" == updauth.permission); - BOOST_TEST("updauth.prnt" == updauth.parent); + BOOST_TEST(name("updauth.acct") == updauth.account); + BOOST_TEST(name("updauth.prm") == updauth.permission); + 
BOOST_TEST(name("updauth.prnt") == updauth.parent); BOOST_TEST(2147483145u == updauth.auth.threshold); BOOST_TEST_REQUIRE(2u == updauth.auth.keys.size()); @@ -902,11 +898,11 @@ BOOST_AUTO_TEST_CASE(updateauth_test) BOOST_TEST(57605u == updauth.auth.keys[1].weight); BOOST_TEST_REQUIRE(2u == updauth.auth.accounts.size()); - BOOST_TEST("prm.acct1" == updauth.auth.accounts[0].permission.actor); - BOOST_TEST("prm.prm1" == updauth.auth.accounts[0].permission.permission); + BOOST_TEST(name("prm.acct1") == updauth.auth.accounts[0].permission.actor); + BOOST_TEST(name("prm.prm1") == updauth.auth.accounts[0].permission.permission); BOOST_TEST(53005u == updauth.auth.accounts[0].weight); - BOOST_TEST("prm.acct2" == updauth.auth.accounts[1].permission.actor); - BOOST_TEST("prm.prm2" == updauth.auth.accounts[1].permission.permission); + BOOST_TEST(name("prm.acct2") == updauth.auth.accounts[1].permission.actor); + BOOST_TEST(name("prm.prm2") == updauth.auth.accounts[1].permission.permission); BOOST_TEST(53405u == updauth.auth.accounts[1].weight); auto var2 = verify_byte_round_trip_conversion( abis, "updateauth", var ); @@ -951,8 +947,8 @@ BOOST_AUTO_TEST_CASE(deleteauth_test) auto var = fc::json::from_string(test_data); auto delauth = var.as(); - BOOST_TEST("delauth.acct" == delauth.account); - BOOST_TEST("delauth.prm" == delauth.permission); + BOOST_TEST(name("delauth.acct") == delauth.account); + BOOST_TEST(name("delauth.prm") == delauth.permission); auto var2 = verify_byte_round_trip_conversion( abis, "deleteauth", var ); auto deleteauth2 = var2.as(); @@ -994,8 +990,8 @@ BOOST_AUTO_TEST_CASE(newaccount_test) auto var = fc::json::from_string(test_data); auto newacct = var.as(); - BOOST_TEST("newacct.crtr" == newacct.creator); - BOOST_TEST("newacct.name" == newacct.name); + BOOST_TEST(name("newacct.crtr") == newacct.creator); + BOOST_TEST(name("newacct.name") == newacct.name); BOOST_TEST(2147483145u == newacct.owner.threshold); @@ -1006,11 +1002,11 @@ 
BOOST_AUTO_TEST_CASE(newaccount_test) BOOST_TEST(57605u == newacct.owner.keys[1].weight); BOOST_TEST_REQUIRE(2u == newacct.owner.accounts.size()); - BOOST_TEST("prm.acct1" == newacct.owner.accounts[0].permission.actor); - BOOST_TEST("prm.prm1" == newacct.owner.accounts[0].permission.permission); + BOOST_TEST(name("prm.acct1") == newacct.owner.accounts[0].permission.actor); + BOOST_TEST(name("prm.prm1") == newacct.owner.accounts[0].permission.permission); BOOST_TEST(53005u == newacct.owner.accounts[0].weight); - BOOST_TEST("prm.acct2" == newacct.owner.accounts[1].permission.actor); - BOOST_TEST("prm.prm2" == newacct.owner.accounts[1].permission.permission); + BOOST_TEST(name("prm.acct2") == newacct.owner.accounts[1].permission.actor); + BOOST_TEST(name("prm.prm2") == newacct.owner.accounts[1].permission.permission); BOOST_TEST(53405u == newacct.owner.accounts[1].weight); BOOST_TEST(2146483145u == newacct.active.threshold); @@ -1022,11 +1018,11 @@ BOOST_AUTO_TEST_CASE(newaccount_test) BOOST_TEST(57605u == newacct.active.keys[1].weight); BOOST_TEST_REQUIRE(2u == newacct.active.accounts.size()); - BOOST_TEST("prm.acct1" == newacct.active.accounts[0].permission.actor); - BOOST_TEST("prm.prm1" == newacct.active.accounts[0].permission.permission); + BOOST_TEST(name("prm.acct1") == newacct.active.accounts[0].permission.actor); + BOOST_TEST(name("prm.prm1") == newacct.active.accounts[0].permission.permission); BOOST_TEST(53005u == newacct.active.accounts[0].weight); - BOOST_TEST("prm.acct2" == newacct.active.accounts[1].permission.actor); - BOOST_TEST("prm.prm2" == newacct.active.accounts[1].permission.permission); + BOOST_TEST(name("prm.acct2") == newacct.active.accounts[1].permission.actor); + BOOST_TEST(name("prm.prm2") == newacct.active.accounts[1].permission.permission); BOOST_TEST(53405u == newacct.active.accounts[1].weight); @@ -1090,7 +1086,7 @@ BOOST_AUTO_TEST_CASE(setcode_test) auto var = fc::json::from_string(test_data); auto set_code = var.as(); - 
BOOST_TEST("setcode.acc" == set_code.account); + BOOST_TEST(name("setcode.acc") == set_code.account); BOOST_TEST(0 == set_code.vmtype); BOOST_TEST(0 == set_code.vmversion); BOOST_TEST("0061736d0100000001390a60037e7e7f017f60047e7e7f7f017f60017e0060057e7e7e7f7f" == fc::to_hex(set_code.code.data(), set_code.code.size())); @@ -1335,11 +1331,11 @@ BOOST_AUTO_TEST_CASE(setabi_test) BOOST_TEST("uint64" == abi.structs[2].fields[1].type); BOOST_TEST_REQUIRE(1u == abi.actions.size()); - BOOST_TEST("transfer" == abi.actions[0].name); + BOOST_TEST(name("transfer") == abi.actions[0].name); BOOST_TEST("transfer" == abi.actions[0].type); BOOST_TEST_REQUIRE(1u == abi.tables.size()); - BOOST_TEST("account" == abi.tables[0].name); + BOOST_TEST(name("account") == abi.tables[0].name); BOOST_TEST("account" == abi.tables[0].type); BOOST_TEST("i64" == abi.tables[0].index_type); BOOST_TEST_REQUIRE(1u == abi.tables[0].key_names.size()); @@ -1446,20 +1442,20 @@ struct action2 { template void verify_action_equal(const chain::action& exp, const chain::action& act) { - BOOST_REQUIRE_EQUAL((std::string)exp.account, (std::string)act.account); - BOOST_REQUIRE_EQUAL((std::string)exp.name, (std::string)act.name); + BOOST_REQUIRE_EQUAL(exp.account.to_string(), act.account.to_string()); + BOOST_REQUIRE_EQUAL(exp.name.to_string(), act.name.to_string()); BOOST_REQUIRE_EQUAL(exp.authorization.size(), act.authorization.size()); for(unsigned int i = 0; i < exp.authorization.size(); ++i) { - BOOST_REQUIRE_EQUAL((std::string)exp.authorization[i].actor, (std::string)act.authorization[i].actor); - BOOST_REQUIRE_EQUAL((std::string)exp.authorization[i].permission, (std::string)act.authorization[i].permission); + BOOST_REQUIRE_EQUAL(exp.authorization[i].actor.to_string(), act.authorization[i].actor.to_string()); + BOOST_REQUIRE_EQUAL(exp.authorization[i].permission.to_string(), act.authorization[i].permission.to_string()); } BOOST_REQUIRE_EQUAL(exp.data.size(), act.data.size()); 
BOOST_REQUIRE(!memcmp(exp.data.data(), act.data.data(), exp.data.size())); } private_key_type get_private_key( name keyname, string role ) { - return private_key_type::regenerate(fc::sha256::hash(string(keyname)+role)); + return private_key_type::regenerate(fc::sha256::hash(keyname.to_string()+role)); } public_key_type get_public_key( name keyname, string role ) { @@ -1955,8 +1951,8 @@ BOOST_AUTO_TEST_CASE(abi_type_redefine) } )====="; - auto is_type_exception = [](fc::exception const & e) -> bool { return e.to_detail_string().find("invalid type") != std::string::npos; }; - BOOST_CHECK_EXCEPTION( abi_serializer abis(fc::json::from_string(repeat_abi).as(), max_serialization_time), invalid_type_inside_abi, is_type_exception ); + auto is_type_exception = [](fc::exception const & e) -> bool { return e.to_detail_string().find("Circular reference in type account_name") != std::string::npos; }; + BOOST_CHECK_EXCEPTION( abi_serializer abis(fc::json::from_string(repeat_abi).as(), max_serialization_time), abi_circular_def_exception, is_type_exception ); } FC_LOG_AND_RETHROW() } @@ -2304,6 +2300,57 @@ BOOST_AUTO_TEST_CASE(variants) } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE(aliased_variants) +{ + using eosio::testing::fc_exception_message_starts_with; + + auto aliased_variant = R"({ + "version": "eosio::abi/1.1", + "types": [ + { "new_type_name": "foo", "type": "foo_variant" } + ], + "variants": [ + {"name": "foo_variant", "types": ["int8", "string"]} + ], + })"; + + try { + // round-trip abi through multiple formats + // json -> variant -> abi_def -> bin + auto bin = fc::raw::pack(fc::json::from_string(aliased_variant).as()); + // bin -> abi_def -> variant -> abi_def + abi_serializer abis(variant(fc::raw::unpack(bin)).as(), max_serialization_time ); + + verify_round_trip_conversion(abis, "foo", R"(["int8",21])", "0015"); + } FC_LOG_AND_RETHROW() +} + +BOOST_AUTO_TEST_CASE(variant_of_aliases) +{ + using eosio::testing::fc_exception_message_starts_with; + + auto 
aliased_variant = R"({ + "version": "eosio::abi/1.1", + "types": [ + { "new_type_name": "foo_0", "type": "int8" }, + { "new_type_name": "foo_1", "type": "string" } + ], + "variants": [ + {"name": "foo", "types": ["foo_0", "foo_1"]} + ], + })"; + + try { + // round-trip abi through multiple formats + // json -> variant -> abi_def -> bin + auto bin = fc::raw::pack(fc::json::from_string(aliased_variant).as()); + // bin -> abi_def -> variant -> abi_def + abi_serializer abis(variant(fc::raw::unpack(bin)).as(), max_serialization_time ); + + verify_round_trip_conversion(abis, "foo", R"(["foo_0",21])", "0015"); + } FC_LOG_AND_RETHROW() +} + BOOST_AUTO_TEST_CASE(extend) { using eosio::testing::fc_exception_message_starts_with; @@ -2720,4 +2767,24 @@ BOOST_AUTO_TEST_CASE(abi_deserialize_detailed_error_messages) } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE(serialize_optional_struct_type) +{ + auto abi = R"({ + "version": "eosio::abi/1.0", + "structs": [ + {"name": "s", "base": "", "fields": [ + {"name": "i0", "type": "int8"} + ]}, + ], + })"; + + try { + abi_serializer abis( fc::json::from_string(abi).as(), max_serialization_time ); + + verify_round_trip_conversion(abis, "s?", R"({"i0":5})", "0105"); + verify_round_trip_conversion(abis, "s?", R"(null)", "00"); + + } FC_LOG_AND_RETHROW() +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 8e2b9576506..7f40f291ef2 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file api_tests.cpp - * @copyright defined in eos/LICENSE - */ #include #include #include @@ -9,6 +5,7 @@ #include #include #include +#include #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wsign-compare" @@ -60,10 +57,10 @@ static constexpr unsigned long long WASM_TEST_ACTION(const char* cls, const char } struct dummy_action { - static uint64_t get_name() { + static eosio::chain::name get_name() { return N(dummyaction); } - static uint64_t get_account() { + 
static eosio::chain::name get_account() { return N(testapi); } @@ -77,10 +74,10 @@ struct u128_action { }; struct cf_action { - static uint64_t get_name() { + static eosio::chain::name get_name() { return N(cfaction); } - static uint64_t get_account() { + static eosio::chain::name get_account() { return N(testapi); } @@ -94,13 +91,13 @@ struct dtt_action { return WASM_TEST_ACTION("test_transaction", "send_deferred_tx_with_dtt_action"); } static uint64_t get_account() { - return N(testapi); + return N(testapi).to_uint64_t(); } - uint64_t payer = N(testapi); - uint64_t deferred_account = N(testapi); + uint64_t payer = N(testapi).to_uint64_t(); + uint64_t deferred_account = N(testapi).to_uint64_t(); uint64_t deferred_action = WASM_TEST_ACTION("test_transaction", "deferred_print"); - uint64_t permission_name = N(active); + uint64_t permission_name = N(active).to_uint64_t(); uint32_t delay_sec = 2; }; @@ -591,7 +588,8 @@ BOOST_FIXTURE_TEST_CASE(require_notice_tests, TESTER) { try { } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE(ram_billing_in_notify_tests) { try { - validating_tester chain( validating_tester::default_config() ); + fc::temp_directory tempdir; + validating_tester chain( tempdir, true ); chain.execute_setup_policy( setup_policy::preactivate_feature_and_new_bios ); chain.produce_blocks(2); @@ -604,15 +602,15 @@ BOOST_AUTO_TEST_CASE(ram_billing_in_notify_tests) { try { chain.produce_blocks(1); BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( chain, "test_action", "test_ram_billing_in_notify", - fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | N(testapi) ) ), + fc::raw::pack( ((unsigned __int128)N(testapi2).to_uint64_t() << 64) | N(testapi).to_uint64_t() ) ), subjective_block_production_exception, fc_exception_message_is("Cannot charge RAM to other accounts during notify.") ); - CALL_TEST_FUNCTION( chain, "test_action", "test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | 0 ) ); + CALL_TEST_FUNCTION( chain, "test_action", 
"test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2).to_uint64_t() << 64) | 0 ) ); - CALL_TEST_FUNCTION( chain, "test_action", "test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | N(testapi2) ) ); + CALL_TEST_FUNCTION( chain, "test_action", "test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2).to_uint64_t() << 64) | N(testapi2).to_uint64_t() ) ); BOOST_REQUIRE_EQUAL( chain.validate(), true ); } FC_LOG_AND_RETHROW() } @@ -740,7 +738,7 @@ BOOST_FIXTURE_TEST_CASE(cfa_tx_signature, TESTER) try { tx2.context_free_actions.push_back(cfa); set_transaction_headers(tx2); - const private_key_type& priv_key = get_private_key("dummy", "active"); + const private_key_type& priv_key = get_private_key(name("dummy"), "active"); BOOST_TEST((std::string)tx1.sign(priv_key, control->get_chain_id()) != (std::string)tx2.sign(priv_key, control->get_chain_id())); BOOST_REQUIRE_EQUAL( validate(), true ); @@ -1257,27 +1255,27 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { // Trigger a tx which in turn sends a deferred tx with payer != receiver // Payer is alice in this case, this tx should fail since we don't have the authorization of alice dtt_action dtt_act1; - dtt_act1.payer = N(alice); + dtt_act1.payer = N(alice).to_uint64_t(); BOOST_CHECK_THROW(CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_tx_with_dtt_action", fc::raw::pack(dtt_act1)), action_validate_exception); // Send a tx which in turn sends a deferred tx with the deferred tx's receiver != this tx receiver // This will include the authorization of the receiver, and impose any related delay associated with the authority // We set the authorization delay to be 10 sec here, and since the deferred tx delay is set to be 5 sec, so this tx should fail dtt_action dtt_act2; - dtt_act2.deferred_account = N(testapi2); - dtt_act2.permission_name = N(additional); + dtt_act2.deferred_account = N(testapi2).to_uint64_t(); + 
dtt_act2.permission_name = N(additional).to_uint64_t(); dtt_act2.delay_sec = 5; - auto auth = authority(get_public_key("testapi", name(dtt_act2.permission_name).to_string()), 10); + auto auth = authority(get_public_key(name("testapi"), name(dtt_act2.permission_name).to_string()), 10); auth.accounts.push_back( permission_level_weight{{N(testapi), config::eosio_code_name}, 1} ); - push_action(config::system_account_name, updateauth::get_name(), "testapi", fc::mutable_variant_object() + push_action(config::system_account_name, updateauth::get_name(), name("testapi"), fc::mutable_variant_object() ("account", "testapi") ("permission", name(dtt_act2.permission_name)) ("parent", "active") ("auth", auth) ); - push_action(config::system_account_name, linkauth::get_name(), "testapi", fc::mutable_variant_object() + push_action(config::system_account_name, linkauth::get_name(), name("testapi"), fc::mutable_variant_object() ("account", "testapi") ("code", name(dtt_act2.deferred_account)) ("type", name(dtt_act2.deferred_action)) @@ -1292,9 +1290,9 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { // But not anymore. With the RESTRICT_ACTION_TO_SELF protocol feature activated, it should now objectively // fail because testapi@additional permission is not unilaterally satisfied by testapi@eosio.code. 
dtt_action dtt_act3; - dtt_act3.deferred_account = N(testapi); - dtt_act3.permission_name = N(additional); - push_action(config::system_account_name, linkauth::get_name(), "testapi", fc::mutable_variant_object() + dtt_act3.deferred_account = N(testapi).to_uint64_t(); + dtt_act3.permission_name = N(additional).to_uint64_t(); + push_action(config::system_account_name, linkauth::get_name(), name("testapi"), fc::mutable_variant_object() ("account", "testapi") ("code", name(dtt_act3.deferred_account)) ("type", name(dtt_act3.deferred_action)) @@ -1319,9 +1317,8 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE(more_deferred_transaction_tests) { try { - auto cfg = validating_tester::default_config(); - cfg.contracts_console = true; - validating_tester chain( cfg ); + fc::temp_directory tempdir; + validating_tester chain( tempdir, true ); chain.execute_setup_policy( setup_policy::preactivate_feature_and_new_bios ); const auto& pfm = chain.control->get_protocol_feature_manager(); @@ -1725,37 +1722,65 @@ BOOST_FIXTURE_TEST_CASE(multi_index_tests, TESTER) { try { * crypto_tests test cases *************************************************************************************/ BOOST_FIXTURE_TEST_CASE(crypto_tests, TESTER) { try { - produce_blocks(1000); + produce_block(); create_account(N(testapi) ); - produce_blocks(1000); + produce_block(); set_code(N(testapi), contracts::test_api_wasm() ); - produce_blocks(1000); - { - signed_transaction trx; + produce_block(); + { + signed_transaction trx; auto pl = vector{{N(testapi), config::active_name}}; action act(pl, test_api_action{}); - auto signatures = trx.sign(get_private_key(N(testapi), "active"), control->get_chain_id()); - - produce_block(); - - auto payload = fc::raw::pack( trx.sig_digest( control->get_chain_id() ) ); - auto pk = fc::raw::pack( get_public_key( N(testapi), "active" ) ); - auto sigs = fc::raw::pack( signatures ); - payload.insert( payload.end(), 
pk.begin(), pk.end() ); - payload.insert( payload.end(), sigs.begin(), sigs.end() ); + const auto priv_key = get_private_key(N(testapi), "active" ); + const auto pub_key = priv_key.get_public_key(); + auto hash = trx.sig_digest( control->get_chain_id() ); + auto sig = priv_key.sign(hash); + + auto pk = fc::raw::pack( pub_key ); + auto sigs = fc::raw::pack( sig ); + vector payload(8192); + datastream payload_ds(payload.data(), payload.size()); + fc::raw::pack(payload_ds, hash, (uint32_t)pk.size(), (uint32_t)sigs.size() ); + payload_ds.write(pk.data(), pk.size() ); + payload_ds.write(sigs.data(), sigs.size()); + payload.resize(payload_ds.tellp()); //No Error Here CALL_TEST_FUNCTION( *this, "test_crypto", "test_recover_key", payload ); - return; // Error Here CALL_TEST_FUNCTION( *this, "test_crypto", "test_recover_key_assert_true", payload ); payload[payload.size()-1] = 0; BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( *this, "test_crypto", "test_recover_key_assert_false", payload ), crypto_api_exception, fc_exception_message_is("Error expected key different than recovered key") ); - } + } + + { + signed_transaction trx; + auto pl = vector{{N(testapi), config::active_name}}; + + action act(pl, test_api_action{}); + + // construct a mock WebAuthN pubkey and signature, as it is the only type that would be variable-sized + const auto priv_key = get_private_key(N(testapi), "active" ); + const auto pub_key = priv_key.get_public_key(); + auto hash = trx.sig_digest( control->get_chain_id() ); + auto sig = priv_key.sign(hash); + + auto pk = fc::raw::pack( pub_key ); + auto sigs = fc::raw::pack( sig ); + vector payload(8192); + datastream payload_ds(payload.data(), payload.size()); + fc::raw::pack(payload_ds, hash, (uint32_t)pk.size(), (uint32_t)sigs.size() ); + payload_ds.write(pk.data(), pk.size() ); + payload_ds.write(sigs.data(), sigs.size()); + payload.resize(payload_ds.tellp()); + + //No Error Here + CALL_TEST_FUNCTION( *this, "test_crypto", "test_recover_key_partial", 
payload ); + } CALL_TEST_FUNCTION( *this, "test_crypto", "test_sha1", {} ); @@ -2327,99 +2352,99 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); BOOST_REQUIRE_EQUAL((int)atrace[0].closest_unnotified_ancestor_action_ordinal, 0); - BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[0].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), true); int start_gseq = atrace[0].receipt->global_sequence; BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[1].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); - BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[1].receiver, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[1].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[1].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[1].receipt->global_sequence, start_gseq + 1); BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[2].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); - 
BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[2].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.name, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[2].receipt->global_sequence, start_gseq + 4); BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[3].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[3].receiver.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[3].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[3].act.name.value, TEST_METHOD("test_action", "test_action_ordinal3")); + BOOST_REQUIRE_EQUAL(atrace[3].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.name, TEST_METHOD("test_action", "test_action_ordinal3")); BOOST_REQUIRE_EQUAL(atrace[3].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[3].receipt->global_sequence, start_gseq + 8); BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[4].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[4].receiver.value, N(charlie)); - BOOST_REQUIRE_EQUAL(atrace[4].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[4].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[4].receiver, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[4].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[4].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[4].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[4].receipt->global_sequence, start_gseq + 2); 
BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); BOOST_REQUIRE_EQUAL((int)atrace[5].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[5].receiver.value, N(bob)); - BOOST_REQUIRE_EQUAL(atrace[5].act.account.value, N(bob)); - BOOST_REQUIRE_EQUAL(atrace[5].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_foo")); + BOOST_REQUIRE_EQUAL(atrace[5].receiver, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.account, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.name, TEST_METHOD("test_action", "test_action_ordinal_foo")); BOOST_REQUIRE_EQUAL(atrace[5].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[5].receipt->global_sequence, start_gseq + 9); BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[6].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[6].receiver.value, N(david)); - BOOST_REQUIRE_EQUAL(atrace[6].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[6].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[6].receiver, N(david)); + BOOST_REQUIRE_EQUAL(atrace[6].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[6].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[6].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[6].receipt->global_sequence, start_gseq + 3); BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[7].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[7].receiver.value, N(charlie)); - BOOST_REQUIRE_EQUAL(atrace[7].act.account.value, N(charlie)); - BOOST_REQUIRE_EQUAL(atrace[7].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_bar")); + BOOST_REQUIRE_EQUAL(atrace[7].receiver, N(charlie)); + 
BOOST_REQUIRE_EQUAL(atrace[7].act.account, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.name, TEST_METHOD("test_action", "test_action_ordinal_bar")); BOOST_REQUIRE_EQUAL(atrace[7].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[7].receipt->global_sequence, start_gseq + 10); BOOST_REQUIRE_EQUAL((int)atrace[8].action_ordinal, 9); BOOST_REQUIRE_EQUAL((int)atrace[8].creator_action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[8].closest_unnotified_ancestor_action_ordinal, 3); - BOOST_REQUIRE_EQUAL(atrace[8].receiver.value, N(david)); - BOOST_REQUIRE_EQUAL(atrace[8].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[8].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[8].receiver, N(david)); + BOOST_REQUIRE_EQUAL(atrace[8].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[8].act.name, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[8].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[8].receipt->global_sequence, start_gseq + 5); BOOST_REQUIRE_EQUAL((int)atrace[9].action_ordinal, 10); BOOST_REQUIRE_EQUAL((int)atrace[9].creator_action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[9].closest_unnotified_ancestor_action_ordinal, 3); - BOOST_REQUIRE_EQUAL(atrace[9].receiver.value, N(erin)); - BOOST_REQUIRE_EQUAL(atrace[9].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[9].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[9].receiver, N(erin)); + BOOST_REQUIRE_EQUAL(atrace[9].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[9].act.name, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[9].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[9].receipt->global_sequence, start_gseq + 6); BOOST_REQUIRE_EQUAL((int)atrace[10].action_ordinal, 11); BOOST_REQUIRE_EQUAL((int)atrace[10].creator_action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[10].closest_unnotified_ancestor_action_ordinal, 
3); - BOOST_REQUIRE_EQUAL(atrace[10].receiver.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[10].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[10].act.name.value, TEST_METHOD("test_action", "test_action_ordinal4")); + BOOST_REQUIRE_EQUAL(atrace[10].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[10].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[10].act.name, TEST_METHOD("test_action", "test_action_ordinal4")); BOOST_REQUIRE_EQUAL(atrace[10].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[10].receipt->global_sequence, start_gseq + 7); } FC_LOG_AND_RETHROW() } @@ -2464,9 +2489,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); BOOST_REQUIRE_EQUAL((int)atrace[0].closest_unnotified_ancestor_action_ordinal, 0); - BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[0].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[0].except.valid(), true); BOOST_REQUIRE_EQUAL(atrace[0].except->code(), 3050003); @@ -2475,9 +2500,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal, 2); BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[1].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); - BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + 
BOOST_REQUIRE_EQUAL(atrace[1].receiver, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[1].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[1].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[1].except.valid(), false); @@ -2485,9 +2510,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[2].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[2].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.name, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[2].except.valid(), false); @@ -2532,9 +2557,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); BOOST_REQUIRE_EQUAL((int)atrace[0].closest_unnotified_ancestor_action_ordinal, 0); - BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[0].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[0].except.valid(), false); int start_gseq = 
atrace[0].receipt->global_sequence; @@ -2543,9 +2568,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[1].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); - BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[1].receiver, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[1].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[1].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[1].receipt->global_sequence, start_gseq + 1); @@ -2553,9 +2578,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[2].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[2].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.name, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[2].except.valid(), false); @@ -2563,9 +2588,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[3].closest_unnotified_ancestor_action_ordinal, 1); - 
BOOST_REQUIRE_EQUAL(atrace[3].receiver.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[3].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[3].act.name.value, TEST_METHOD("test_action", "test_action_ordinal3")); + BOOST_REQUIRE_EQUAL(atrace[3].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.name, TEST_METHOD("test_action", "test_action_ordinal3")); BOOST_REQUIRE_EQUAL(atrace[3].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[3].except.valid(), false); @@ -2573,9 +2598,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[4].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[4].receiver.value, N(charlie)); - BOOST_REQUIRE_EQUAL(atrace[4].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[4].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[4].receiver, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[4].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[4].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[4].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[4].except.valid(), true); BOOST_REQUIRE_EQUAL(atrace[4].except->code(), 3050003); @@ -2584,9 +2609,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); BOOST_REQUIRE_EQUAL((int)atrace[5].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[5].receiver.value, N(bob)); - BOOST_REQUIRE_EQUAL(atrace[5].act.account.value, N(bob)); - BOOST_REQUIRE_EQUAL(atrace[5].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_foo")); + BOOST_REQUIRE_EQUAL(atrace[5].receiver, N(bob)); + 
BOOST_REQUIRE_EQUAL(atrace[5].act.account, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.name, TEST_METHOD("test_action", "test_action_ordinal_foo")); BOOST_REQUIRE_EQUAL(atrace[5].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[5].except.valid(), false); @@ -2594,9 +2619,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[6].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[6].receiver.value, N(david)); - BOOST_REQUIRE_EQUAL(atrace[6].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[6].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[6].receiver, N(david)); + BOOST_REQUIRE_EQUAL(atrace[6].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[6].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[6].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[6].except.valid(), false); @@ -2604,9 +2629,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[7].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[7].receiver.value, N(charlie)); - BOOST_REQUIRE_EQUAL(atrace[7].act.account.value, N(charlie)); - BOOST_REQUIRE_EQUAL(atrace[7].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_bar")); + BOOST_REQUIRE_EQUAL(atrace[7].receiver, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.account, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.name, TEST_METHOD("test_action", "test_action_ordinal_bar")); BOOST_REQUIRE_EQUAL(atrace[7].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[7].except.valid(), false); @@ -2651,9 +2676,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, 
TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); BOOST_REQUIRE_EQUAL((int)atrace[0].closest_unnotified_ancestor_action_ordinal, 0); - BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[0].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[0].except.valid(), false); int start_gseq = atrace[0].receipt->global_sequence; @@ -2662,9 +2687,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[1].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); - BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[1].receiver, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[1].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[1].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[1].receipt->global_sequence, start_gseq + 1); @@ -2672,9 +2697,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[2].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, 
N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[2].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.name, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[2].receipt->global_sequence, start_gseq + 4); @@ -2682,9 +2707,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[3].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[3].receiver.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[3].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[3].act.name.value, TEST_METHOD("test_action", "test_action_ordinal3")); + BOOST_REQUIRE_EQUAL(atrace[3].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.name, TEST_METHOD("test_action", "test_action_ordinal3")); BOOST_REQUIRE_EQUAL(atrace[3].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[3].except.valid(), true); BOOST_REQUIRE_EQUAL(atrace[3].except->code(), 3050003); @@ -2693,9 +2718,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[4].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[4].receiver.value, N(charlie)); - BOOST_REQUIRE_EQUAL(atrace[4].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[4].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[4].receiver, N(charlie)); + 
BOOST_REQUIRE_EQUAL(atrace[4].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[4].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[4].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[4].receipt->global_sequence, start_gseq + 2); @@ -2703,9 +2728,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); BOOST_REQUIRE_EQUAL((int)atrace[5].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[5].receiver.value, N(bob)); - BOOST_REQUIRE_EQUAL(atrace[5].act.account.value, N(bob)); - BOOST_REQUIRE_EQUAL(atrace[5].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_foo")); + BOOST_REQUIRE_EQUAL(atrace[5].receiver, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.account, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.name, TEST_METHOD("test_action", "test_action_ordinal_foo")); BOOST_REQUIRE_EQUAL(atrace[5].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[5].except.valid(), false); @@ -2713,9 +2738,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[6].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[6].receiver.value, N(david)); - BOOST_REQUIRE_EQUAL(atrace[6].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[6].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); + BOOST_REQUIRE_EQUAL(atrace[6].receiver, N(david)); + BOOST_REQUIRE_EQUAL(atrace[6].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[6].act.name, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[6].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[6].receipt->global_sequence, start_gseq + 3); @@ -2723,9 +2748,9 @@ 
BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[7].closest_unnotified_ancestor_action_ordinal, 1); - BOOST_REQUIRE_EQUAL(atrace[7].receiver.value, N(charlie)); - BOOST_REQUIRE_EQUAL(atrace[7].act.account.value, N(charlie)); - BOOST_REQUIRE_EQUAL(atrace[7].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_bar")); + BOOST_REQUIRE_EQUAL(atrace[7].receiver, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.account, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.name, TEST_METHOD("test_action", "test_action_ordinal_bar")); BOOST_REQUIRE_EQUAL(atrace[7].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[7].except.valid(), false); @@ -2733,9 +2758,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[8].action_ordinal, 9); BOOST_REQUIRE_EQUAL((int)atrace[8].creator_action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[8].closest_unnotified_ancestor_action_ordinal, 3); - BOOST_REQUIRE_EQUAL(atrace[8].receiver.value, N(david)); - BOOST_REQUIRE_EQUAL(atrace[8].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[8].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[8].receiver, N(david)); + BOOST_REQUIRE_EQUAL(atrace[8].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[8].act.name, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[8].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[8].receipt->global_sequence, start_gseq + 5); @@ -2743,9 +2768,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[9].action_ordinal, 10); BOOST_REQUIRE_EQUAL((int)atrace[9].creator_action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[9].closest_unnotified_ancestor_action_ordinal, 3); - BOOST_REQUIRE_EQUAL(atrace[9].receiver.value, 
N(erin)); - BOOST_REQUIRE_EQUAL(atrace[9].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[9].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); + BOOST_REQUIRE_EQUAL(atrace[9].receiver, N(erin)); + BOOST_REQUIRE_EQUAL(atrace[9].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[9].act.name, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[9].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[9].receipt->global_sequence, start_gseq + 6); @@ -2753,9 +2778,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[10].action_ordinal, 11); BOOST_REQUIRE_EQUAL((int)atrace[10].creator_action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[10].closest_unnotified_ancestor_action_ordinal, 3); - BOOST_REQUIRE_EQUAL(atrace[10].receiver.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[10].act.account.value, N(testapi)); - BOOST_REQUIRE_EQUAL(atrace[10].act.name.value, TEST_METHOD("test_action", "test_action_ordinal4")); + BOOST_REQUIRE_EQUAL(atrace[10].receiver, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[10].act.account, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[10].act.name, TEST_METHOD("test_action", "test_action_ordinal4")); BOOST_REQUIRE_EQUAL(atrace[10].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[10].receipt->global_sequence, start_gseq + 7); diff --git a/unittests/auth_tests.cpp b/unittests/auth_tests.cpp index b944b532423..3bc01bc18bc 100644 --- a/unittests/auth_tests.cpp +++ b/unittests/auth_tests.cpp @@ -77,7 +77,8 @@ BOOST_FIXTURE_TEST_CASE( delegate_auth, TESTER ) { try { wdump((new_auth)); BOOST_CHECK_EQUAL((new_auth == delegated_auth), true); - produce_block( fc::milliseconds(config::block_interval_ms*2) ); + produce_block(); + produce_block(); auto auth = static_cast(control->get_authorization_manager().get_permission({N(alice), config::active_name}).auth); wdump((auth)); @@ -95,26 +96,26 @@ BOOST_FIXTURE_TEST_CASE( delegate_auth, TESTER ) { try { 
BOOST_AUTO_TEST_CASE(update_auths) { try { TESTER chain; - chain.create_account("alice"); - chain.create_account("bob"); + chain.create_account(name("alice")); + chain.create_account(name("bob")); // Deleting active or owner should fail - BOOST_CHECK_THROW(chain.delete_authority("alice", "active"), action_validate_exception); - BOOST_CHECK_THROW(chain.delete_authority("alice", "owner"), action_validate_exception); + BOOST_CHECK_THROW(chain.delete_authority(name("alice"), name("active")), action_validate_exception); + BOOST_CHECK_THROW(chain.delete_authority(name("alice"), name("owner")), action_validate_exception); // Change owner permission - const auto new_owner_priv_key = chain.get_private_key("alice", "new_owner"); + const auto new_owner_priv_key = chain.get_private_key(name("alice"), "new_owner"); const auto new_owner_pub_key = new_owner_priv_key.get_public_key(); - chain.set_authority("alice", "owner", authority(new_owner_pub_key), ""); + chain.set_authority(name("alice"), name("owner"), authority(new_owner_pub_key), {}); chain.produce_blocks(); // Ensure the permission is updated permission_object::id_type owner_id; { - auto obj = chain.find(boost::make_tuple("alice", "owner")); + auto obj = chain.find(boost::make_tuple(name("alice"), name("owner"))); BOOST_TEST(obj != nullptr); - BOOST_TEST(obj->owner == "alice"); - BOOST_TEST(obj->name == "owner"); + BOOST_TEST(obj->owner == name("alice")); + BOOST_TEST(obj->name == name("owner")); BOOST_TEST(obj->parent == 0); owner_id = obj->id; auto auth = obj->auth.to_authority(); @@ -126,17 +127,17 @@ try { } // Change active permission, remember that the owner key has been changed - const auto new_active_priv_key = chain.get_private_key("alice", "new_active"); + const auto new_active_priv_key = chain.get_private_key(name("alice"), "new_active"); const auto new_active_pub_key = new_active_priv_key.get_public_key(); - chain.set_authority("alice", "active", authority(new_active_pub_key), "owner", - { 
permission_level{"alice", "active"} }, { chain.get_private_key("alice", "active") }); + chain.set_authority(name("alice"), name("active"), authority(new_active_pub_key), name("owner"), + { permission_level{name("alice"), name("active")} }, { chain.get_private_key(name("alice"), "active") }); chain.produce_blocks(); { - auto obj = chain.find(boost::make_tuple("alice", "active")); + auto obj = chain.find(boost::make_tuple(name("alice"), name("active"))); BOOST_TEST(obj != nullptr); - BOOST_TEST(obj->owner == "alice"); - BOOST_TEST(obj->name == "active"); + BOOST_TEST(obj->owner == name("alice")); + BOOST_TEST(obj->name == name("active")); BOOST_TEST(obj->parent == owner_id); auto auth = obj->auth.to_authority(); BOOST_TEST(auth.threshold == 1u); @@ -146,221 +147,222 @@ try { BOOST_TEST(auth.keys[0].weight == 1u); } - auto spending_priv_key = chain.get_private_key("alice", "spending"); + auto spending_priv_key = chain.get_private_key(name("alice"), "spending"); auto spending_pub_key = spending_priv_key.get_public_key(); - auto trading_priv_key = chain.get_private_key("alice", "trading"); + auto trading_priv_key = chain.get_private_key(name("alice"), "trading"); auto trading_pub_key = trading_priv_key.get_public_key(); // Bob attempts to create new spending auth for Alice - BOOST_CHECK_THROW( chain.set_authority( "alice", "spending", authority(spending_pub_key), "active", - { permission_level{"bob", "active"} }, { chain.get_private_key("bob", "active") } ), + BOOST_CHECK_THROW( chain.set_authority( name("alice"), name("spending"), authority(spending_pub_key), name("active"), + { permission_level{name("bob"), name("active")} }, + { chain.get_private_key(name("bob"), "active") } ), irrelevant_auth_exception ); // Create new spending auth - chain.set_authority("alice", "spending", authority(spending_pub_key), "active", - { permission_level{"alice", "active"} }, { new_active_priv_key }); + chain.set_authority(name("alice"), name("spending"), authority(spending_pub_key), 
name("active"), + { permission_level{name("alice"), name("active")} }, { new_active_priv_key }); chain.produce_blocks(); { - auto obj = chain.find(boost::make_tuple("alice", "spending")); + auto obj = chain.find(boost::make_tuple(name("alice"), name("spending"))); BOOST_TEST(obj != nullptr); - BOOST_TEST(obj->owner == "alice"); - BOOST_TEST(obj->name == "spending"); - BOOST_TEST(chain.get(obj->parent).owner == "alice"); - BOOST_TEST(chain.get(obj->parent).name == "active"); + BOOST_TEST(obj->owner == name("alice")); + BOOST_TEST(obj->name == name("spending")); + BOOST_TEST(chain.get(obj->parent).owner == name("alice")); + BOOST_TEST(chain.get(obj->parent).name == name("active")); } // Update spending auth parent to be its own, should fail - BOOST_CHECK_THROW(chain.set_authority("alice", "spending", spending_pub_key, "spending", - { permission_level{"alice", "spending"} }, { spending_priv_key }), action_validate_exception); + BOOST_CHECK_THROW(chain.set_authority(name("alice"), name("spending"), spending_pub_key, name("spending"), + { permission_level{name("alice"), name("spending")} }, { spending_priv_key }), action_validate_exception); // Update spending auth parent to be owner, should fail - BOOST_CHECK_THROW(chain.set_authority("alice", "spending", spending_pub_key, "owner", - { permission_level{"alice", "spending"} }, { spending_priv_key }), action_validate_exception); + BOOST_CHECK_THROW(chain.set_authority(name("alice"), name("spending"), spending_pub_key, name("owner"), + { permission_level{name("alice"), name("spending")} }, { spending_priv_key }), action_validate_exception); // Remove spending auth - chain.delete_authority("alice", "spending", { permission_level{"alice", "active"} }, { new_active_priv_key }); + chain.delete_authority(name("alice"), name("spending"), { permission_level{name("alice"), name("active")} }, { new_active_priv_key }); { - auto obj = chain.find(boost::make_tuple("alice", "spending")); + auto obj = 
chain.find(boost::make_tuple(name("alice"), name("spending"))); BOOST_TEST(obj == nullptr); } chain.produce_blocks(); // Create new trading auth - chain.set_authority("alice", "trading", trading_pub_key, "active", - { permission_level{"alice", "active"} }, { new_active_priv_key }); + chain.set_authority(name("alice"), name("trading"), trading_pub_key, name("active"), + { permission_level{name("alice"), name("active")} }, { new_active_priv_key }); // Recreate spending auth again, however this time, it's under trading instead of owner - chain.set_authority("alice", "spending", spending_pub_key, "trading", - { permission_level{"alice", "trading"} }, { trading_priv_key }); + chain.set_authority(name("alice"), name("spending"), spending_pub_key, name("trading"), + { permission_level{name("alice"), name("trading")} }, { trading_priv_key }); chain.produce_blocks(); // Verify correctness of trading and spending { - const auto* trading = chain.find(boost::make_tuple("alice", "trading")); - const auto* spending = chain.find(boost::make_tuple("alice", "spending")); + const auto* trading = chain.find(boost::make_tuple(name("alice"), name("trading"))); + const auto* spending = chain.find(boost::make_tuple(name("alice"), name("spending"))); BOOST_TEST(trading != nullptr); BOOST_TEST(spending != nullptr); - BOOST_TEST(trading->owner == "alice"); - BOOST_TEST(spending->owner == "alice"); - BOOST_TEST(trading->name == "trading"); - BOOST_TEST(spending->name == "spending"); + BOOST_TEST(trading->owner == name("alice")); + BOOST_TEST(spending->owner == name("alice")); + BOOST_TEST(trading->name == name("trading")); + BOOST_TEST(spending->name == name("spending")); BOOST_TEST(spending->parent == trading->id); - BOOST_TEST(chain.get(trading->parent).owner == "alice"); - BOOST_TEST(chain.get(trading->parent).name == "active"); + BOOST_TEST(chain.get(trading->parent).owner == name("alice")); + BOOST_TEST(chain.get(trading->parent).name == name("active")); } // Delete trading, should fail 
since it has children (spending) - BOOST_CHECK_THROW(chain.delete_authority("alice", "trading", - { permission_level{"alice", "active"} }, { new_active_priv_key }), action_validate_exception); + BOOST_CHECK_THROW(chain.delete_authority(name("alice"), name("trading"), + { permission_level{name("alice"), name("active")} }, { new_active_priv_key }), action_validate_exception); // Update trading parent to be spending, should fail since changing parent authority is not supported - BOOST_CHECK_THROW(chain.set_authority("alice", "trading", trading_pub_key, "spending", - { permission_level{"alice", "trading"} }, { trading_priv_key }), action_validate_exception); + BOOST_CHECK_THROW(chain.set_authority(name("alice"), name("trading"), trading_pub_key, name("spending"), + { permission_level{name("alice"), name("trading")} }, { trading_priv_key }), action_validate_exception); // Delete spending auth - chain.delete_authority("alice", "spending", { permission_level{"alice", "active"} }, { new_active_priv_key }); - BOOST_TEST((chain.find(boost::make_tuple("alice", "spending"))) == nullptr); + chain.delete_authority(name("alice"), name("spending"), { permission_level{name("alice"), name("active")} }, { new_active_priv_key }); + BOOST_TEST((chain.find(boost::make_tuple(name("alice"), name("spending")))) == nullptr); // Delete trading auth, now it should succeed since it doesn't have any children anymore - chain.delete_authority("alice", "trading", { permission_level{"alice", "active"} }, { new_active_priv_key }); - BOOST_TEST((chain.find(boost::make_tuple("alice", "trading"))) == nullptr); + chain.delete_authority(name("alice"), name("trading"), { permission_level{name("alice"), name("active")} }, { new_active_priv_key }); + BOOST_TEST((chain.find(boost::make_tuple(name("alice"), name("trading")))) == nullptr); } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE(link_auths) { try { TESTER chain; - chain.create_accounts({"alice","bob"}); + 
chain.create_accounts({name("alice"),name("bob")}); - const auto spending_priv_key = chain.get_private_key("alice", "spending"); + const auto spending_priv_key = chain.get_private_key(name("alice"), "spending"); const auto spending_pub_key = spending_priv_key.get_public_key(); - const auto scud_priv_key = chain.get_private_key("alice", "scud"); + const auto scud_priv_key = chain.get_private_key(name("alice"), "scud"); const auto scud_pub_key = scud_priv_key.get_public_key(); - chain.set_authority("alice", "spending", spending_pub_key, "active"); - chain.set_authority("alice", "scud", scud_pub_key, "spending"); + chain.set_authority(name("alice"), name("spending"), spending_pub_key, name("active")); + chain.set_authority(name("alice"), name("scud"), scud_pub_key, name("spending")); // Send req auth action with alice's spending key, it should fail - BOOST_CHECK_THROW(chain.push_reqauth("alice", { permission_level{N(alice), "spending"} }, { spending_priv_key }), irrelevant_auth_exception); + BOOST_CHECK_THROW(chain.push_reqauth(name("alice"), { permission_level{N(alice), name("spending")} }, { spending_priv_key }), irrelevant_auth_exception); // Link authority for eosio reqauth action with alice's spending key - chain.link_authority("alice", "eosio", "spending", "reqauth"); + chain.link_authority(name("alice"), name("eosio"), name("spending"), name("reqauth")); // Now, req auth action with alice's spending key should succeed - chain.push_reqauth("alice", { permission_level{N(alice), "spending"} }, { spending_priv_key }); + chain.push_reqauth(name("alice"), { permission_level{N(alice), name("spending")} }, { spending_priv_key }); chain.produce_block(); // Relink the same auth should fail - BOOST_CHECK_THROW( chain.link_authority("alice", "eosio", "spending", "reqauth"), action_validate_exception); + BOOST_CHECK_THROW( chain.link_authority(name("alice"), name("eosio"), name("spending"), name("reqauth")), action_validate_exception); // Unlink alice with eosio reqauth - 
chain.unlink_authority("alice", "eosio", "reqauth"); + chain.unlink_authority(name("alice"), name("eosio"), name("reqauth")); // Now, req auth action with alice's spending key should fail - BOOST_CHECK_THROW(chain.push_reqauth("alice", { permission_level{N(alice), "spending"} }, { spending_priv_key }), irrelevant_auth_exception); + BOOST_CHECK_THROW(chain.push_reqauth(name("alice"), { permission_level{N(alice), name("spending")} }, { spending_priv_key }), irrelevant_auth_exception); chain.produce_block(); // Send req auth action with scud key, it should fail - BOOST_CHECK_THROW(chain.push_reqauth("alice", { permission_level{N(alice), "scud"} }, { scud_priv_key }), irrelevant_auth_exception); + BOOST_CHECK_THROW(chain.push_reqauth(name("alice"), { permission_level{N(alice), name("scud")} }, { scud_priv_key }), irrelevant_auth_exception); // Link authority for any eosio action with alice's scud key - chain.link_authority("alice", "eosio", "scud"); + chain.link_authority(name("alice"), name("eosio"), name("scud")); // Now, req auth action with alice's scud key should succeed - chain.push_reqauth("alice", { permission_level{N(alice), "scud"} }, { scud_priv_key }); + chain.push_reqauth(name("alice"), { permission_level{N(alice), name("scud")} }, { scud_priv_key }); // req auth action with alice's spending key should also be fine, since it is the parent of alice's scud key - chain.push_reqauth("alice", { permission_level{N(alice), "spending"} }, { spending_priv_key }); + chain.push_reqauth(name("alice"), { permission_level{N(alice), name("spending")} }, { spending_priv_key }); } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE(link_then_update_auth) { try { TESTER chain; - chain.create_account("alice"); + chain.create_account(name("alice")); - const auto first_priv_key = chain.get_private_key("alice", "first"); + const auto first_priv_key = chain.get_private_key(name("alice"), "first"); const auto first_pub_key = first_priv_key.get_public_key(); - const auto second_priv_key = 
chain.get_private_key("alice", "second"); + const auto second_priv_key = chain.get_private_key(name("alice"), "second"); const auto second_pub_key = second_priv_key.get_public_key(); - chain.set_authority("alice", "first", first_pub_key, "active"); + chain.set_authority(name("alice"), name("first"), first_pub_key, name("active")); - chain.link_authority("alice", "eosio", "first", "reqauth"); - chain.push_reqauth("alice", { permission_level{N(alice), "first"} }, { first_priv_key }); + chain.link_authority(name("alice"), name("eosio"), name("first"), name("reqauth")); + chain.push_reqauth(name("alice"), { permission_level{N(alice), name("first")} }, { first_priv_key }); chain.produce_blocks(13); // Wait at least 6 seconds for first push_reqauth transaction to expire. // Update "first" auth public key - chain.set_authority("alice", "first", second_pub_key, "active"); + chain.set_authority(name("alice"), name("first"), second_pub_key, name("active")); // Authority updated, using previous "first" auth should fail on linked auth - BOOST_CHECK_THROW(chain.push_reqauth("alice", { permission_level{N(alice), "first"} }, { first_priv_key }), unsatisfied_authorization); + BOOST_CHECK_THROW(chain.push_reqauth(name("alice"), { permission_level{N(alice), name("first")} }, { first_priv_key }), unsatisfied_authorization); // Using updated authority, should succeed - chain.push_reqauth("alice", { permission_level{N(alice), "first"} }, { second_priv_key }); + chain.push_reqauth(name("alice"), { permission_level{N(alice), name("first")} }, { second_priv_key }); } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE(create_account) { try { TESTER chain; - chain.create_account("joe"); + chain.create_account(name("joe")); chain.produce_block(); // Verify account created properly - const auto& joe_owner_authority = chain.get(boost::make_tuple("joe", "owner")); + const auto& joe_owner_authority = chain.get(boost::make_tuple(name("joe"), name("owner"))); BOOST_TEST(joe_owner_authority.auth.threshold 
== 1u); BOOST_TEST(joe_owner_authority.auth.accounts.size() == 1u); BOOST_TEST(joe_owner_authority.auth.keys.size() == 1u); - BOOST_TEST(string(joe_owner_authority.auth.keys[0].key) == string(chain.get_public_key("joe", "owner"))); + BOOST_TEST(string(joe_owner_authority.auth.keys[0].key) == string(chain.get_public_key(name("joe"), "owner"))); BOOST_TEST(joe_owner_authority.auth.keys[0].weight == 1u); - const auto& joe_active_authority = chain.get(boost::make_tuple("joe", "active")); + const auto& joe_active_authority = chain.get(boost::make_tuple(name("joe"), name("active"))); BOOST_TEST(joe_active_authority.auth.threshold == 1u); BOOST_TEST(joe_active_authority.auth.accounts.size() == 1u); BOOST_TEST(joe_active_authority.auth.keys.size() == 1u); - BOOST_TEST(string(joe_active_authority.auth.keys[0].key) == string(chain.get_public_key("joe", "active"))); + BOOST_TEST(string(joe_active_authority.auth.keys[0].key) == string(chain.get_public_key(name("joe"), "active"))); BOOST_TEST(joe_active_authority.auth.keys[0].weight == 1u); // Create duplicate name - BOOST_CHECK_EXCEPTION(chain.create_account("joe"), action_validate_exception, + BOOST_CHECK_EXCEPTION(chain.create_account(name("joe")), action_validate_exception, fc_exception_message_is("Cannot create account named joe, as that name is already taken")); // Creating account with name more than 12 chars - BOOST_CHECK_EXCEPTION(chain.create_account("aaaaaaaaaaaaa"), action_validate_exception, + BOOST_CHECK_EXCEPTION(chain.create_account(name("aaaaaaaaaaaaa")), action_validate_exception, fc_exception_message_is("account names can only be 12 chars long")); // Creating account with eosio. prefix with privileged account - chain.create_account("eosio.test1"); + chain.create_account(name("eosio.test1")); // Creating account with eosio. 
prefix with non-privileged account, should fail - BOOST_CHECK_EXCEPTION(chain.create_account("eosio.test2", "joe"), action_validate_exception, + BOOST_CHECK_EXCEPTION(chain.create_account(name("eosio.test2"), name("joe")), action_validate_exception, fc_exception_message_is("only privileged accounts can have names that start with 'eosio.'")); } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE( any_auth ) { try { TESTER chain; - chain.create_accounts( {"alice","bob"} ); + chain.create_accounts( {name("alice"), name("bob")} ); chain.produce_block(); - const auto spending_priv_key = chain.get_private_key("alice", "spending"); + const auto spending_priv_key = chain.get_private_key(name("alice"), "spending"); const auto spending_pub_key = spending_priv_key.get_public_key(); - const auto bob_spending_priv_key = chain.get_private_key("bob", "spending"); + const auto bob_spending_priv_key = chain.get_private_key(name("bob"), "spending"); const auto bob_spending_pub_key = spending_priv_key.get_public_key(); - chain.set_authority("alice", "spending", spending_pub_key, "active"); - chain.set_authority("bob", "spending", bob_spending_pub_key, "active"); + chain.set_authority(name("alice"), name("spending"), spending_pub_key, name("active")); + chain.set_authority(name("bob"), name("spending"), bob_spending_pub_key, name("active")); /// this should fail because spending is not active which is default for reqauth - BOOST_REQUIRE_THROW( chain.push_reqauth("alice", { permission_level{N(alice), "spending"} }, { spending_priv_key }), + BOOST_REQUIRE_THROW( chain.push_reqauth(name("alice"), { permission_level{N(alice), name("spending")} }, { spending_priv_key }), irrelevant_auth_exception ); chain.produce_block(); //test.push_reqauth( N(alice), { permission_level{N(alice),"spending"} }, { spending_priv_key }); - chain.link_authority( "alice", "eosio", "eosio.any", "reqauth" ); - chain.link_authority( "bob", "eosio", "eosio.any", "reqauth" ); + chain.link_authority( name("alice"), 
name("eosio"), name("eosio.any"), name("reqauth") ); + chain.link_authority( name("bob"), name("eosio"), name("eosio.any"), name("reqauth") ); /// this should succeed because eosio::reqauth is linked to any permission - chain.push_reqauth("alice", { permission_level{N(alice), "spending"} }, { spending_priv_key }); + chain.push_reqauth(name("alice"), { permission_level{N(alice), name("spending")} }, { spending_priv_key }); /// this should fail because bob cannot authorize for alice, the permission given must be one-of alices - BOOST_REQUIRE_THROW( chain.push_reqauth("alice", { permission_level{N(bob), "spending"} }, { spending_priv_key }), + BOOST_REQUIRE_THROW( chain.push_reqauth(name("alice"), { permission_level{N(bob), name("spending")} }, { spending_priv_key }), missing_auth_exception ); @@ -370,7 +372,8 @@ BOOST_AUTO_TEST_CASE( any_auth ) { try { BOOST_AUTO_TEST_CASE(no_double_billing) { try { - validating_tester chain( validating_tester::default_config() ); + fc::temp_directory tempdir; + validating_tester chain( tempdir, true ); chain.execute_setup_policy( setup_policy::preactivate_feature_and_new_bios ); chain.produce_block(); @@ -395,9 +398,9 @@ try { authority owner_auth = authority( chain.get_public_key( a, "owner" ) ); - vector pls = {{acc1, "active"}}; - pls.push_back({acc1, "owner"}); // same account but different permission names - pls.push_back({acc1a, "owner"}); + vector pls = {{acc1, name("active")}}; + pls.push_back({acc1, name("owner")}); // same account but different permission names + pls.push_back({acc1a, name("owner")}); trx.actions.emplace_back( pls, newaccount{ .creator = acc1, @@ -449,7 +452,7 @@ try { authority invalid_auth = authority(threshold, {key_weight{chain.get_public_key( a, "owner" ), 1}}, {permission_level_weight{{creator, config::active_name}, 1}}); vector pls; - pls.push_back({creator, "active"}); + pls.push_back({creator, name("active")}); trx.actions.emplace_back( pls, newaccount{ .creator = creator, diff --git 
a/unittests/block_tests.cpp b/unittests/block_tests.cpp index fab79a306a1..023907ce3e6 100644 --- a/unittests/block_tests.cpp +++ b/unittests/block_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include @@ -43,7 +39,7 @@ BOOST_AUTO_TEST_CASE(block_with_invalid_tx_test) tester validator; auto bs = validator.control->create_block_state_future( copy_b ); validator.control->abort_block(); - BOOST_REQUIRE_EXCEPTION(validator.control->push_block( bs ), fc::exception , + BOOST_REQUIRE_EXCEPTION(validator.control->push_block( bs, forked_branch_callback{}, trx_meta_cache_lookup{} ), fc::exception , [] (const fc::exception &e)->bool { return e.code() == account_name_exists_exception::code_value ; }) ; @@ -188,7 +184,94 @@ BOOST_AUTO_TEST_CASE(broadcasted_block_test) bytes bcasted_blk_by_prod_node_packed = fc::raw::pack(*bcasted_blk_by_prod_node); bytes bcasted_blk_by_recv_node_packed = fc::raw::pack(*bcasted_blk_by_recv_node); BOOST_CHECK(std::equal(bcasted_blk_by_prod_node_packed.begin(), bcasted_blk_by_prod_node_packed.end(), bcasted_blk_by_recv_node_packed.begin())); - } +/** + * Verify abort block returns applied transactions in block + */ +BOOST_FIXTURE_TEST_CASE( abort_block_transactions, validating_tester) { try { + + produce_blocks(2); + signed_transaction trx; + + account_name a = N(newco); + account_name creator = config::system_account_name; + + // account does not exist before test + BOOST_REQUIRE_EXCEPTION(control->get_account( a ), fc::exception, + [a] (const fc::exception& e)->bool { + return std::string( e.what() ).find( a.to_string() ) != std::string::npos; + }) ; + + auto owner_auth = authority( get_public_key( a, "owner" ) ); + trx.actions.emplace_back( vector{{creator,config::active_name}}, + newaccount{ + .creator = creator, + .name = a, + .owner = owner_auth, + .active = authority( get_public_key( a, "active" ) ) + }); + set_transaction_headers(trx); + trx.sign( get_private_key( creator, "active" ), 
control->get_chain_id() ); + auto trace = push_transaction( trx ); + + control->get_account( a ); // throws if it does not exist + + vector unapplied_trxs = control->abort_block(); + + // verify transaction returned from abort_block() + BOOST_REQUIRE_EQUAL( 1, unapplied_trxs.size() ); + BOOST_REQUIRE_EQUAL( trx.id(), unapplied_trxs.at(0)->id() ); + + // account does not exist block was aborted which had transaction + BOOST_REQUIRE_EXCEPTION(control->get_account( a ), fc::exception, + [a] (const fc::exception& e)->bool { + return std::string( e.what() ).find( a.to_string() ) != std::string::npos; + }) ; + + produce_blocks(1); + + } FC_LOG_AND_RETHROW() } + +/** + * Verify abort block returns applied transactions in block + */ +BOOST_FIXTURE_TEST_CASE( abort_block_transactions_tester, validating_tester) { try { + + produce_blocks(2); + signed_transaction trx; + + account_name a = N(newco); + account_name creator = config::system_account_name; + + // account does not exist before test + BOOST_REQUIRE_EXCEPTION(control->get_account( a ), fc::exception, + [a] (const fc::exception& e)->bool { + return std::string( e.what() ).find( a.to_string() ) != std::string::npos; + }) ; + + auto owner_auth = authority( get_public_key( a, "owner" ) ); + trx.actions.emplace_back( vector{{creator,config::active_name}}, + newaccount{ + .creator = creator, + .name = a, + .owner = owner_auth, + .active = authority( get_public_key( a, "active" ) ) + }); + set_transaction_headers(trx); + trx.sign( get_private_key( creator, "active" ), control->get_chain_id() ); + auto trace = push_transaction( trx ); + + control->get_account( a ); // throws if it does not exist + + produce_block( fc::milliseconds(config::block_interval_ms*2) ); // aborts block, tester should reapply trx + + control->get_account( a ); // throws if it does not exist + + vector unapplied_trxs = control->abort_block(); // should be empty now + + BOOST_REQUIRE_EQUAL( 0, unapplied_trxs.size() ); + + } FC_LOG_AND_RETHROW() } + 
BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/block_timestamp_tests.cpp b/unittests/block_timestamp_tests.cpp index 81e513189f9..71ea0105f32 100644 --- a/unittests/block_timestamp_tests.cpp +++ b/unittests/block_timestamp_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include diff --git a/unittests/bootseq_tests.cpp b/unittests/bootseq_tests.cpp index 561a04622cc..5673db3e936 100644 --- a/unittests/bootseq_tests.cpp +++ b/unittests/bootseq_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #include #include @@ -286,7 +282,7 @@ BOOST_FIXTURE_TEST_CASE( bootseq_test, bootseq_tester ) { produce_blocks_for_n_rounds(2); // 2 rounds since new producer schedule is set when the first block of next round is irreversible auto active_schedule = control->head_block_state()->active_schedule; BOOST_TEST(active_schedule.producers.size() == 1u); - BOOST_TEST(active_schedule.producers.front().producer_name == "eosio"); + BOOST_TEST(active_schedule.producers.front().producer_name == name("eosio")); // Spend some time so the producer pay pool is filled by the inflation rate produce_min_num_of_blocks_to_spend_time_wo_inactive_prod(fc::seconds(30 * 24 * 3600)); // 30 days @@ -301,27 +297,27 @@ BOOST_FIXTURE_TEST_CASE( bootseq_test, bootseq_tester ) { produce_blocks_for_n_rounds(2); // 2 rounds since new producer schedule is set when the first block of next round is irreversible active_schedule = control->head_block_state()->active_schedule; BOOST_REQUIRE(active_schedule.producers.size() == 21); - BOOST_TEST(active_schedule.producers.at(0).producer_name == "proda"); - BOOST_TEST(active_schedule.producers.at(1).producer_name == "prodb"); - BOOST_TEST(active_schedule.producers.at(2).producer_name == "prodc"); - BOOST_TEST(active_schedule.producers.at(3).producer_name == "prodd"); - BOOST_TEST(active_schedule.producers.at(4).producer_name == "prode"); - 
BOOST_TEST(active_schedule.producers.at(5).producer_name == "prodf"); - BOOST_TEST(active_schedule.producers.at(6).producer_name == "prodg"); - BOOST_TEST(active_schedule.producers.at(7).producer_name == "prodh"); - BOOST_TEST(active_schedule.producers.at(8).producer_name == "prodi"); - BOOST_TEST(active_schedule.producers.at(9).producer_name == "prodj"); - BOOST_TEST(active_schedule.producers.at(10).producer_name == "prodk"); - BOOST_TEST(active_schedule.producers.at(11).producer_name == "prodl"); - BOOST_TEST(active_schedule.producers.at(12).producer_name == "prodm"); - BOOST_TEST(active_schedule.producers.at(13).producer_name == "prodn"); - BOOST_TEST(active_schedule.producers.at(14).producer_name == "prodo"); - BOOST_TEST(active_schedule.producers.at(15).producer_name == "prodp"); - BOOST_TEST(active_schedule.producers.at(16).producer_name == "prodq"); - BOOST_TEST(active_schedule.producers.at(17).producer_name == "prodr"); - BOOST_TEST(active_schedule.producers.at(18).producer_name == "prods"); - BOOST_TEST(active_schedule.producers.at(19).producer_name == "prodt"); - BOOST_TEST(active_schedule.producers.at(20).producer_name == "produ"); + BOOST_TEST(active_schedule.producers.at( 0).producer_name == name("proda")); + BOOST_TEST(active_schedule.producers.at( 1).producer_name == name("prodb")); + BOOST_TEST(active_schedule.producers.at( 2).producer_name == name("prodc")); + BOOST_TEST(active_schedule.producers.at( 3).producer_name == name("prodd")); + BOOST_TEST(active_schedule.producers.at( 4).producer_name == name("prode")); + BOOST_TEST(active_schedule.producers.at( 5).producer_name == name("prodf")); + BOOST_TEST(active_schedule.producers.at( 6).producer_name == name("prodg")); + BOOST_TEST(active_schedule.producers.at( 7).producer_name == name("prodh")); + BOOST_TEST(active_schedule.producers.at( 8).producer_name == name("prodi")); + BOOST_TEST(active_schedule.producers.at( 9).producer_name == name("prodj")); + 
BOOST_TEST(active_schedule.producers.at(10).producer_name == name("prodk")); + BOOST_TEST(active_schedule.producers.at(11).producer_name == name("prodl")); + BOOST_TEST(active_schedule.producers.at(12).producer_name == name("prodm")); + BOOST_TEST(active_schedule.producers.at(13).producer_name == name("prodn")); + BOOST_TEST(active_schedule.producers.at(14).producer_name == name("prodo")); + BOOST_TEST(active_schedule.producers.at(15).producer_name == name("prodp")); + BOOST_TEST(active_schedule.producers.at(16).producer_name == name("prodq")); + BOOST_TEST(active_schedule.producers.at(17).producer_name == name("prodr")); + BOOST_TEST(active_schedule.producers.at(18).producer_name == name("prods")); + BOOST_TEST(active_schedule.producers.at(19).producer_name == name("prodt")); + BOOST_TEST(active_schedule.producers.at(20).producer_name == name("produ")); // Spend some time so the producer pay pool is filled by the inflation rate produce_min_num_of_blocks_to_spend_time_wo_inactive_prod(fc::seconds(30 * 24 * 3600)); // 30 days diff --git a/unittests/contracts.hpp.in b/unittests/contracts.hpp.in index efafa1846c8..9b7d50114dc 100644 --- a/unittests/contracts.hpp.in +++ b/unittests/contracts.hpp.in @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #pragma once #include @@ -36,12 +32,14 @@ namespace eosio { MAKE_READ_WASM_ABI(eosio_token, eosio.token, contracts) MAKE_READ_WASM_ABI(eosio_wrap, eosio.wrap, contracts) - MAKE_READ_WASM_ABI(before_preactivate_eosio_bios, eosio.bios, contracts/old_versions/v1.6.0-rc3) + MAKE_READ_WASM_ABI(before_producer_authority_eosio_bios, eosio.bios, contracts/old_versions/v1.7.0-develop-preactivate_feature) + MAKE_READ_WASM_ABI(before_preactivate_eosio_bios, eosio.bios, contracts/old_versions/v1.6.0-rc3) // Contracts in `eos/unittests/unittests/test-contracts' directory MAKE_READ_WASM_ABI(asserter, asserter, test-contracts) MAKE_READ_WASM_ABI(deferred_test, deferred_test, test-contracts) 
MAKE_READ_WASM_ABI(get_sender_test, get_sender_test, test-contracts) + MAKE_READ_WASM_ABI(get_table_test, get_table_test, test-contracts) MAKE_READ_WASM_ABI(noop, noop, test-contracts) MAKE_READ_WASM_ABI(payloadless, payloadless, test-contracts) MAKE_READ_WASM_ABI(proxy, proxy, test-contracts) diff --git a/unittests/contracts/CMakeLists.txt b/unittests/contracts/CMakeLists.txt index f64c79de062..1c1c3003901 100644 --- a/unittests/contracts/CMakeLists.txt +++ b/unittests/contracts/CMakeLists.txt @@ -8,3 +8,4 @@ file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.token/ DESTINATION ${CMAKE_CURRENT_B file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.wrap/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/eosio.wrap/) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/old_versions/v1.6.0-rc3/eosio.bios/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/old_versions/v1.6.0-rc3/eosio.bios/) +file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/old_versions/v1.7.0-develop-preactivate_feature/eosio.bios/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/old_versions/v1.7.0-develop-preactivate_feature/eosio.bios/) diff --git a/unittests/contracts/eosio.bios/eosio.bios.abi b/unittests/contracts/eosio.bios/eosio.bios.abi index 01f62c976f5..9c9a46eda6f 100644 --- a/unittests/contracts/eosio.bios/eosio.bios.abi +++ b/unittests/contracts/eosio.bios/eosio.bios.abi @@ -1,7 +1,12 @@ { "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT ", "version": "eosio::abi/1.1", - "types": [], + "types": [ + { + "new_type_name": "block_signing_authority", + "type": "variant_block_signing_authority_v0" + } + ], "structs": [ { "name": "abi_hash", @@ -49,6 +54,20 @@ } ] }, + { + "name": "block_signing_authority_v0", + "base": "", + "fields": [ + { + "name": "threshold", + "type": "uint32" + }, + { + "name": "keys", + "type": "key_weight[]" + } + ] + }, { "name": "blockchain_parameters", "base": "", @@ -252,7 +271,7 @@ ] }, { - "name": "producer_key", + "name": "producer_authority", "base": "", "fields": [ { @@ -260,8 +279,8 @@ "type": "name" }, { - "name": "block_signing_key", - "type": "public_key" + "name": "authority", + "type": "block_signing_authority" } ] }, @@ -391,7 +410,7 @@ "fields": [ { "name": "schedule", - "type": "producer_key[]" + "type": "producer_authority[]" } ] }, @@ -454,87 +473,87 @@ { "name": "activate", "type": "activate", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "canceldelay", "type": "canceldelay", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "deleteauth", "type": "deleteauth", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "linkauth", "type": "linkauth", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "newaccount", "type": "newaccount", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "onerror", "type": "onerror", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "reqactivated", "type": "reqactivated", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: 
ICON\n---\n\nBODY" }, { "name": "reqauth", "type": "reqauth", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "setabi", "type": "setabi", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "setalimits", "type": "setalimits", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "setcode", "type": "setcode", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "setglimits", "type": "setglimits", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "setparams", "type": "setparams", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "setpriv", "type": "setpriv", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "setprods", "type": "setprods", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "unlinkauth", "type": "unlinkauth", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" }, { "name": "updateauth", "type": "updateauth", - "ricardian_contract": "" + "ricardian_contract": "---\ntitle: TITLE\nsummary: SUMMARY\nicon: ICON\n---\n\nBODY" } ], "tables": [ @@ -547,5 +566,10 @@ } ], "ricardian_clauses": [], - "variants": [] + "variants": [ + { + "name": "variant_block_signing_authority_v0", + "types": ["block_signing_authority_v0"] + } + ] } \ No newline at end of file diff --git a/unittests/contracts/eosio.bios/eosio.bios.wasm b/unittests/contracts/eosio.bios/eosio.bios.wasm index 968bd1529dc..e5950df279b 100755 
Binary files a/unittests/contracts/eosio.bios/eosio.bios.wasm and b/unittests/contracts/eosio.bios/eosio.bios.wasm differ diff --git a/unittests/contracts/old_versions/v1.7.0-develop-preactivate_feature/eosio.bios/eosio.bios.abi b/unittests/contracts/old_versions/v1.7.0-develop-preactivate_feature/eosio.bios/eosio.bios.abi new file mode 100644 index 00000000000..01f62c976f5 --- /dev/null +++ b/unittests/contracts/old_versions/v1.7.0-develop-preactivate_feature/eosio.bios/eosio.bios.abi @@ -0,0 +1,551 @@ +{ + "____comment": "This file was generated with eosio-abigen. DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "abi_hash", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "hash", + "type": "checksum256" + } + ] + }, + { + "name": "activate", + "base": "", + "fields": [ + { + "name": "feature_digest", + "type": "checksum256" + } + ] + }, + { + "name": "authority", + "base": "", + "fields": [ + { + "name": "threshold", + "type": "uint32" + }, + { + "name": "keys", + "type": "key_weight[]" + }, + { + "name": "accounts", + "type": "permission_level_weight[]" + }, + { + "name": "waits", + "type": "wait_weight[]" + } + ] + }, + { + "name": "blockchain_parameters", + "base": "", + "fields": [ + { + "name": "max_block_net_usage", + "type": "uint64" + }, + { + "name": "target_block_net_usage_pct", + "type": "uint32" + }, + { + "name": "max_transaction_net_usage", + "type": "uint32" + }, + { + "name": "base_per_transaction_net_usage", + "type": "uint32" + }, + { + "name": "net_usage_leeway", + "type": "uint32" + }, + { + "name": "context_free_discount_net_usage_num", + "type": "uint32" + }, + { + "name": "context_free_discount_net_usage_den", + "type": "uint32" + }, + { + "name": "max_block_cpu_usage", + "type": "uint32" + }, + { + "name": "target_block_cpu_usage_pct", + "type": "uint32" + }, + { + "name": "max_transaction_cpu_usage", + "type": "uint32" + }, + { + "name": 
"min_transaction_cpu_usage", + "type": "uint32" + }, + { + "name": "max_transaction_lifetime", + "type": "uint32" + }, + { + "name": "deferred_trx_expiration_window", + "type": "uint32" + }, + { + "name": "max_transaction_delay", + "type": "uint32" + }, + { + "name": "max_inline_action_size", + "type": "uint32" + }, + { + "name": "max_inline_action_depth", + "type": "uint16" + }, + { + "name": "max_authority_depth", + "type": "uint16" + } + ] + }, + { + "name": "canceldelay", + "base": "", + "fields": [ + { + "name": "canceling_auth", + "type": "permission_level" + }, + { + "name": "trx_id", + "type": "checksum256" + } + ] + }, + { + "name": "deleteauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "permission", + "type": "name" + } + ] + }, + { + "name": "key_weight", + "base": "", + "fields": [ + { + "name": "key", + "type": "public_key" + }, + { + "name": "weight", + "type": "uint16" + } + ] + }, + { + "name": "linkauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "code", + "type": "name" + }, + { + "name": "type", + "type": "name" + }, + { + "name": "requirement", + "type": "name" + } + ] + }, + { + "name": "newaccount", + "base": "", + "fields": [ + { + "name": "creator", + "type": "name" + }, + { + "name": "name", + "type": "name" + }, + { + "name": "owner", + "type": "authority" + }, + { + "name": "active", + "type": "authority" + } + ] + }, + { + "name": "onerror", + "base": "", + "fields": [ + { + "name": "sender_id", + "type": "uint128" + }, + { + "name": "sent_trx", + "type": "bytes" + } + ] + }, + { + "name": "permission_level", + "base": "", + "fields": [ + { + "name": "actor", + "type": "name" + }, + { + "name": "permission", + "type": "name" + } + ] + }, + { + "name": "permission_level_weight", + "base": "", + "fields": [ + { + "name": "permission", + "type": "permission_level" + }, + { + "name": "weight", + "type": "uint16" + } + ] + }, + { + "name": 
"producer_key", + "base": "", + "fields": [ + { + "name": "producer_name", + "type": "name" + }, + { + "name": "block_signing_key", + "type": "public_key" + } + ] + }, + { + "name": "reqactivated", + "base": "", + "fields": [ + { + "name": "feature_digest", + "type": "checksum256" + } + ] + }, + { + "name": "reqauth", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + } + ] + }, + { + "name": "setabi", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "abi", + "type": "bytes" + } + ] + }, + { + "name": "setalimits", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "ram_bytes", + "type": "int64" + }, + { + "name": "net_weight", + "type": "int64" + }, + { + "name": "cpu_weight", + "type": "int64" + } + ] + }, + { + "name": "setcode", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "vmtype", + "type": "uint8" + }, + { + "name": "vmversion", + "type": "uint8" + }, + { + "name": "code", + "type": "bytes" + } + ] + }, + { + "name": "setglimits", + "base": "", + "fields": [ + { + "name": "ram", + "type": "uint64" + }, + { + "name": "net", + "type": "uint64" + }, + { + "name": "cpu", + "type": "uint64" + } + ] + }, + { + "name": "setparams", + "base": "", + "fields": [ + { + "name": "params", + "type": "blockchain_parameters" + } + ] + }, + { + "name": "setpriv", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "is_priv", + "type": "uint8" + } + ] + }, + { + "name": "setprods", + "base": "", + "fields": [ + { + "name": "schedule", + "type": "producer_key[]" + } + ] + }, + { + "name": "unlinkauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "code", + "type": "name" + }, + { + "name": "type", + "type": "name" + } + ] + }, + { + "name": "updateauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "permission", + 
"type": "name" + }, + { + "name": "parent", + "type": "name" + }, + { + "name": "auth", + "type": "authority" + } + ] + }, + { + "name": "wait_weight", + "base": "", + "fields": [ + { + "name": "wait_sec", + "type": "uint32" + }, + { + "name": "weight", + "type": "uint16" + } + ] + } + ], + "actions": [ + { + "name": "activate", + "type": "activate", + "ricardian_contract": "" + }, + { + "name": "canceldelay", + "type": "canceldelay", + "ricardian_contract": "" + }, + { + "name": "deleteauth", + "type": "deleteauth", + "ricardian_contract": "" + }, + { + "name": "linkauth", + "type": "linkauth", + "ricardian_contract": "" + }, + { + "name": "newaccount", + "type": "newaccount", + "ricardian_contract": "" + }, + { + "name": "onerror", + "type": "onerror", + "ricardian_contract": "" + }, + { + "name": "reqactivated", + "type": "reqactivated", + "ricardian_contract": "" + }, + { + "name": "reqauth", + "type": "reqauth", + "ricardian_contract": "" + }, + { + "name": "setabi", + "type": "setabi", + "ricardian_contract": "" + }, + { + "name": "setalimits", + "type": "setalimits", + "ricardian_contract": "" + }, + { + "name": "setcode", + "type": "setcode", + "ricardian_contract": "" + }, + { + "name": "setglimits", + "type": "setglimits", + "ricardian_contract": "" + }, + { + "name": "setparams", + "type": "setparams", + "ricardian_contract": "" + }, + { + "name": "setpriv", + "type": "setpriv", + "ricardian_contract": "" + }, + { + "name": "setprods", + "type": "setprods", + "ricardian_contract": "" + }, + { + "name": "unlinkauth", + "type": "unlinkauth", + "ricardian_contract": "" + }, + { + "name": "updateauth", + "type": "updateauth", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "abihash", + "type": "abi_hash", + "index_type": "i64", + "key_names": [], + "key_types": [] + } + ], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git 
a/unittests/contracts/old_versions/v1.7.0-develop-preactivate_feature/eosio.bios/eosio.bios.wasm b/unittests/contracts/old_versions/v1.7.0-develop-preactivate_feature/eosio.bios/eosio.bios.wasm new file mode 100755 index 00000000000..968bd1529dc Binary files /dev/null and b/unittests/contracts/old_versions/v1.7.0-develop-preactivate_feature/eosio.bios/eosio.bios.wasm differ diff --git a/unittests/contracts/test_wasts.hpp b/unittests/contracts/test_wasts.hpp index b1272a7afef..935874bbba5 100644 --- a/unittests/contracts/test_wasts.hpp +++ b/unittests/contracts/test_wasts.hpp @@ -179,6 +179,15 @@ static const char entry_wast_2[] = R"=====( ) )====="; +static const char entry_import_wast[] = R"=====( +(module + (import "env" "abort" (func $abort)) + (export "apply" (func $apply)) + (start $abort) + (func $apply (param $0 i64) (param $1 i64) (param $2 i64)) +) +)====="; + static const char simple_no_memory_wast[] = R"=====( (module (import "env" "require_auth" (func $require_auth (param i64))) @@ -430,6 +439,38 @@ static const char table_checker_small_wast[] = R"=====( ) )====="; +static const char table_init_oob_wast[] = R"=====( +(module + (type $mahsig (func (param i64) (param i64) (param i64))) + (table 1024 anyfunc) + (export "apply" (func $apply)) + (func $apply (param $0 i64) (param $1 i64) (param $2 i64) + ) + (elem (i32.const 1024) $apply) +) +)====="; + +static const char table_init_oob_smaller_wast[] = R"=====( +(module + (type $mahsig (func (param i64) (param i64) (param i64))) + (table 620 anyfunc) + (export "apply" (func $apply)) + (func $apply (param $0 i64) (param $1 i64) (param $2 i64) + ) + (elem (i32.const 700) $apply) +) +)====="; + +static const char table_init_oob_no_table_wast[] = R"=====( +(module + (type $mahsig (func (param i64) (param i64) (param i64))) + (export "apply" (func $apply)) + (func $apply (param $0 i64) (param $1 i64) (param $2 i64) + ) + (elem (i32.const 0) $apply) +) +)====="; + static const char 
global_protection_none_get_wast[] = R"=====( (module (export "apply" (func $apply)) @@ -646,4 +687,91 @@ static const char large_maligned_host_ptr[] = R"=====( )) ) ) -)====="; \ No newline at end of file +)====="; + +static const char depth_assert_wasm[] = R"=====( +(module + (export "apply" (func $$apply)) + (func $$apply (param $$0 i64) (param $$1 i64) (param $$2 i64) + (if (i64.eq (get_global $$depth) (i64.const 0)) (then + (return) + )) + (set_global $$depth + (i64.sub + (get_global $$depth) + (i64.const 1) + ) + ) + (call $$apply + (get_local $$0) + (get_local $$1) + (get_local $$2) + ) + ) + (global $$depth (mut i64) (i64.const ${MAX_DEPTH})) +) +)====="; + +static const char depth_assert_intrinsic[] = R"=====( +(module + (import "env" "current_time" (func $$current_time (result i64))) + (export "apply" (func $$apply)) + (func $$apply (param $$0 i64) (param $$1 i64) (param $$2 i64) + (if (i64.eq (get_global $$depth) (i64.const 1)) (then + (drop (call $$current_time)) + (return) + )) + (set_global $$depth + (i64.sub + (get_global $$depth) + (i64.const 1) + ) + ) + (call $$apply + (get_local $$0) + (get_local $$1) + (get_local $$2) + ) + ) + (global $$depth (mut i64) (i64.const ${MAX_DEPTH})) +) +)====="; + +static const char depth_assert_wasm_float[] = R"=====( +(module + (export "apply" (func $$apply)) + (func $$apply (param $$0 i64) (param $$1 i64) (param $$2 i64) + (set_global $$mcfloaty + (f64.mul + (get_global $$mcfloaty) + (f64.const 3.1415) + ) + ) + (if (i64.eq (get_global $$depth) (i64.const 0)) (then + (return) + )) + (set_global $$depth + (i64.sub + (get_global $$depth) + (i64.const 1) + ) + ) + (call $$apply + (get_local $$0) + (get_local $$1) + (get_local $$2) + ) + ) + (global $$depth (mut i64) (i64.const ${MAX_DEPTH})) + (global $$mcfloaty (mut f64) (f64.const 3.14)) +) +)====="; + +static const std::vector varuint_memory_flags{ + 0x00, 'a', 's', 'm', 0x01, 0x00, 0x00, 0x00, + 0x01, 0x07, 0x01, 0x60, 0x03, 0x7e, 0x7e, 0x7e, 0x00, // types + 
0x03, 0x02, 0x01, 0x00, // functions + 0x04, 0x08, 0x01, 0x70, 0x80, 0x02, 0x80, 0x80, 0x80, 0x00, // memory with flags varuint(0x80 0x02) -> 0x2 + 0x07, 0x09, 0x01, 0x05, 'a', 'p', 'p', 'l', 'y', 0x00, 0x00, // exports + 0x0a, 0x04, 0x01, 0x02, 0x00, 0x0b // code +}; diff --git a/unittests/currency_tests.cpp b/unittests/currency_tests.cpp index 0485d0a4681..7bad1b92194 100644 --- a/unittests/currency_tests.cpp +++ b/unittests/currency_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wsign-compare" #include @@ -99,10 +95,10 @@ class currency_tester : public TESTER { } abi_serializer abi_ser; - static const std::string eosio_token; + static const name eosio_token; }; -const std::string currency_tester::eosio_token = name(N(eosio.token)).to_string(); +const name currency_tester::eosio_token = N(eosio.token); BOOST_AUTO_TEST_SUITE(currency_tests) diff --git a/unittests/database_tests.cpp b/unittests/database_tests.cpp index bd00e7b60eb..e2d047d4e88 100644 --- a/unittests/database_tests.cpp +++ b/unittests/database_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include @@ -33,18 +29,18 @@ BOOST_AUTO_TEST_SUITE(database_tests) // Create an account db.create([](account_object &a) { - a.name = "billy"; + a.name = name("billy"); }); // Make sure we can retrieve that account by name - auto ptr = db.find("billy"); + auto ptr = db.find(name("billy")); BOOST_TEST(ptr != nullptr); // Undo creation of the account ses.undo(); // Make sure we can no longer find the account - ptr = db.find("billy"); + ptr = db.find(name("billy")); BOOST_TEST(ptr == nullptr); } FC_LOG_AND_RETHROW() } diff --git a/unittests/delay_tests.cpp b/unittests/delay_tests.cpp index 2d6f77c7ee9..8d5874c7a8f 100644 --- a/unittests/delay_tests.cpp +++ b/unittests/delay_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #include 
#include #include diff --git a/unittests/eosio.token_tests.cpp b/unittests/eosio.token_tests.cpp index 41185faae75..beee6506c26 100644 --- a/unittests/eosio.token_tests.cpp +++ b/unittests/eosio.token_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #include #include @@ -50,14 +46,14 @@ class eosio_token_tester : public tester { act.name = name; act.data = abi_ser.variant_to_binary( action_type_name, data, abi_serializer_max_time ); - return base_tester::push_action( std::move(act), uint64_t(signer)); + return base_tester::push_action( std::move(act), signer.to_uint64_t() ); } fc::variant get_stats( const string& symbolname ) { auto symb = eosio::chain::symbol::from_string(symbolname); auto symbol_code = symb.to_symbol_code().value; - vector data = get_row_by_account( N(eosio.token), symbol_code, N(stat), symbol_code ); + vector data = get_row_by_account( N(eosio.token), name(symbol_code), N(stat), name(symbol_code) ); return data.empty() ? fc::variant() : abi_ser.binary_to_variant( "currency_stats", data, abi_serializer_max_time ); } @@ -65,7 +61,7 @@ class eosio_token_tester : public tester { { auto symb = eosio::chain::symbol::from_string(symbolname); auto symbol_code = symb.to_symbol_code().value; - vector data = get_row_by_account( N(eosio.token), acc, N(accounts), symbol_code ); + vector data = get_row_by_account( N(eosio.token), acc, N(accounts), name(symbol_code) ); return data.empty() ? 
fc::variant() : abi_ser.binary_to_variant( "account", data, abi_serializer_max_time ); } diff --git a/unittests/eosio_system_tester.hpp b/unittests/eosio_system_tester.hpp index 8c06f187910..44c06f18332 100644 --- a/unittests/eosio_system_tester.hpp +++ b/unittests/eosio_system_tester.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -56,7 +52,7 @@ class eosio_system_tester : public TESTER { create_currency( N(eosio.token), config::system_account_name, core_from_string("10000000000.0000") ); issue(config::system_account_name, core_from_string("1000000000.0000")); - BOOST_REQUIRE_EQUAL( core_from_string("1000000000.0000"), get_balance( "eosio" ) ); + BOOST_REQUIRE_EQUAL( core_from_string("1000000000.0000"), get_balance( name("eosio") ) ); set_code( config::system_account_name, contracts::eosio_system_wasm() ); set_abi( config::system_account_name, contracts::eosio_system_abi().data() ); @@ -79,7 +75,8 @@ class eosio_system_tester : public TESTER { create_account_with_resources( N(bob111111111), config::system_account_name, core_from_string("0.4500"), false ); create_account_with_resources( N(carol1111111), config::system_account_name, core_from_string("1.0000"), false ); - BOOST_REQUIRE_EQUAL( core_from_string("1000000000.0000"), get_balance("eosio") + get_balance("eosio.ramfee") + get_balance("eosio.stake") + get_balance("eosio.ram") ); + BOOST_REQUIRE_EQUAL( core_from_string("1000000000.0000"), + get_balance(name("eosio")) + get_balance(name("eosio.ramfee")) + get_balance(name("eosio.stake")) + get_balance(name("eosio.ram")) ); } action_result open( account_name owner, @@ -237,7 +234,8 @@ class eosio_system_tester : public TESTER { act.name = name; act.data = abi_ser.variant_to_binary( action_type_name, data, abi_serializer_max_time ); - return base_tester::push_action( std::move(act), auth ? uint64_t(signer) : signer == N(bob111111111) ? 
N(alice1111111) : N(bob111111111) ); + return base_tester::push_action( std::move(act), auth ? signer.to_uint64_t() : + signer == N(bob111111111) ? N(alice1111111).to_uint64_t() : N(bob111111111).to_uint64_t() ); } action_result stake( const account_name& from, const account_name& to, const asset& net, const asset& cpu ) { @@ -335,7 +333,7 @@ class eosio_system_tester : public TESTER { } asset get_balance( const account_name& act ) { - vector data = get_row_by_account( N(eosio.token), act, N(accounts), symbol(CORE_SYMBOL).to_symbol_code().value ); + vector data = get_row_by_account( N(eosio.token), act, N(accounts), name(symbol(CORE_SYMBOL).to_symbol_code().value) ); return data.empty() ? asset(0, symbol(CORE_SYMBOL)) : token_abi_ser.binary_to_variant("account", data, abi_serializer_max_time)["balance"].as(); } @@ -390,7 +388,7 @@ class eosio_system_tester : public TESTER { fc::variant get_stats( const string& symbolname ) { auto symb = eosio::chain::symbol::from_string(symbolname); auto symbol_code = symb.to_symbol_code().value; - vector data = get_row_by_account( N(eosio.token), symbol_code, N(stat), symbol_code ); + vector data = get_row_by_account( N(eosio.token), name(symbol_code), N(stat), name(symbol_code) ); return data.empty() ? 
fc::variant() : token_abi_ser.binary_to_variant( "currency_stats", data, abi_serializer_max_time ); } @@ -414,7 +412,7 @@ class eosio_system_tester : public TESTER { abi_serializer msig_abi_ser; { create_account_with_resources( N(eosio.msig), config::system_account_name ); - BOOST_REQUIRE_EQUAL( success(), buyram( "eosio", "eosio.msig", core_from_string("5000.0000") ) ); + BOOST_REQUIRE_EQUAL( success(), buyram( name("eosio"), name("eosio.msig"), core_from_string("5000.0000") ) ); produce_block(); auto trace = base_tester::push_action(config::system_account_name, N(setpriv), @@ -437,8 +435,8 @@ class eosio_system_tester : public TESTER { vector active_and_vote_producers() { //stake more than 15% of total EOS supply to activate chain - transfer( "eosio", "alice1111111", core_from_string("650000000.0000"), "eosio" ); - BOOST_REQUIRE_EQUAL( success(), stake( "alice1111111", "alice1111111", core_from_string("300000000.0000"), core_from_string("300000000.0000") ) ); + transfer( name("eosio"), name("alice1111111"), core_from_string("650000000.0000"), name("eosio") ); + BOOST_REQUIRE_EQUAL( success(), stake( name("alice1111111"), name("alice1111111"), core_from_string("300000000.0000"), core_from_string("300000000.0000") ) ); // create accounts {defproducera, defproducerb, ..., defproducerz} and register as producers std::vector producer_names; @@ -470,9 +468,9 @@ class eosio_system_tester : public TESTER { //vote for producers { - transfer( config::system_account_name, "alice1111111", core_from_string("100000000.0000"), config::system_account_name ); - BOOST_REQUIRE_EQUAL(success(), stake( "alice1111111", core_from_string("30000000.0000"), core_from_string("30000000.0000") ) ); - BOOST_REQUIRE_EQUAL(success(), buyram( "alice1111111", "alice1111111", core_from_string("30000000.0000") ) ); + transfer( config::system_account_name, name("alice1111111"), core_from_string("100000000.0000"), config::system_account_name ); + BOOST_REQUIRE_EQUAL(success(), stake( 
name("alice1111111"), core_from_string("30000000.0000"), core_from_string("30000000.0000") ) ); + BOOST_REQUIRE_EQUAL(success(), buyram( name("alice1111111"), name("alice1111111"), core_from_string("30000000.0000") ) ); BOOST_REQUIRE_EQUAL(success(), push_action(N(alice1111111), N(voteproducer), mvo() ("voter", "alice1111111") ("proxy", name(0).to_string()) diff --git a/unittests/fork_test_utilities.cpp b/unittests/fork_test_utilities.cpp index a8caaeeb233..8ede804daf2 100644 --- a/unittests/fork_test_utilities.cpp +++ b/unittests/fork_test_utilities.cpp @@ -1,11 +1,7 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include "fork_test_utilities.hpp" private_key_type get_private_key( name keyname, string role ) { - return private_key_type::regenerate(fc::sha256::hash(string(keyname)+role)); + return private_key_type::regenerate(fc::sha256::hash(keyname.to_string()+role)); } public_key_type get_public_key( name keyname, string role ){ @@ -21,22 +17,39 @@ void push_blocks( tester& from, tester& to, uint32_t block_num_limit ) { } } -bool produce_empty_blocks_until( tester& t, - account_name last_producer, - account_name next_producer, - uint32_t max_num_blocks_to_produce ) -{ - auto condition_satisfied = [&t, last_producer, next_producer]() { - return t.control->pending_block_producer() == next_producer && t.control->head_block_producer() == last_producer; - }; +namespace { + template + bool produce_empty_blocks_until( base_tester& t, uint32_t max_num_blocks_to_produce, Pred&& pred) { + for( uint32_t blocks_produced = 0; + blocks_produced < max_num_blocks_to_produce; + t.produce_block(), ++blocks_produced ) + { + if( pred() ) + return true; + } - for( uint32_t blocks_produced = 0; - blocks_produced < max_num_blocks_to_produce; - t.produce_block(), ++blocks_produced ) - { - if( condition_satisfied() ) - return true; + return pred(); } +} + +bool produce_until_transition( base_tester& t, + account_name last_producer, + account_name next_producer, + uint32_t 
max_num_blocks_to_produce ) +{ + return produce_empty_blocks_until(t, max_num_blocks_to_produce, [&t, last_producer, next_producer]() { + return t.control->pending_block_producer() == next_producer && t.control->head_block_producer() == last_producer; + }); +} - return condition_satisfied(); +bool produce_until_blocks_from( base_tester& t, + const std::set& expected_producers, + uint32_t max_num_blocks_to_produce ) +{ + auto remaining_producers = expected_producers; + return produce_empty_blocks_until(t, max_num_blocks_to_produce, [&t, &remaining_producers]() { + remaining_producers.erase(t.control->head_block_producer()); + return remaining_producers.size() == 0; + }); } + diff --git a/unittests/fork_test_utilities.hpp b/unittests/fork_test_utilities.hpp index f5ae33ae718..6e228d519e8 100644 --- a/unittests/fork_test_utilities.hpp +++ b/unittests/fork_test_utilities.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include @@ -15,7 +11,11 @@ public_key_type get_public_key( name keyname, string role ); void push_blocks( tester& from, tester& to, uint32_t block_num_limit = std::numeric_limits::max() ); -bool produce_empty_blocks_until( tester& t, - account_name last_producer, - account_name next_producer, - uint32_t max_num_blocks_to_produce = std::numeric_limits::max() ); +bool produce_until_transition( base_tester& t, + account_name last_producer, + account_name next_producer, + uint32_t max_num_blocks_to_produce = std::numeric_limits::max() ); + +bool produce_until_blocks_from( base_tester& t, + const std::set& expected_producers, + uint32_t max_num_blocks_to_produce = std::numeric_limits::max() ); diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index 920d759e951..7268a6d78c6 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #include #include #include @@ -28,11 +24,7 @@ BOOST_AUTO_TEST_CASE( irrblock ) 
try { c.produce_blocks(10); auto r = c.create_accounts( {N(dan),N(sam),N(pam),N(scott)} ); auto res = c.set_producers( {N(dan),N(sam),N(pam),N(scott)} ); - vector sch = { {N(dan),get_public_key(N(dan), "active")}, - {N(sam),get_public_key(N(sam), "active")}, - {N(scott),get_public_key(N(scott), "active")}, - {N(pam),get_public_key(N(pam), "active")} - }; + wlog("set producer schedule to [dan,sam,pam]"); c.produce_blocks(50); @@ -53,10 +45,10 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try { auto res = bios.set_producers( {N(a),N(b),N(c),N(d),N(e)} ); // run until the producers are installed and its the start of "a's" round - BOOST_REQUIRE( produce_empty_blocks_until( bios, N(e), N(a) ) ); + BOOST_REQUIRE( produce_until_transition( bios, N(e), N(a) ) ); // sync remote node - tester remote; + tester remote(setup_policy::none); push_blocks(bios, remote); // produce 6 blocks on bios @@ -143,9 +135,7 @@ BOOST_AUTO_TEST_CASE( forking ) try { wdump((fc::json::to_pretty_string(r))); c.produce_block(); auto res = c.set_producers( {N(dan),N(sam),N(pam)} ); - vector sch = { {N(dan),get_public_key(N(dan), "active")}, - {N(sam),get_public_key(N(sam), "active")}, - {N(pam),get_public_key(N(pam), "active")}}; + wdump((fc::json::to_pretty_string(res))); wlog("set producer schedule to [dan,sam,pam]"); c.produce_blocks(30); @@ -176,7 +166,7 @@ BOOST_AUTO_TEST_CASE( forking ) try { ); - tester c2; + tester c2(setup_policy::none); wlog( "push c1 blocks to c2" ); push_blocks(c, c2); wlog( "end push c1 blocks to c2" ); @@ -278,9 +268,9 @@ BOOST_AUTO_TEST_CASE( forking ) try { bad_block.transaction_mroot = bad_block.previous; auto bad_block_bs = c.control->create_block_state_future( std::make_shared(std::move(bad_block)) ); c.control->abort_block(); - BOOST_REQUIRE_EXCEPTION(c.control->push_block( bad_block_bs ), fc::exception, + BOOST_REQUIRE_EXCEPTION(c.control->push_block( bad_block_bs, forked_branch_callback{}, trx_meta_cache_lookup{} ), fc::exception, [] (const fc::exception 
&ex)->bool { - return ex.to_detail_string().find("block not signed by expected key") != std::string::npos; + return ex.to_detail_string().find("block signed by unexpected key") != std::string::npos; }); } FC_LOG_AND_RETHROW() @@ -299,7 +289,7 @@ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { wlog("set producer schedule to [dan,sam,pam,scott]"); c.produce_blocks(50); - tester c2; + tester c2(setup_policy::none); wlog( "push c1 blocks to c2" ); push_blocks(c, c2); @@ -352,9 +342,9 @@ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { */ BOOST_AUTO_TEST_CASE( validator_accepts_valid_blocks ) try { - tester n1; - tester n2; - tester n3; + tester n1(setup_policy::none); + tester n2(setup_policy::none); + tester n3(setup_policy::none); n1.produce_block(); @@ -371,7 +361,7 @@ BOOST_AUTO_TEST_CASE( validator_accepts_valid_blocks ) try { BOOST_CHECK_EQUAL( n2.control->head_block_id(), id ); BOOST_REQUIRE( first_block ); - first_block->verify_signee( first_block->signee() ); + first_block->verify_signee(); BOOST_CHECK_EQUAL( first_block->header.id(), first_block->block->id() ); BOOST_CHECK( first_block->header.producer_signature == first_block->block->producer_signature ); @@ -395,7 +385,7 @@ BOOST_AUTO_TEST_CASE( read_modes ) try { auto head_block_num = c.control->head_block_num(); auto last_irreversible_block_num = c.control->last_irreversible_block_num(); - tester head(setup_policy::old_bios_only, db_read_mode::HEAD); + tester head(setup_policy::none, db_read_mode::HEAD); push_blocks(c, head); BOOST_CHECK_EQUAL(head_block_num, head.control->fork_db_head_block_num()); BOOST_CHECK_EQUAL(head_block_num, head.control->head_block_num()); @@ -405,7 +395,7 @@ BOOST_AUTO_TEST_CASE( read_modes ) try { BOOST_CHECK_EQUAL(head_block_num, read_only.control->fork_db_head_block_num()); BOOST_CHECK_EQUAL(head_block_num, read_only.control->head_block_num()); - tester irreversible(setup_policy::old_bios_only, db_read_mode::IRREVERSIBLE); + tester irreversible(setup_policy::none, 
db_read_mode::IRREVERSIBLE); push_blocks(c, irreversible); BOOST_CHECK_EQUAL(head_block_num, irreversible.control->fork_db_pending_head_block_num()); BOOST_CHECK_EQUAL(last_irreversible_block_num, irreversible.control->fork_db_head_block_num()); @@ -426,27 +416,27 @@ BOOST_AUTO_TEST_CASE( irreversible_mode ) try { main.produce_block(); main.set_producers( {N(producer1), N(producer2)} ); main.produce_block(); - BOOST_REQUIRE( produce_empty_blocks_until( main, N(producer1), N(producer2), 26) ); + BOOST_REQUIRE( produce_until_transition( main, N(producer1), N(producer2), 26) ); main.create_accounts( {N(alice)} ); main.produce_block(); auto hbn1 = main.control->head_block_num(); auto lib1 = main.control->last_irreversible_block_num(); - BOOST_REQUIRE( produce_empty_blocks_until( main, N(producer2), N(producer1), 11) ); + BOOST_REQUIRE( produce_until_transition( main, N(producer2), N(producer1), 11) ); auto hbn2 = main.control->head_block_num(); auto lib2 = main.control->last_irreversible_block_num(); BOOST_REQUIRE( lib2 < hbn1 ); - tester other; + tester other(setup_policy::none); push_blocks( main, other ); BOOST_CHECK_EQUAL( other.control->head_block_num(), hbn2 ); - BOOST_REQUIRE( produce_empty_blocks_until( main, N(producer1), N(producer2), 12) ); - BOOST_REQUIRE( produce_empty_blocks_until( main, N(producer2), N(producer1), 12) ); + BOOST_REQUIRE( produce_until_transition( main, N(producer1), N(producer2), 12) ); + BOOST_REQUIRE( produce_until_transition( main, N(producer2), N(producer1), 12) ); auto hbn3 = main.control->head_block_num(); auto lib3 = main.control->last_irreversible_block_num(); @@ -463,15 +453,15 @@ BOOST_AUTO_TEST_CASE( irreversible_mode ) try { auto fork_first_block_id = other.control->head_block_id(); wlog( "{w}", ("w", fork_first_block_id)); - BOOST_REQUIRE( produce_empty_blocks_until( other, N(producer2), N(producer1), 11) ); // finish producer2's round + BOOST_REQUIRE( produce_until_transition( other, N(producer2), N(producer1), 11) ); // 
finish producer2's round BOOST_REQUIRE_EQUAL( other.control->pending_block_producer().to_string(), "producer1" ); // Repeat two more times to ensure other has a longer chain than main other.produce_block( fc::milliseconds( 13 * config::block_interval_ms ) ); // skip over producer1's round - BOOST_REQUIRE( produce_empty_blocks_until( other, N(producer2), N(producer1), 11) ); // finish producer2's round + BOOST_REQUIRE( produce_until_transition( other, N(producer2), N(producer1), 11) ); // finish producer2's round other.produce_block( fc::milliseconds( 13 * config::block_interval_ms ) ); // skip over producer1's round - BOOST_REQUIRE( produce_empty_blocks_until( other, N(producer2), N(producer1), 11) ); // finish producer2's round + BOOST_REQUIRE( produce_until_transition( other, N(producer2), N(producer1), 11) ); // finish producer2's round auto hbn4 = other.control->head_block_num(); auto lib4 = other.control->last_irreversible_block_num(); @@ -535,11 +525,11 @@ BOOST_AUTO_TEST_CASE( reopen_forkdb ) try { BOOST_REQUIRE_EQUAL( c1.control->active_producers().version, 1u ); - produce_empty_blocks_until( c1, N(carol), N(alice) ); + produce_until_transition( c1, N(carol), N(alice) ); c1.produce_block(); - produce_empty_blocks_until( c1, N(carol), N(alice) ); + produce_until_transition( c1, N(carol), N(alice) ); - tester c2; + tester c2(setup_policy::none); push_blocks( c1, c2 ); @@ -573,8 +563,156 @@ BOOST_AUTO_TEST_CASE( reopen_forkdb ) try { c1.close(); - c1.open( nullptr ); + c1.open(); + +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( push_block_returns_forked_transactions ) try { + tester c; + while (c.control->head_block_num() < 3) { + c.produce_block(); + } + auto r = c.create_accounts( {N(dan),N(sam),N(pam)} ); + c.produce_block(); + auto res = c.set_producers( {N(dan),N(sam),N(pam)} ); + wlog("set producer schedule to [dan,sam,pam]"); + c.produce_blocks(40); + + tester c2(setup_policy::none); + wlog( "push c1 blocks to c2" ); + push_blocks(c, c2); + + wlog( 
"c1 blocks:" ); + signed_block_ptr cb; + c.produce_blocks(3); + signed_block_ptr b; + cb = b = c.produce_block(); + account_name expected_producer = N(dan); + BOOST_REQUIRE_EQUAL( b->producer.to_string(), expected_producer.to_string() ); + + b = c.produce_block(); + expected_producer = N(sam); + BOOST_REQUIRE_EQUAL( b->producer.to_string(), expected_producer.to_string() ); + c.produce_blocks(10); + c.create_accounts( {N(cam)} ); + c.set_producers( {N(dan),N(sam),N(pam),N(cam)} ); + wlog("set producer schedule to [dan,sam,pam,cam]"); + c.produce_block(); + // The next block should be produced by pam. + + // Sync second chain with first chain. + wlog( "push c1 blocks to c2" ); + push_blocks(c, c2); + wlog( "end push c1 blocks to c2" ); + + // Now sam and pam go on their own fork while dan is producing blocks by himself. + + wlog( "sam and pam go off on their own fork on c2 while dan produces blocks by himself in c1" ); + auto fork_block_num = c.control->head_block_num(); + + wlog( "c2 blocks:" ); + c2.produce_blocks(12); // pam produces 12 blocks + b = c2.produce_block( fc::milliseconds(config::block_interval_ms * 13) ); // sam skips over dan's blocks + expected_producer = N(sam); + BOOST_REQUIRE_EQUAL( b->producer.to_string(), expected_producer.to_string() ); + c2.produce_blocks(11 + 12); + + + wlog( "c1 blocks:" ); + b = c.produce_block( fc::milliseconds(config::block_interval_ms * 13) ); // dan skips over pam's blocks + expected_producer = N(dan); + BOOST_REQUIRE_EQUAL( b->producer.to_string(), expected_producer.to_string() ); + // create accounts on c1 which will be forked out + c.produce_block(); + + transaction_trace_ptr trace1, trace2, trace3; + { // create account the hard way so we can set reference block and expiration + signed_transaction trx; + authority active_auth( get_public_key( N(test1), "active" ) ); + authority owner_auth( get_public_key( N(test1), "owner" ) ); + trx.actions.emplace_back( vector{{config::system_account_name,config::active_name}}, + 
newaccount{ + .creator = config::system_account_name, + .name = N(test1), + .owner = owner_auth, + .active = active_auth, + }); + trx.expiration = c.control->head_block_time() + fc::seconds( 60 ); + trx.set_reference_block( cb->id() ); + trx.sign( get_private_key( config::system_account_name, "active" ), c.control->get_chain_id() ); + trace1 = c.push_transaction( trx ); + } + c.produce_block(); + { + signed_transaction trx; + authority active_auth( get_public_key( N(test2), "active" ) ); + authority owner_auth( get_public_key( N(test2), "owner" ) ); + trx.actions.emplace_back( vector{{config::system_account_name,config::active_name}}, + newaccount{ + .creator = config::system_account_name, + .name = N(test2), + .owner = owner_auth, + .active = active_auth, + }); + trx.expiration = c.control->head_block_time() + fc::seconds( 60 ); + trx.set_reference_block( cb->id() ); + trx.sign( get_private_key( config::system_account_name, "active" ), c.control->get_chain_id() ); + trace2 = c.push_transaction( trx ); + } + { + signed_transaction trx; + authority active_auth( get_public_key( N(test3), "active" ) ); + authority owner_auth( get_public_key( N(test3), "owner" ) ); + trx.actions.emplace_back( vector{{config::system_account_name,config::active_name}}, + newaccount{ + .creator = config::system_account_name, + .name = N(test3), + .owner = owner_auth, + .active = active_auth, + }); + trx.expiration = c.control->head_block_time() + fc::seconds( 60 ); + trx.set_reference_block( cb->id() ); + trx.sign( get_private_key( config::system_account_name, "active" ), c.control->get_chain_id() ); + trace3 = c.push_transaction( trx ); + } + c.produce_block(); + c.produce_blocks(9); + + // dan on chain 1 now gets all of the blocks from chain 2 which should cause fork switch + wlog( "push c2 blocks to c1" ); + for( uint32_t start = fork_block_num + 1, end = c2.control->head_block_num(); start <= end; ++start ) { + auto fb = c2.control->fetch_block_by_number( start ); + c.push_block( fb 
); + } + + // verify transaction on fork is reported by push_block in order + BOOST_REQUIRE_EQUAL( 3, c.get_unapplied_transaction_queue().size() ); + BOOST_REQUIRE_EQUAL( trace1->id, c.get_unapplied_transaction_queue().begin()->id() ); + BOOST_REQUIRE_EQUAL( trace2->id, (++c.get_unapplied_transaction_queue().begin())->id() ); + BOOST_REQUIRE_EQUAL( trace3->id, (++(++c.get_unapplied_transaction_queue().begin()))->id() ); + + BOOST_REQUIRE_EXCEPTION(c.control->get_account( N(test1) ), fc::exception, + [a=N(test1)] (const fc::exception& e)->bool { + return std::string( e.what() ).find( a.to_string() ) != std::string::npos; + }) ; + BOOST_REQUIRE_EXCEPTION(c.control->get_account( N(test2) ), fc::exception, + [a=N(test2)] (const fc::exception& e)->bool { + return std::string( e.what() ).find( a.to_string() ) != std::string::npos; + }) ; + BOOST_REQUIRE_EXCEPTION(c.control->get_account( N(test3) ), fc::exception, + [a=N(test3)] (const fc::exception& e)->bool { + return std::string( e.what() ).find( a.to_string() ) != std::string::npos; + }) ; + + // produce block which will apply the unapplied transactions + c.produce_block(); + + // verify unapplied transactions ran + BOOST_REQUIRE_EQUAL( c.control->get_account( N(test1) ).name, N(test1) ); + BOOST_REQUIRE_EQUAL( c.control->get_account( N(test2) ).name, N(test2) ); + BOOST_REQUIRE_EQUAL( c.control->get_account( N(test3) ).name, N(test3) ); } FC_LOG_AND_RETHROW() + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/main.cpp b/unittests/main.cpp index cb26aeb625b..e51899278da 100644 --- a/unittests/main.cpp +++ b/unittests/main.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include diff --git a/unittests/message_buffer_tests.cpp b/unittests/message_buffer_tests.cpp deleted file mode 100644 index f810a80d2be..00000000000 --- a/unittests/message_buffer_tests.cpp +++ /dev/null @@ -1,362 +0,0 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ -#include - -#include - -#include 
- -namespace eosio { -using namespace std; - -size_t mb_size(boost::asio::mutable_buffer& mb) { -#if BOOST_VERSION >= 106600 - return mb.size(); -#else - return boost::asio::detail::buffer_size_helper(mb); -#endif -} - -void* mb_data(boost::asio::mutable_buffer& mb) { -#if BOOST_VERSION >= 106600 - return mb.data(); -#else - return boost::asio::detail::buffer_cast_helper(mb); -#endif -} - -BOOST_AUTO_TEST_SUITE(message_buffer_tests) - -constexpr size_t def_buffer_size_mb = 4; -constexpr size_t def_buffer_size = 1024*1024*def_buffer_size_mb; - -/// Test default construction and buffer sequence generation -BOOST_AUTO_TEST_CASE(message_buffer_construction) -{ - try { - fc::message_buffer mb; - BOOST_CHECK_EQUAL(mb.total_bytes(), def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0u); - BOOST_CHECK_EQUAL(mb.read_ptr(), mb.write_ptr()); - - auto mbs = mb.get_buffer_sequence_for_boost_async_read(); - auto mbsi = mbs.begin(); - BOOST_CHECK_EQUAL(mb_size(*mbsi), def_buffer_size); - BOOST_CHECK_EQUAL(mb_data(*mbsi), mb.write_ptr()); - mbsi++; - BOOST_CHECK(mbsi == mbs.end()); - } - FC_LOG_AND_RETHROW() -} - -/// Test buffer growth and shrinking -BOOST_AUTO_TEST_CASE(message_buffer_growth) -{ - try { - fc::message_buffer mb; - mb.add_buffer_to_chain(); - BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), 2 * def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0u); - BOOST_CHECK_EQUAL(mb.read_ptr(), mb.write_ptr()); - - { - auto mbs = mb.get_buffer_sequence_for_boost_async_read(); - auto mbsi = mbs.begin(); - BOOST_CHECK_EQUAL(mb_size(*mbsi), def_buffer_size); - BOOST_CHECK_EQUAL(mb_data(*mbsi), mb.write_ptr()); - mbsi++; - BOOST_CHECK(mbsi != mbs.end()); - BOOST_CHECK_EQUAL(mb_size(*mbsi), def_buffer_size); - BOOST_CHECK_NE(mb_data(*mbsi), nullptr); - mbsi++; - BOOST_CHECK(mbsi == mbs.end()); - } - - mb.advance_write_ptr(100); - 
BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), 2 * def_buffer_size - 100); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 100u); - BOOST_CHECK_NE(mb.read_ptr(), nullptr); - BOOST_CHECK_NE(mb.write_ptr(), nullptr); - BOOST_CHECK_EQUAL((mb.read_ptr() + 100), mb.write_ptr()); - - { - auto mbs = mb.get_buffer_sequence_for_boost_async_read(); - auto mbsi = mbs.begin(); - BOOST_CHECK_EQUAL(mb_size(*mbsi), def_buffer_size - 100); - BOOST_CHECK_EQUAL(mb_data(*mbsi), mb.write_ptr()); - mbsi++; - BOOST_CHECK(mbsi != mbs.end()); - BOOST_CHECK_EQUAL(mb_size(*mbsi), def_buffer_size); - BOOST_CHECK_NE(mb_data(*mbsi), nullptr); - mbsi++; - BOOST_CHECK(mbsi == mbs.end()); - } - - mb.advance_read_ptr(50); - BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), 2 * def_buffer_size - 100); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 50u); - - mb.advance_write_ptr(def_buffer_size); - BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), def_buffer_size - 100); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 50 + def_buffer_size); - - // Moving read_ptr into second block should reset second block to first - mb.advance_read_ptr(def_buffer_size); - BOOST_CHECK_EQUAL(mb.total_bytes(), def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), def_buffer_size - 100); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 50u); - - // Moving read_ptr to write_ptr should shrink chain and reset ptrs - mb.advance_read_ptr(50); - BOOST_CHECK_EQUAL(mb.total_bytes(), def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0u); - - mb.add_buffer_to_chain(); - BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), 2 * def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0u); - - mb.advance_write_ptr(50); - BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * def_buffer_size); - 
BOOST_CHECK_EQUAL(mb.bytes_to_write(), 2 * def_buffer_size - 50); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 50u); - - // Moving read_ptr to write_ptr should shrink chain and reset ptrs - mb.advance_read_ptr(50); - BOOST_CHECK_EQUAL(mb.total_bytes(), def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0u); - } - FC_LOG_AND_RETHROW() -} - -/// Test peek and read across multiple buffers -BOOST_AUTO_TEST_CASE(message_buffer_peek_read) -{ - try { - { - const uint32_t small = 32; - fc::message_buffer mb; - BOOST_CHECK_EQUAL(mb.total_bytes(), small); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), small); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0); - BOOST_CHECK_EQUAL(mb.read_ptr(), mb.write_ptr()); - BOOST_CHECK_EQUAL(mb.read_index().first, 0); - BOOST_CHECK_EQUAL(mb.read_index().second, 0); - BOOST_CHECK_EQUAL(mb.write_index().first, 0); - BOOST_CHECK_EQUAL(mb.write_index().second, 0); - - mb.add_space(100 - small); - BOOST_CHECK_EQUAL(mb.total_bytes(), 4 * small); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), 4 * small); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0); - BOOST_CHECK_EQUAL(mb.read_ptr(), mb.write_ptr()); - - char* write_ptr = mb.write_ptr(); - for (char ind = 0; ind < 100; ) { - *write_ptr = ind; - ind++; - if (ind % small == 0) { - mb.advance_write_ptr(small); - write_ptr = mb.write_ptr(); - } else { - write_ptr++; - } - } - mb.advance_write_ptr(100 % small); - - BOOST_CHECK_EQUAL(mb.total_bytes(), 4 * small); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), 4 * small - 100); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 100); - BOOST_CHECK_NE((void*) mb.read_ptr(), (void*) mb.write_ptr()); - BOOST_CHECK_EQUAL(mb.read_index().first, 0); - BOOST_CHECK_EQUAL(mb.read_index().second, 0); - BOOST_CHECK_EQUAL(mb.write_index().first, 3); - BOOST_CHECK_EQUAL(mb.write_index().second, 4); - - char buffer[100]; - auto index = mb.read_index(); - mb.peek(buffer, 50, index); - mb.peek(buffer+50, 50, index); - for (int i=0; i < 100; 
i++) { - BOOST_CHECK_EQUAL(i, buffer[i]); - } - - BOOST_CHECK_EQUAL(mb.total_bytes(), 4 * small); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), 4 * small - 100); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 100); - BOOST_CHECK_NE((void*) mb.read_ptr(), (void*) mb.write_ptr()); - - char buffer2[100]; - mb.read(buffer2, 100); - for (int i=0; i < 100; i++) { - BOOST_CHECK_EQUAL(i, buffer2[i]); - } - - BOOST_CHECK_EQUAL(mb.total_bytes(), small); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), small); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0); - BOOST_CHECK_EQUAL(mb.read_ptr(), mb.write_ptr()); - } - } - FC_LOG_AND_RETHROW() -} - -/// Test automatic allocation when advancing the read_ptr to the end. -BOOST_AUTO_TEST_CASE(message_buffer_write_ptr_to_end) -{ - try { - { - const uint32_t small = 32; - fc::message_buffer mb; - BOOST_CHECK_EQUAL(mb.total_bytes(), small); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), small); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0); - BOOST_CHECK_EQUAL(mb.read_ptr(), mb.write_ptr()); - BOOST_CHECK_EQUAL(mb.read_index().first, 0); - BOOST_CHECK_EQUAL(mb.read_index().second, 0); - BOOST_CHECK_EQUAL(mb.write_index().first, 0); - BOOST_CHECK_EQUAL(mb.write_index().second, 0); - - char* write_ptr = mb.write_ptr(); - for (uint32_t ind = 0; ind < small; ind++) { - *write_ptr = ind; - write_ptr++; - } - mb.advance_write_ptr(small); - - BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * small); - BOOST_CHECK_EQUAL(mb.bytes_to_write(), small); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), small); - BOOST_CHECK_NE((void*) mb.read_ptr(), (void*) mb.write_ptr()); - BOOST_CHECK_EQUAL(mb.read_index().first, 0); - BOOST_CHECK_EQUAL(mb.read_index().second, 0); - BOOST_CHECK_EQUAL(mb.write_index().first, 1); - BOOST_CHECK_EQUAL(mb.write_index().second, 0); - - auto mbs = mb.get_buffer_sequence_for_boost_async_read(); - auto mbsi = mbs.begin(); - BOOST_CHECK_EQUAL(mb_size(*mbsi), small); - BOOST_CHECK_EQUAL(mb_data(*mbsi), mb.write_ptr()); - BOOST_CHECK_EQUAL(mb.read_ptr()+small, mb.write_ptr()); 
- mbsi++; - BOOST_CHECK(mbsi == mbs.end()); - } - } - FC_LOG_AND_RETHROW() -} - -BOOST_AUTO_TEST_CASE(message_buffer_read_peek_bounds) { - using my_message_buffer_t = fc::message_buffer<1024>; - my_message_buffer_t mbuff; - unsigned char stuff[] = { - 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff - }; - memcpy(mbuff.write_ptr(), stuff, sizeof(stuff)); - mbuff.advance_write_ptr(sizeof(stuff)); - - my_message_buffer_t::index_t index = mbuff.read_index(); - uint8_t throw_away_buffer[4]; - mbuff.peek(&throw_away_buffer, 4, index); //8 bytes left to peek afterwards - mbuff.peek(&throw_away_buffer, 4, index); //4 bytes left to peek afterwards - mbuff.peek(&throw_away_buffer, 2, index); //2 bytes left to peek afterwards - BOOST_CHECK_THROW(mbuff.peek(&throw_away_buffer, 3, index), fc::out_of_range_exception); - mbuff.peek(&throw_away_buffer, 1, index); //1 byte left to peek afterwards - mbuff.peek(&throw_away_buffer, 0, index); //1 byte left to peek afterwards - mbuff.peek(&throw_away_buffer, 1, index); //no bytes left to peek afterwards - BOOST_CHECK_THROW(mbuff.peek(&throw_away_buffer, 1, index), fc::out_of_range_exception); - - mbuff.read(&throw_away_buffer, 4); //8 bytes left to read afterwards - mbuff.read(&throw_away_buffer, 4); //4 bytes left to read afterwards - mbuff.read(&throw_away_buffer, 2); //2 bytes left to read afterwards - BOOST_CHECK_THROW(mbuff.read(&throw_away_buffer, 4), fc::out_of_range_exception); - mbuff.read(&throw_away_buffer, 1); //1 byte left to read afterwards - mbuff.read(&throw_away_buffer, 0); //1 byte left to read afterwards - mbuff.read(&throw_away_buffer, 1); //no bytes left to read afterwards - BOOST_CHECK_THROW(mbuff.read(&throw_away_buffer, 1), fc::out_of_range_exception); -} - -BOOST_AUTO_TEST_CASE(message_buffer_read_peek_bounds_multi) { - using my_message_buffer_t = fc::message_buffer<5>; - my_message_buffer_t mbuff; - unsigned char stuff[] = { - 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff - }; - memcpy(mbuff.write_ptr(), stuff, 5); - mbuff.advance_write_ptr(5); - memcpy(mbuff.write_ptr(), stuff+5, 5); - mbuff.advance_write_ptr(5); - memcpy(mbuff.write_ptr(), stuff+10, 2); - mbuff.advance_write_ptr(2); - - my_message_buffer_t::index_t index = mbuff.read_index(); - uint8_t throw_away_buffer[4]; - mbuff.peek(&throw_away_buffer, 4, index); //8 bytes left to peek afterwards - mbuff.peek(&throw_away_buffer, 4, index); //4 bytes left to peek afterwards - mbuff.peek(&throw_away_buffer, 2, index); //2 bytes left to peek afterwards - BOOST_CHECK_THROW(mbuff.peek(&throw_away_buffer, 3, index), fc::out_of_range_exception); - mbuff.peek(&throw_away_buffer, 1, index); //1 bytes left to peek afterwards - mbuff.peek(&throw_away_buffer, 0, index); //1 bytes left to peek afterwards - mbuff.peek(&throw_away_buffer, 1, index); //no bytes left to peek afterwards - BOOST_CHECK_THROW(mbuff.peek(&throw_away_buffer, 1, index), fc::out_of_range_exception); - - mbuff.read(&throw_away_buffer, 4); //8 bytes left to read afterwards - mbuff.read(&throw_away_buffer, 4); //4 bytes left to read afterwards - mbuff.read(&throw_away_buffer, 2); //2 bytes left to read afterwards - BOOST_CHECK_THROW(mbuff.read(&throw_away_buffer, 4), fc::out_of_range_exception); - mbuff.read(&throw_away_buffer, 1); //1 bytes left to read afterwards - mbuff.read(&throw_away_buffer, 0); //1 bytes left to read afterwards - mbuff.read(&throw_away_buffer, 1); //no bytes left to read afterwards - BOOST_CHECK_THROW(mbuff.read(&throw_away_buffer, 1), fc::out_of_range_exception); -} - -BOOST_AUTO_TEST_CASE(message_buffer_datastream) { - using my_message_buffer_t = fc::message_buffer<1024>; - my_message_buffer_t mbuff; - - char buf[1024]; - fc::datastream ds( buf, 1024 ); - - int v = 13; - fc::raw::pack( ds, v ); - v = 42; - fc::raw::pack( ds, 42 ); - fc::raw::pack( ds, std::string( "hello" ) ); - - memcpy(mbuff.write_ptr(), buf, 1024); - mbuff.advance_write_ptr(1024); - - for( int i = 0; i < 3; ++i ) { 
- auto ds2 = mbuff.create_peek_datastream(); - fc::raw::unpack( ds2, v ); - BOOST_CHECK_EQUAL( 13, v ); - fc::raw::unpack( ds2, v ); - BOOST_CHECK_EQUAL( 42, v ); - std::string s; - fc::raw::unpack( ds2, s ); - BOOST_CHECK_EQUAL( s, std::string( "hello" ) ); - } - - { - auto ds2 = mbuff.create_datastream(); - fc::raw::unpack( ds2, v ); - BOOST_CHECK_EQUAL( 13, v ); - fc::raw::unpack( ds2, v ); - BOOST_CHECK_EQUAL( 42, v ); - std::string s; - fc::raw::unpack( ds2, s ); - BOOST_CHECK_EQUAL( s, std::string( "hello" ) ); - } -} - -BOOST_AUTO_TEST_SUITE_END() - -} // namespace eosio diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 62a38b4785e..7414c8a104b 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include @@ -13,6 +9,7 @@ #include #include #include +#include #include @@ -87,7 +84,8 @@ namespace eosio using namespace chain; using namespace std; -static constexpr uint64_t name_suffix( uint64_t n ) { +static constexpr uint64_t name_suffix( name nv ) { + uint64_t n = nv.to_uint64_t(); uint32_t remaining_bits_after_last_actual_dot = 0; uint32_t tmp = 0; for( int32_t remaining_bits = 59; remaining_bits >= 4; remaining_bits -= 5 ) { // Note: remaining_bits must remain signed integer @@ -119,9 +117,17 @@ static constexpr uint64_t name_suffix( uint64_t n ) { BOOST_AUTO_TEST_SUITE(misc_tests) +BOOST_AUTO_TEST_CASE(reverse_endian_tests) +{ + BOOST_CHECK_EQUAL( endian_reverse_u64(0x0123456789abcdef), 0xefcdab8967452301 ); + BOOST_CHECK_EQUAL( endian_reverse_u64(0x0102030405060708), 0x0807060504030201 ); + BOOST_CHECK_EQUAL( endian_reverse_u32(0x01234567), 0x67452301 ); + BOOST_CHECK_EQUAL( endian_reverse_u32(0x01020304), 0x04030201 ); +} + BOOST_AUTO_TEST_CASE(name_suffix_tests) { - BOOST_CHECK_EQUAL( name{name_suffix(0)}, name{0} ); + BOOST_CHECK_EQUAL( name{name_suffix(name(0))}, name{0} ); BOOST_CHECK_EQUAL( 
name{name_suffix(N(abcdehijklmn))}, name{N(abcdehijklmn)} ); BOOST_CHECK_EQUAL( name{name_suffix(N(abcdehijklmn1))}, name{N(abcdehijklmn1)} ); BOOST_CHECK_EQUAL( name{name_suffix(N(abc.def))}, name{N(def)} ); @@ -317,9 +323,9 @@ struct permission_visitor { BOOST_AUTO_TEST_CASE(authority_checker) { try { testing::TESTER test; - auto a = test.get_public_key("a", "active"); - auto b = test.get_public_key("b", "active"); - auto c = test.get_public_key("c", "active"); + auto a = test.get_public_key(name("a"), "active"); + auto b = test.get_public_key(name("b"), "active"); + auto c = test.get_public_key(name("c"), "active"); auto GetNullAuthority = [](auto){abort(); return authority();}; @@ -375,7 +381,7 @@ BOOST_AUTO_TEST_CASE(authority_checker) return authority(1, {key_weight{c, 1}}); }; - A = authority(2, {key_weight{a, 2}, key_weight{b, 1}}, {permission_level_weight{{"hello", "world"}, 1}}); + A = authority(2, {key_weight{a, 2}, key_weight{b, 1}}, {permission_level_weight{{name("hello"), name("world")}, 1}}); { auto checker = make_auth_checker(GetCAuthority, 2, {a}); BOOST_TEST(checker.satisfied(A)); @@ -415,7 +421,7 @@ BOOST_AUTO_TEST_CASE(authority_checker) BOOST_TEST(checker.unused_keys().count(c) == 1u); } - A = authority(3, {key_weight{a, 2}, key_weight{b, 1}}, {permission_level_weight{{"hello", "world"}, 3}}); + A = authority(3, {key_weight{a, 2}, key_weight{b, 1}}, {permission_level_weight{{name("hello"), name("world")}, 3}}); { auto checker = make_auth_checker(GetCAuthority, 2, {a, b}); BOOST_TEST(checker.satisfied(A)); @@ -432,7 +438,7 @@ BOOST_AUTO_TEST_CASE(authority_checker) BOOST_TEST(checker.unused_keys().count(b) == 1u); } - A = authority(2, {key_weight{a, 1}, key_weight{b, 1}}, {permission_level_weight{{"hello", "world"}, 1}}); + A = authority(2, {key_weight{a, 1}, key_weight{b, 1}}, {permission_level_weight{{name("hello"), name("world")}, 1}}); BOOST_TEST(!make_auth_checker(GetCAuthority, 2, {a}).satisfied(A)); 
BOOST_TEST(!make_auth_checker(GetCAuthority, 2, {b}).satisfied(A)); BOOST_TEST(!make_auth_checker(GetCAuthority, 2, {c}).satisfied(A)); @@ -448,7 +454,7 @@ BOOST_AUTO_TEST_CASE(authority_checker) BOOST_TEST(checker.unused_keys().count(c) == 1u); } - A = authority(2, {key_weight{a, 1}, key_weight{b, 1}}, {permission_level_weight{{"hello", "world"}, 2}}); + A = authority(2, {key_weight{a, 1}, key_weight{b, 1}}, {permission_level_weight{{name("hello"), name("world")}, 2}}); BOOST_TEST(make_auth_checker(GetCAuthority, 2, {a, b}).satisfied(A)); BOOST_TEST(make_auth_checker(GetCAuthority, 2, {c}).satisfied(A)); BOOST_TEST(!make_auth_checker(GetCAuthority, 2, {a}).satisfied(A)); @@ -462,16 +468,16 @@ BOOST_AUTO_TEST_CASE(authority_checker) BOOST_TEST(checker.used_keys().count(c) == 1u); } - auto d = test.get_public_key("d", "active"); - auto e = test.get_public_key("e", "active"); + auto d = test.get_public_key(name("d"), "active"); + auto e = test.get_public_key(name("e"), "active"); auto GetAuthority = [d, e] (const permission_level& perm) { - if (perm.actor == "top") - return authority(2, {key_weight{d, 1}}, {permission_level_weight{{"bottom", "bottom"}, 1}}); + if (perm.actor == name("top")) + return authority(2, {key_weight{d, 1}}, {permission_level_weight{{name("bottom"), name("bottom")}, 1}}); return authority{1, {{e, 1}}, {}}; }; - A = authority(5, {key_weight{a, 2}, key_weight{b, 2}, key_weight{c, 2}}, {permission_level_weight{{"top", "top"}, 5}}); + A = authority(5, {key_weight{a, 2}, key_weight{b, 2}, key_weight{c, 2}}, {permission_level_weight{{name("top"), name("top")}, 5}}); { auto checker = make_auth_checker(GetAuthority, 2, {d, e}); BOOST_TEST(checker.satisfied(A)); @@ -538,38 +544,38 @@ BOOST_AUTO_TEST_CASE(authority_checker) } { auto A2 = authority(4, {key_weight{b, 1}, key_weight{a, 1}, key_weight{c, 1}}, - { permission_level_weight{{"a", "world"}, 1}, - permission_level_weight{{"hello", "world"}, 1}, - permission_level_weight{{"hi", "world"}, 1} + { 
permission_level_weight{{name("a"), name("world")}, 1}, + permission_level_weight{{name("hello"), name("world")}, 1}, + permission_level_weight{{name("hi"), name("world")}, 1} }); auto B2 = authority(4, {key_weight{b, 1}, key_weight{a, 1}, key_weight{c, 1}}, - {permission_level_weight{{"hello", "world"}, 1} + {permission_level_weight{{name("hello"), name("world")}, 1} }); auto C2 = authority(4, {key_weight{b, 1}, key_weight{a, 1}, key_weight{c, 1}}, - { permission_level_weight{{"hello", "there"}, 1}, - permission_level_weight{{"hello", "world"}, 1} + { permission_level_weight{{name("hello"), name("there")}, 1}, + permission_level_weight{{name("hello"), name("world")}, 1} }); // invalid: duplicate auto D2 = authority(4, {key_weight{b, 1}, key_weight{a, 1}, key_weight{c, 1}}, - { permission_level_weight{{"hello", "world"}, 1}, - permission_level_weight{{"hello", "world"}, 2} + { permission_level_weight{{name("hello"), name("world")}, 1}, + permission_level_weight{{name("hello"), name("world")}, 2} }); // invalid: wrong order auto E2 = authority(4, {key_weight{b, 1}, key_weight{a, 1}, key_weight{c, 1}}, - { permission_level_weight{{"hello", "world"}, 2}, - permission_level_weight{{"hello", "there"}, 1} + { permission_level_weight{{name("hello"), name("world")}, 2}, + permission_level_weight{{name("hello"), name("there")}, 1} }); // invalid: wrong order auto F2 = authority(4, {key_weight{b, 1}, key_weight{a, 1}, key_weight{c, 1}}, - { permission_level_weight{{"hi", "world"}, 2}, - permission_level_weight{{"hello", "world"}, 1} + { permission_level_weight{{name("hi"), name("world")}, 2}, + permission_level_weight{{name("hello"), name("world")}, 1} }); // invalid: insufficient weight auto G2 = authority(7, {key_weight{b, 1}, key_weight{a, 1}, key_weight{c, 1}}, - { permission_level_weight{{"a", "world"}, 1}, - permission_level_weight{{"hello", "world"}, 1}, - permission_level_weight{{"hi", "world"}, 1} + { permission_level_weight{{name("a"), name("world")}, 1}, + 
permission_level_weight{{name("hello"), name("world")}, 1}, + permission_level_weight{{name("hi"), name("world")}, 1} }); BOOST_TEST(validate(A2)); @@ -614,7 +620,7 @@ BOOST_AUTO_TEST_CASE(alphabetic_sort) vector uwords; for(const auto w: words) { auto n = name(w.c_str()); - uwords.push_back(n.value); + uwords.push_back(n.to_uint64_t()); } std::sort(uwords.begin(), uwords.end(), std::less()); @@ -676,9 +682,9 @@ BOOST_AUTO_TEST_CASE(transaction_test) { try { BOOST_CHECK_EQUAL(1u, trx.signatures.size()); trx.validate(); - packed_transaction pkt(trx, packed_transaction::none); + packed_transaction pkt(trx, packed_transaction::compression_type::none); - packed_transaction pkt2(trx, packed_transaction::zlib); + packed_transaction pkt2(trx, packed_transaction::compression_type::zlib); BOOST_CHECK_EQUAL(true, trx.expiration == pkt.expiration()); BOOST_CHECK_EQUAL(true, trx.expiration == pkt2.expiration()); @@ -815,56 +821,40 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { trx.sign( private_key, test.control->get_chain_id() ); BOOST_CHECK_EQUAL(1u, trx.signatures.size()); - packed_transaction pkt(trx, packed_transaction::none); - packed_transaction pkt2(trx, packed_transaction::zlib); + packed_transaction pkt(trx, packed_transaction::compression_type::none); + packed_transaction pkt2(trx, packed_transaction::compression_type::zlib); - transaction_metadata_ptr mtrx = std::make_shared( std::make_shared( trx, packed_transaction::none) ); - transaction_metadata_ptr mtrx2 = std::make_shared( std::make_shared( trx, packed_transaction::zlib) ); + packed_transaction_ptr ptrx = std::make_shared( trx, packed_transaction::compression_type::none); + packed_transaction_ptr ptrx2 = std::make_shared( trx, packed_transaction::compression_type::zlib); BOOST_CHECK_EQUAL(trx.id(), pkt.id()); BOOST_CHECK_EQUAL(trx.id(), pkt2.id()); - BOOST_CHECK_EQUAL(trx.id(), mtrx->id); - BOOST_CHECK_EQUAL(trx.id(), mtrx2->id); + BOOST_CHECK_EQUAL(trx.id(), ptrx->id()); + 
BOOST_CHECK_EQUAL(trx.id(), ptrx2->id()); named_thread_pool thread_pool( "misc", 5 ); - BOOST_CHECK( !mtrx->signing_keys_future.valid() ); - BOOST_CHECK( !mtrx2->signing_keys_future.valid() ); - - transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::start_recover_keys( mtrx2, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); - - BOOST_CHECK( mtrx->signing_keys_future.valid() ); - BOOST_CHECK( mtrx2->signing_keys_future.valid() ); + auto fut = transaction_metadata::start_recover_keys( ptrx, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); + auto fut2 = transaction_metadata::start_recover_keys( ptrx2, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); - // no-op - transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::start_recover_keys( mtrx2, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); + // start another key reovery on same packed_transaction, creates a new future with transaction_metadata, should not interfere with above + transaction_metadata::start_recover_keys( ptrx, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( ptrx2, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); - auto keys = mtrx->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1u, keys.second.size()); - BOOST_CHECK_EQUAL(public_key, *keys.second.begin()); + auto mtrx = fut.get(); + const auto& keys = mtrx->recovered_keys(); + BOOST_CHECK_EQUAL(1u, keys.size()); + BOOST_CHECK_EQUAL(public_key, *keys.begin()); - // again - auto keys2 = mtrx->recover_keys( test.control->get_chain_id() ); - 
BOOST_CHECK_EQUAL(1u, keys2.second.size()); - BOOST_CHECK_EQUAL(public_key, *keys2.second.begin()); + // again, can be called multiple times, current implementation it is just an attribute of transaction_metadata + const auto& keys2 = mtrx->recovered_keys(); + BOOST_CHECK_EQUAL(1u, keys2.size()); + BOOST_CHECK_EQUAL(public_key, *keys2.begin()); - auto keys3 = mtrx2->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1u, keys3.second.size()); - BOOST_CHECK_EQUAL(public_key, *keys3.second.begin()); - - // recover keys without first calling start_recover_keys - transaction_metadata_ptr mtrx4 = std::make_shared( std::make_shared( trx, packed_transaction::none) ); - transaction_metadata_ptr mtrx5 = std::make_shared( std::make_shared( trx, packed_transaction::zlib) ); - - auto keys4 = mtrx4->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1u, keys4.second.size()); - BOOST_CHECK_EQUAL(public_key, *keys4.second.begin()); - - auto keys5 = mtrx5->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1u, keys5.second.size()); - BOOST_CHECK_EQUAL(public_key, *keys5.second.begin()); + auto mtrx2 = fut2.get(); + const auto& keys3 = mtrx2->recovered_keys(); + BOOST_CHECK_EQUAL(1u, keys3.size()); + BOOST_CHECK_EQUAL(public_key, *keys3.begin()); thread_pool.stop(); @@ -1120,6 +1110,16 @@ BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { } FC_LOG_AND_RETHROW() } +// test that std::bad_alloc is being thrown +BOOST_AUTO_TEST_CASE(bad_alloc_test) { + tester t; // force a controller to be constructed and set the new_handler + int* ptr = nullptr; + const auto fail = [&]() { + ptr = new int[std::numeric_limits::max()/16]; + }; + BOOST_CHECK_THROW( fail(), std::bad_alloc ); + BOOST_CHECK( ptr == nullptr ); +} BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/payloadless_tests.cpp b/unittests/payloadless_tests.cpp index c9519e0ed71..4e4e3ca16cc 100644 --- a/unittests/payloadless_tests.cpp +++ b/unittests/payloadless_tests.cpp @@ -1,7 +1,3 @@ 
-/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wsign-compare" #include diff --git a/unittests/producer_schedule_tests.cpp b/unittests/producer_schedule_tests.cpp index bec63376ff5..32655b3fbc8 100644 --- a/unittests/producer_schedule_tests.cpp +++ b/unittests/producer_schedule_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #include #include @@ -22,13 +18,13 @@ using mvo = fc::mutable_variant_object; BOOST_AUTO_TEST_SUITE(producer_schedule_tests) // Calculate expected producer given the schedule and slot number - account_name get_expected_producer(const vector& schedule, const uint64_t slot) { + account_name get_expected_producer(const vector& schedule, const uint64_t slot) { const auto& index = (slot % (schedule.size() * config::producer_repetitions)) / config::producer_repetitions; return schedule.at(index).producer_name; }; // Check if two schedule is equal - bool is_schedule_equal(const vector& first, const vector& second) { + bool is_schedule_equal(const vector& first, const vector& second) { bool is_equal = first.size() == second.size(); for (uint32_t i = 0; i < first.size(); i++) { is_equal = is_equal && first.at(i) == second.at(i); @@ -209,14 +205,14 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try { produce_block(); } - auto compare_schedules = [&]( const vector& a, const producer_schedule_type& b ) { + auto compare_schedules = [&]( const vector& a, const producer_authority_schedule& b ) { return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); }; auto res = set_producers( {N(alice),N(bob)} ); - vector sch1 = { - {N(alice), get_public_key(N(alice), "active")}, - {N(bob), get_public_key(N(bob), "active")} + vector sch1 = { + producer_authority{N(alice), block_signing_authority_v0{1, {{get_public_key(N(alice), "active"), 1}}}}, + producer_authority{N(bob), block_signing_authority_v0{1, 
{{get_public_key(N(bob), "active"), 1}}}} }; //wdump((fc::json::to_pretty_string(res))); wlog("set producer schedule to [alice,bob]"); @@ -234,10 +230,10 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try { produce_blocks(6); res = set_producers( {N(alice),N(bob),N(carol)} ); - vector sch2 = { - {N(alice), get_public_key(N(alice), "active")}, - {N(bob), get_public_key(N(bob), "active")}, - {N(carol), get_public_key(N(carol), "active")} + vector sch2 = { + producer_authority{N(alice), block_signing_authority_v0{1, {{get_public_key(N(alice), "active"),1}}}}, + producer_authority{N(bob), block_signing_authority_v0{1, {{get_public_key(N(bob), "active"),1}}}}, + producer_authority{N(carol), block_signing_authority_v0{1, {{get_public_key(N(carol), "active"),1}}}} }; wlog("set producer schedule to [alice,bob,carol]"); BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); @@ -274,15 +270,15 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { produce_block(); } - auto compare_schedules = [&]( const vector& a, const producer_schedule_type& b ) { + auto compare_schedules = [&]( const vector& a, const producer_authority_schedule& b ) { return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); }; auto res = set_producers( {N(alice),N(bob),N(carol)} ); - vector sch1 = { - {N(alice), get_public_key(N(alice), "active")}, - {N(bob), get_public_key(N(bob), "active")}, - {N(carol), get_public_key(N(carol), "active")} + vector sch1 = { + producer_authority{N(alice), block_signing_authority_v0{ 1, {{get_public_key(N(alice), "active"),1}}}}, + producer_authority{N(bob), block_signing_authority_v0{ 1, {{get_public_key(N(bob), "active"),1}}}}, + producer_authority{N(carol), block_signing_authority_v0{ 1, {{get_public_key(N(carol), "active"),1}}}} }; wlog("set producer schedule to [alice,bob,carol]"); BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); @@ -299,9 +295,9 @@ 
BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { produce_blocks(6); res = set_producers( {N(alice),N(bob)} ); - vector sch2 = { - {N(alice), get_public_key(N(alice), "active")}, - {N(bob), get_public_key(N(bob), "active")} + vector sch2 = { + producer_authority{N(alice), block_signing_authority_v0{ 1, {{ get_public_key(N(alice), "active"),1}}}}, + producer_authority{N(bob), block_signing_authority_v0{ 1, {{ get_public_key(N(bob), "active"),1}}}} }; wlog("set producer schedule to [alice,bob]"); BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); @@ -328,7 +324,8 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { } FC_LOG_AND_RETHROW() BOOST_AUTO_TEST_CASE( empty_producer_schedule_has_no_effect ) try { - validating_tester c( validating_tester::default_config() ); + fc::temp_directory tempdir; + validating_tester c( tempdir, true ); c.execute_setup_policy( setup_policy::preactivate_feature_and_new_bios ); c.create_accounts( {N(alice),N(bob),N(carol)} ); @@ -336,14 +333,14 @@ BOOST_AUTO_TEST_CASE( empty_producer_schedule_has_no_effect ) try { c.produce_block(); } - auto compare_schedules = [&]( const vector& a, const producer_schedule_type& b ) { + auto compare_schedules = [&]( const vector& a, const producer_authority_schedule& b ) { return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); }; - auto res = c.set_producers( {N(alice),N(bob)} ); - vector sch1 = { - {N(alice), get_public_key(N(alice), "active")}, - {N(bob), get_public_key(N(bob), "active")} + auto res = c.set_producers_legacy( {N(alice),N(bob)} ); + vector sch1 = { + producer_authority{N(alice), block_signing_authority_v0{ 1, {{ get_public_key(N(alice), "active"),1}}}}, + producer_authority{N(bob), block_signing_authority_v0{ 1, {{ get_public_key(N(bob), "active"),1}}}} }; wlog("set producer schedule to [alice,bob]"); BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); @@ -362,7 +359,7 @@ 
BOOST_AUTO_TEST_CASE( empty_producer_schedule_has_no_effect ) try { BOOST_CHECK_EQUAL( true, compare_schedules( sch1, c.control->active_producers() ) ); c.produce_blocks(6); - res = c.set_producers( {} ); + res = c.set_producers_legacy( {} ); wlog("set producer schedule to []"); BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); BOOST_CHECK_EQUAL( c.control->proposed_producers()->producers.size(), 0u ); @@ -382,11 +379,11 @@ BOOST_AUTO_TEST_CASE( empty_producer_schedule_has_no_effect ) try { BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); // Setting a new producer schedule should still use version 2 - res = c.set_producers( {N(alice),N(bob),N(carol)} ); - vector sch2 = { - {N(alice), get_public_key(N(alice), "active")}, - {N(bob), get_public_key(N(bob), "active")}, - {N(carol), get_public_key(N(carol), "active")} + res = c.set_producers_legacy( {N(alice),N(bob),N(carol)} ); + vector sch2 = { + producer_authority{N(alice), block_signing_authority_v0{ 1, {{get_public_key(N(alice), "active"),1}}}}, + producer_authority{N(bob), block_signing_authority_v0{ 1, {{get_public_key(N(bob), "active"),1}}}}, + producer_authority{N(carol), block_signing_authority_v0{ 1, {{get_public_key(N(carol), "active"),1}}}} }; wlog("set producer schedule to [alice,bob,carol]"); BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); @@ -413,15 +410,15 @@ BOOST_AUTO_TEST_CASE( producer_watermark_test ) try { c.create_accounts( {N(alice),N(bob),N(carol)} ); c.produce_block(); - auto compare_schedules = [&]( const vector& a, const producer_schedule_type& b ) { + auto compare_schedules = [&]( const vector& a, const producer_authority_schedule& b ) { return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); }; auto res = c.set_producers( {N(alice),N(bob),N(carol)} ); - vector sch1 = { - {N(alice), c.get_public_key(N(alice), "active")}, - {N(bob), c.get_public_key(N(bob), "active")}, - {N(carol), c.get_public_key(N(carol), 
"active")} + vector sch1 = { + producer_authority{N(alice), block_signing_authority_v0{ 1, {{c.get_public_key(N(alice), "active"),1}}}}, + producer_authority{N(bob), block_signing_authority_v0{ 1, {{c.get_public_key(N(bob), "active"),1}}}}, + producer_authority{N(carol), block_signing_authority_v0{ 1, {{c.get_public_key(N(carol), "active"),1}}}} }; wlog("set producer schedule to [alice,bob,carol]"); BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); @@ -436,29 +433,29 @@ BOOST_AUTO_TEST_CASE( producer_watermark_test ) try { BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 1u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, c.control->active_producers() ) ); - produce_empty_blocks_until( c, N(carol), N(alice) ); + produce_until_transition( c, N(carol), N(alice) ); c.produce_block(); - produce_empty_blocks_until( c, N(carol), N(alice) ); + produce_until_transition( c, N(carol), N(alice) ); res = c.set_producers( {N(alice),N(bob)} ); - vector sch2 = { - {N(alice), c.get_public_key(N(alice), "active")}, - {N(bob), c.get_public_key(N(bob), "active")} + vector sch2 = { + producer_authority{N(alice), block_signing_authority_v0{ 1, {{c.get_public_key(N(alice), "active"),1}}}}, + producer_authority{N(bob), block_signing_authority_v0{ 1, {{c.get_public_key(N(bob), "active"),1}}}} }; wlog("set producer schedule to [alice,bob]"); BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); BOOST_CHECK_EQUAL( true, compare_schedules( sch2, *c.control->proposed_producers() ) ); - produce_empty_blocks_until( c, N(bob), N(carol) ); - produce_empty_blocks_until( c, N(alice), N(bob) ); + produce_until_transition( c, N(bob), N(carol) ); + produce_until_transition( c, N(alice), N(bob) ); BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); - produce_empty_blocks_until( c, N(carol), N(alice) ); + produce_until_transition( c, N(carol), N(alice) ); BOOST_CHECK_EQUAL( 
c.control->pending_producers().version, 2u ); BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); - produce_empty_blocks_until( c, N(bob), N(carol) ); + produce_until_transition( c, N(bob), N(carol) ); BOOST_CHECK_EQUAL( c.control->pending_block_producer(), N(carol) ); BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 2u ); @@ -473,15 +470,37 @@ BOOST_AUTO_TEST_CASE( producer_watermark_test ) try { BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *c.control->proposed_producers() ) ); - produce_empty_blocks_until( c, N(bob), N(alice) ); - produce_empty_blocks_until( c, N(alice), N(bob) ); + produce_until_transition( c, N(bob), N(alice) ); + + auto bob_last_produced_block_num = c.control->head_block_num(); + wdump((bob_last_produced_block_num)); + + produce_until_transition( c, N(alice), N(bob) ); + + auto alice_last_produced_block_num = c.control->head_block_num(); + wdump((alice_last_produced_block_num)); + + { + wdump((c.control->head_block_state()->producer_to_last_produced)); + const auto& last_produced = c.control->head_block_state()->producer_to_last_produced; + auto alice_itr = last_produced.find( N(alice) ); + BOOST_REQUIRE( alice_itr != last_produced.end() ); + BOOST_CHECK_EQUAL( alice_itr->second, alice_last_produced_block_num ); + auto bob_itr = last_produced.find( N(bob) ); + BOOST_REQUIRE( bob_itr != last_produced.end() ); + BOOST_CHECK_EQUAL( bob_itr->second, bob_last_produced_block_num ); + auto carol_itr = last_produced.find( N(carol) ); + BOOST_REQUIRE( carol_itr != last_produced.end() ); + BOOST_CHECK_EQUAL( carol_itr->second, carol_last_produced_block_num ); + } + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 3u ); BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 2u ); - produce_empty_blocks_until( c, N(bob), N(alice) ); + produce_until_transition( c, N(bob), N(alice) ); BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 3u 
); - produce_empty_blocks_until( c, N(alice), N(bob) ); + produce_until_transition( c, N(alice), N(bob) ); c.produce_blocks(11); BOOST_CHECK_EQUAL( c.control->pending_block_producer(), N(bob) ); c.finish_block(); @@ -498,8 +517,152 @@ BOOST_AUTO_TEST_CASE( producer_watermark_test ) try { BOOST_CHECK_EQUAL( h.producer, N(carol) ); BOOST_CHECK_EQUAL( h.confirmed, confirmed ); - produce_empty_blocks_until( c, N(carol), N(alice) ); + produce_until_transition( c, N(carol), N(alice) ); + +} FC_LOG_AND_RETHROW() + +BOOST_FIXTURE_TEST_CASE( producer_one_of_n_test, TESTER ) try { + create_accounts( {N(alice),N(bob)} ); + produce_block(); + + vector sch1 = { + producer_authority{N(alice), block_signing_authority_v0{1, {{get_public_key(N(alice), "bs1"), 1}, {get_public_key(N(alice), "bs2"), 1}}}}, + producer_authority{N(bob), block_signing_authority_v0{1, {{get_public_key(N(bob), "bs1"), 1}, {get_public_key(N(bob), "bs2"), 1}}}} + }; + + auto res = set_producer_schedule( sch1 ); + block_signing_private_keys.emplace(get_public_key(N(alice), "bs1"), get_private_key(N(alice), "bs1")); + block_signing_private_keys.emplace(get_public_key(N(bob), "bs1"), get_private_key(N(bob), "bs1")); + + BOOST_REQUIRE(produce_until_blocks_from(*this, {N(alice), N(bob)}, 300)); + + BOOST_REQUIRE_EQUAL( validate(), true ); +} FC_LOG_AND_RETHROW() + +BOOST_FIXTURE_TEST_CASE( producer_m_of_n_test, TESTER ) try { + create_accounts( {N(alice),N(bob)} ); + produce_block(); + + + vector sch1 = { + producer_authority{N(alice), block_signing_authority_v0{2, {{get_public_key(N(alice), "bs1"), 1}, {get_public_key(N(alice), "bs2"), 1}}}}, + producer_authority{N(bob), block_signing_authority_v0{2, {{get_public_key(N(bob), "bs1"), 1}, {get_public_key(N(bob), "bs2"), 1}}}} + }; + + auto res = set_producer_schedule( sch1 ); + block_signing_private_keys.emplace(get_public_key(N(alice), "bs1"), get_private_key(N(alice), "bs1")); + block_signing_private_keys.emplace(get_public_key(N(alice), "bs2"), 
get_private_key(N(alice), "bs2")); + block_signing_private_keys.emplace(get_public_key(N(bob), "bs1"), get_private_key(N(bob), "bs1")); + block_signing_private_keys.emplace(get_public_key(N(bob), "bs2"), get_private_key(N(bob), "bs2")); + + BOOST_REQUIRE(produce_until_blocks_from(*this, {N(alice), N(bob)}, 300)); + + BOOST_REQUIRE_EQUAL( validate(), true ); +} FC_LOG_AND_RETHROW() + +BOOST_FIXTURE_TEST_CASE( satisfiable_msig_test, TESTER ) try { + create_accounts( {N(alice),N(bob)} ); + produce_block(); + + vector sch1 = { + producer_authority{N(alice), block_signing_authority_v0{2, {{get_public_key(N(alice), "bs1"), 1}}}} + }; + + // ensure that the entries in a wtmsig schedule are rejected if not satisfiable + BOOST_REQUIRE_EXCEPTION( + set_producer_schedule( sch1 ), wasm_execution_error, + fc_exception_message_is( "producer schedule includes an unsatisfiable authority for alice" ) + ); + + BOOST_REQUIRE_EQUAL( false, control->proposed_producers().valid() ); + +} FC_LOG_AND_RETHROW() + +BOOST_FIXTURE_TEST_CASE( duplicate_producers_test, TESTER ) try { + create_accounts( {N(alice)} ); + produce_block(); + + vector sch1 = { + producer_authority{N(alice), block_signing_authority_v0{1, {{get_public_key(N(alice), "bs1"), 1}}}}, + producer_authority{N(alice), block_signing_authority_v0{1, {{get_public_key(N(alice), "bs2"), 1}}}} + }; + + // ensure that the schedule is rejected if it has duplicate producers in it + BOOST_REQUIRE_EXCEPTION( + set_producer_schedule( sch1 ), wasm_execution_error, + fc_exception_message_is( "duplicate producer name in producer schedule" ) + ); + + BOOST_REQUIRE_EQUAL( false, control->proposed_producers().valid() ); + +} FC_LOG_AND_RETHROW() + +BOOST_FIXTURE_TEST_CASE( duplicate_keys_test, TESTER ) try { + create_accounts( {N(alice),N(bob)} ); + produce_block(); + + vector sch1 = { + producer_authority{N(alice), block_signing_authority_v0{2, {{get_public_key(N(alice), "bs1"), 1}, {get_public_key(N(alice), "bs1"), 1}}}} + }; + + // ensure 
that the schedule is rejected if it has duplicate keys for a single producer in it + BOOST_REQUIRE_EXCEPTION( + set_producer_schedule( sch1 ), wasm_execution_error, + fc_exception_message_is( "producer schedule includes a duplicated key for alice" ) + ); + + BOOST_REQUIRE_EQUAL( false, control->proposed_producers().valid() ); + + // ensure that multiple producers are allowed to share keys + vector sch2 = { + producer_authority{N(alice), block_signing_authority_v0{1, {{get_public_key(N(alice), "bs1"), 1}}}}, + producer_authority{N(bob), block_signing_authority_v0{1, {{get_public_key(N(alice), "bs1"), 1}}}} + }; + + set_producer_schedule( sch2 ); + BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( large_authority_overflow_test ) try { + + block_signing_authority_v0 auth; + { // create a large authority that should overflow + const size_t pre_overflow_count = 65'537UL; // enough for weights of 0xFFFF to add up to 0xFFFFFFFF + auth.keys.reserve(pre_overflow_count + 1); + + for (int i = 0; i < pre_overflow_count; i++) { + auto key_str = std::to_string(i) + "_bsk"; + auth.keys.emplace_back(key_weight{get_public_key(N(alice), key_str), 0xFFFFU}); + } + + // reduce the last weight by 1 so that its unsatisfiable + auth.keys.back().weight = 0xFFFEU; + + // add one last key with a weight of 2 so that its only satisfiable with values that sum to an overflow of 32bit uint + auth.keys.emplace_back(key_weight{get_public_key(N(alice), std::to_string(pre_overflow_count) + "_bsk"), 0x0002U}); + + auth.threshold = 0xFFFFFFFFUL; + } + + std::set provided_keys; + { // construct a set of all keys to provide + for( const auto& kw: auth.keys) { + provided_keys.emplace(kw.key); + } + } + + { // prove the naive accumulation overflows + uint32_t total = 0; + for( const auto& kw: auth.keys) { + total += kw.weight; + } + BOOST_REQUIRE_EQUAL(total, 0x0UL); + } + + auto res = auth.keys_satisfy_and_relevant(provided_keys); + 
BOOST_REQUIRE_EQUAL(res.first, true); + BOOST_REQUIRE_EQUAL(res.second, provided_keys.size()); } FC_LOG_AND_RETHROW() BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 513bd91c650..4874d4e5354 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #include #include #include @@ -19,6 +15,7 @@ using namespace eosio::chain; using namespace eosio::testing; +using namespace std::literals; BOOST_AUTO_TEST_SUITE(protocol_feature_tests) @@ -52,8 +49,7 @@ BOOST_AUTO_TEST_CASE( activate_preactivate_feature ) try { c.produce_block(); // Now the latest bios contract can be set. - c.set_code( config::system_account_name, contracts::eosio_bios_wasm() ); - c.set_abi( config::system_account_name, contracts::eosio_bios_abi().data() ); + c.set_before_producer_authority_bios_contract(); c.produce_block(); @@ -96,7 +92,7 @@ BOOST_AUTO_TEST_CASE( activate_and_restart ) try { BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::preactivate_feature ) ); c.close(); - c.open( std::move( pfs ), nullptr ); + c.open( std::move( pfs ) ); BOOST_CHECK_EQUAL( head_block_num, c.control->head_block_num() ); @@ -236,7 +232,7 @@ BOOST_AUTO_TEST_CASE( only_link_to_existing_permission_test ) try { ("account", "alice") ("permission", "test") ("parent", "active") - ("auth", authority(get_public_key("testapi", "test"))) + ("auth", authority(get_public_key(name("testapi"), "test"))) ); c.produce_block(); @@ -270,7 +266,7 @@ BOOST_AUTO_TEST_CASE( subjective_restrictions_test ) try { auto restart_with_new_pfs = [&c]( protocol_feature_set&& pfs ) { c.close(); - c.open(std::move(pfs), nullptr); + c.open(std::move(pfs)); }; auto get_builtin_digest = [&pfm]( builtin_protocol_feature_t codename ) -> digest_type { @@ -332,7 +328,7 @@ BOOST_AUTO_TEST_CASE( subjective_restrictions_test ) try { // Second, test 
subjective_restrictions on feature that need to be activated WITH preactivation (ONLY_LINK_TO_EXISTING_PERMISSION) - c.set_bios_contract(); + c.set_before_producer_authority_bios_contract(); c.produce_block(); custom_subjective_restrictions = { @@ -412,7 +408,7 @@ BOOST_AUTO_TEST_CASE( replace_deferred_test ) try { c.close(); auto cfg = c.get_config(); cfg.disable_all_subjective_mitigations = true; - c.init( cfg, nullptr ); + c.init( cfg ); BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage0 ); @@ -455,7 +451,7 @@ BOOST_AUTO_TEST_CASE( replace_deferred_test ) try { c.close(); cfg.disable_all_subjective_mitigations = false; - c.init( cfg, nullptr ); + c.init( cfg ); const auto& pfm = c.control->get_protocol_feature_manager(); @@ -660,7 +656,7 @@ BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { check_generation_context( index.begin()->packed_trx, trace2->id, - ((static_cast(N(alice)) << 64) | 1), + ((static_cast(N(alice).to_uint64_t()) << 64) | 1), N(test) ); c.produce_block(); @@ -678,7 +674,7 @@ BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { check_generation_context( index.begin()->packed_trx, trace3->id, - ((static_cast(N(alice)) << 64) | 1), + ((static_cast(N(alice).to_uint64_t()) << 64) | 1), N(test) ); c.produce_block(); @@ -763,21 +759,21 @@ BOOST_AUTO_TEST_CASE( disallow_empty_producer_schedule_test ) { try { BOOST_REQUIRE( d ); // Before activation, it is allowed to set empty producer schedule - c.set_producers( {} ); + c.set_producers_legacy( {} ); // After activation, it should not be allowed c.preactivate_protocol_features( {*d} ); c.produce_block(); - BOOST_REQUIRE_EXCEPTION( c.set_producers( {} ), + BOOST_REQUIRE_EXCEPTION( c.set_producers_legacy( {} ), wasm_execution_error, fc_exception_message_is( "Producer schedule cannot be empty" ) ); // Setting non empty producer schedule should still be fine vector producer_names = {N(alice),N(bob),N(carol)}; c.create_accounts( 
producer_names ); - c.set_producers( producer_names ); + c.set_producers_legacy( producer_names ); c.produce_blocks(2); - const auto& schedule = c.get_producer_keys( producer_names ); + const auto& schedule = c.get_producer_authorities( producer_names ); BOOST_CHECK( std::equal( schedule.begin(), schedule.end(), c.control->active_producers().producers.begin()) ); } FC_LOG_AND_RETHROW() } @@ -969,7 +965,7 @@ BOOST_AUTO_TEST_CASE( forward_setcode_test ) { try { const auto& pfm = c.control->get_protocol_feature_manager(); const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::forward_setcode ); BOOST_REQUIRE( d ); - c.set_bios_contract(); + c.set_before_producer_authority_bios_contract(); c.preactivate_protocol_features( {*d} ); c.produce_block(); c.set_code( config::system_account_name, contracts::reject_all_wasm() ); @@ -985,7 +981,7 @@ BOOST_AUTO_TEST_CASE( forward_setcode_test ) { try { tester c2(setup_policy::none); push_blocks( c, c2 ); // make a backup of the chain to enable testing further conditions. - c.set_bios_contract(); // To allow pushing further actions for setting up the other part of the test. + c.set_before_producer_authority_bios_contract(); // To allow pushing further actions for setting up the other part of the test. c.create_account( N(rejectall) ); c.produce_block(); // The existence of the rejectall account will make the reject_all contract reject all actions with no exception. @@ -1005,7 +1001,7 @@ BOOST_AUTO_TEST_CASE( forward_setcode_test ) { try { // However, it should still be possible to set the bios contract because the WASM on eosio is called after the // native setcode function completes. 
- c2.set_bios_contract(); + c2.set_before_producer_authority_bios_contract(); c2.produce_block(); } FC_LOG_AND_RETHROW() } @@ -1180,7 +1176,7 @@ BOOST_AUTO_TEST_CASE( ram_restrictions_test ) { try { c.close(); auto cfg = c.get_config(); cfg.disable_all_subjective_mitigations = true; - c.init( cfg, nullptr ); + c.init( cfg ); c.produce_block(); @@ -1205,7 +1201,7 @@ BOOST_AUTO_TEST_CASE( ram_restrictions_test ) { try { // Re-enable the subjective mitigation c.close(); cfg.disable_all_subjective_mitigations = false; - c.init( cfg, nullptr ); + c.init( cfg ); c.produce_block(); @@ -1328,4 +1324,419 @@ BOOST_AUTO_TEST_CASE( ram_restrictions_test ) { try { } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE( webauthn_producer ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::webauthn_key ); + BOOST_REQUIRE( d ); + + c.create_account(N(waprod)); + c.produce_block(); + + vector waprodsched = {{N(waprod), public_key_type("PUB_WA_WdCPfafVNxVMiW5ybdNs83oWjenQXvSt1F49fg9mv7qrCiRwHj5b38U3ponCFWxQTkDsMC"s)}}; + + BOOST_CHECK_THROW( + c.push_action(config::system_account_name, N(setprods), config::system_account_name, fc::mutable_variant_object()("schedule", waprodsched)), + eosio::chain::unactivated_key_type + ); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + c.push_action(config::system_account_name, N(setprods), config::system_account_name, fc::mutable_variant_object()("schedule", waprodsched)); +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( webauthn_create_account ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest(builtin_protocol_feature_t::webauthn_key); + BOOST_REQUIRE(d); + + signed_transaction trx; + c.set_transaction_headers(trx); + authority auth = 
authority(public_key_type("PUB_WA_WdCPfafVNxVMiW5ybdNs83oWjenQXvSt1F49fg9mv7qrCiRwHj5b38U3ponCFWxQTkDsMC"s)); + + trx.actions.emplace_back(vector{{config::system_account_name,config::active_name}}, + newaccount{ + .creator = config::system_account_name, + .name = N(waaccount), + .owner = auth, + .active = auth, + }); + + c.set_transaction_headers(trx); + trx.sign(get_private_key(config::system_account_name, "active"), c.control->get_chain_id()); + BOOST_CHECK_THROW(c.push_transaction(trx), eosio::chain::unactivated_key_type); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + c.push_transaction(trx); +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( webauthn_update_account_auth ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest(builtin_protocol_feature_t::webauthn_key); + BOOST_REQUIRE(d); + + c.create_account(N(billy)); + c.produce_block(); + + BOOST_CHECK_THROW(c.set_authority(N(billy), config::active_name, + authority(public_key_type("PUB_WA_WdCPfafVNxVMiW5ybdNs83oWjenQXvSt1F49fg9mv7qrCiRwHj5b38U3ponCFWxQTkDsMC"s))), + eosio::chain::unactivated_key_type); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + c.set_authority(N(billy), config::active_name, authority(public_key_type("PUB_WA_WdCPfafVNxVMiW5ybdNs83oWjenQXvSt1F49fg9mv7qrCiRwHj5b38U3ponCFWxQTkDsMC"s))); +} FC_LOG_AND_RETHROW() } + +/* + For the following two recover_key wasm tests: + Given digest a2256be721c7d090ba13d6c37eee2a06c663473a40950d4f64f0a762dbe13d49 + And signature SIG_WA_2AAAuLJS3pLPgkQQPqLsehL6VeRBaAZS7NYM91UYRUrSAEfUvzKN7DCSwhjsDqe74cZNWKUU + GAHGG8ddSA7cvUxChbfKxLSrDCpwe6MVUqz4PDdyCt5tXhEJmKekxG1o1ucY3LVj8Vi9rRbzAkKPCzW + qC8cPcUtpLHNG8qUKkQrN4Xuwa9W8rsBiUKwZv1ToLyVhLrJe42pvHYBXicp4E8qec5E4m6SX11KuXE + RFcV48Mhiie2NyaxdtNtNzQ5XZ5hjBkxRujqejpF4SNHvdAGKRBbvhkiPLA25FD3xoCbrN26z72 + Should recover to pubkey 
PUB_WA_WdCPfafVNxVMiW5ybdNs83oWjenQXvSt1F49fg9mv7qrCiRwHj5b38U3ponCFWxQTkDsMC +*/ + +static const char webauthn_recover_key_wast[] = R"=====( +(module + (import "env" "recover_key" (func $recover_key (param i32 i32 i32 i32 i32) (result i32))) + (memory $0 1) + (export "apply" (func $apply)) + (func $apply (param $0 i64) (param $1 i64) (param $2 i64) + (drop + (call $recover_key + (i32.const 8) + (i32.const 40) + (i32.const 220) + (i32.const 1024) + (i32.const 1024) + ) + ) + ) + (data (i32.const 8) "\a2\25\6b\e7\21\c7\d0\90\ba\13\d6\c3\7e\ee\2a\06\c6\63\47\3a\40\95\0d\4f\64\f0\a7\62\db\e1\3d\49") + (data (i32.const 40) "\02\20\D9\13\2B\BD\B2\19\E4\E2\D9\9A\F9\C5\07\E3\59\7F\86\B6\15\81\4F\36\67\2D\50\10\34\86\17\92\BB\CF\21\A4\6D\1A\2E\B1\2B\AC\E4\A2\91\00\B9\42\F9\87\49\4F\3A\EF\C8\EF\B2\D5\AF\4D\4D\8D\E3\E0\87\15\25\AA\14\90\5A\F6\0C\A1\7A\1B\B8\0E\0C\F9\C3\B4\69\08\A0\F1\4F\72\56\7A\2F\14\0C\3A\3B\D2\EF\07\4C\01\00\00\00\6D\73\7B\22\6F\72\69\67\69\6E\22\3A\22\68\74\74\70\73\3A\2F\2F\6B\65\6F\73\64\2E\69\6E\76\61\6C\69\64\22\2C\22\74\79\70\65\22\3A\22\77\65\62\61\75\74\68\6E\2E\67\65\74\22\2C\22\63\68\61\6C\6C\65\6E\67\65\22\3A\22\6F\69\56\72\35\79\48\48\30\4A\43\36\45\39\62\44\66\75\34\71\42\73\5A\6A\52\7A\70\41\6C\51\31\50\5A\50\43\6E\59\74\76\68\50\55\6B\3D\22\7D") +) +)====="; + +BOOST_AUTO_TEST_CASE( webauthn_recover_key ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest(builtin_protocol_feature_t::webauthn_key); + BOOST_REQUIRE(d); + + c.create_account(N(bob)); + c.set_code(N(bob), webauthn_recover_key_wast); + c.produce_block(); + + signed_transaction trx; + action act; + act.account = N(bob); + act.name = N(); + act.authorization = vector{{N(bob),config::active_name}}; + trx.actions.push_back(act); + + c.set_transaction_headers(trx); + trx.sign(c.get_private_key( N(bob), "active" ), c.control->get_chain_id()); + 
BOOST_CHECK_THROW(c.push_transaction(trx), eosio::chain::unactivated_signature_type); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + c.push_transaction(trx); + +} FC_LOG_AND_RETHROW() } + +static const char webauthn_assert_recover_key_wast[] = R"=====( +(module + (import "env" "assert_recover_key" (func $assert_recover_key (param i32 i32 i32 i32 i32))) + (memory $0 1) + (export "apply" (func $apply)) + (func $apply (param $0 i64) (param $1 i64) (param $2 i64) + (call $assert_recover_key + (i32.const 8) + (i32.const 40) + (i32.const 220) + (i32.const 1024) + (i32.const 1024) + ) + ) + (data (i32.const 8) "\a2\25\6b\e7\21\c7\d0\90\ba\13\d6\c3\7e\ee\2a\06\c6\63\47\3a\40\95\0d\4f\64\f0\a7\62\db\e1\3d\49") + (data (i32.const 40) "\02\20\D9\13\2B\BD\B2\19\E4\E2\D9\9A\F9\C5\07\E3\59\7F\86\B6\15\81\4F\36\67\2D\50\10\34\86\17\92\BB\CF\21\A4\6D\1A\2E\B1\2B\AC\E4\A2\91\00\B9\42\F9\87\49\4F\3A\EF\C8\EF\B2\D5\AF\4D\4D\8D\E3\E0\87\15\25\AA\14\90\5A\F6\0C\A1\7A\1B\B8\0E\0C\F9\C3\B4\69\08\A0\F1\4F\72\56\7A\2F\14\0C\3A\3B\D2\EF\07\4C\01\00\00\00\6D\73\7B\22\6F\72\69\67\69\6E\22\3A\22\68\74\74\70\73\3A\2F\2F\6B\65\6F\73\64\2E\69\6E\76\61\6C\69\64\22\2C\22\74\79\70\65\22\3A\22\77\65\62\61\75\74\68\6E\2E\67\65\74\22\2C\22\63\68\61\6C\6C\65\6E\67\65\22\3A\22\6F\69\56\72\35\79\48\48\30\4A\43\36\45\39\62\44\66\75\34\71\42\73\5A\6A\52\7A\70\41\6C\51\31\50\5A\50\43\6E\59\74\76\68\50\55\6B\3D\22\7D") + (data (i32.const 1024) "\02\02\20\B9\DA\B5\12\E8\92\39\2A\44\A9\F4\1F\94\33\C9\FB\D8\0D\B8\64\E9\DF\58\89\C2\40\7D\B3\AC\BB\9F\01\0D\6B\65\6F\73\64\2E\69\6E\76\61\6C\69\64") +) +)====="; + +BOOST_AUTO_TEST_CASE( webauthn_assert_recover_key ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest(builtin_protocol_feature_t::webauthn_key); + BOOST_REQUIRE(d); + + c.create_account(N(bob)); + c.set_code(N(bob), webauthn_assert_recover_key_wast); + 
c.produce_block(); + + signed_transaction trx; + action act; + act.account = N(bob); + act.name = N(); + act.authorization = vector{{N(bob),config::active_name}}; + trx.actions.push_back(act); + + c.set_transaction_headers(trx); + trx.sign(c.get_private_key( N(bob), "active" ), c.control->get_chain_id()); + BOOST_CHECK_THROW(c.push_transaction(trx), eosio::chain::unactivated_signature_type); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + c.push_transaction(trx); + +} FC_LOG_AND_RETHROW() } + +static const char import_set_proposed_producer_ex_wast[] = R"=====( +(module + (import "env" "set_proposed_producers_ex" (func $set_proposed_producers_ex (param i64 i32 i32) (result i64))) + (memory $0 1) + (export "apply" (func $apply)) + (func $apply (param $0 i64) (param $1 i64) (param $2 i64) + (drop + (call $set_proposed_producers_ex + (i64.const 0) + (i32.const 0) + (i32.const 43) + ) + ) + ) + (data (i32.const 8) "\01\00\00\00\00\00\85\5C\34\00\03\EB\CF\44\B4\5A\71\D4\F2\25\76\8F\60\2D\1E\2E\2B\25\EF\77\9E\E9\89\7F\E7\44\BF\1A\16\E8\54\23\D5") +) +)====="; + +BOOST_AUTO_TEST_CASE( set_proposed_producers_ex_test ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest(builtin_protocol_feature_t::wtmsig_block_signatures); + BOOST_REQUIRE(d); + + const auto& alice_account = account_name("alice"); + c.create_accounts( {alice_account} ); + c.produce_block(); + + BOOST_CHECK_EXCEPTION( c.set_code( alice_account, import_set_proposed_producer_ex_wast ), + wasm_exception, + fc_exception_message_is( "env.set_proposed_producers_ex unresolveable" ) ); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + // ensure it now resolves + c.set_code( alice_account, import_set_proposed_producer_ex_wast ); + + // ensure it requires privilege + BOOST_REQUIRE_EQUAL( + c.push_action(action({{ alice_account, permission_name("active") }}, 
alice_account, action_name(), {} ), alice_account.to_uint64_t()), + "alice does not have permission to call this API" + ); + + c.push_action(config::system_account_name, N(setpriv), config::system_account_name, fc::mutable_variant_object()("account", alice_account)("is_priv", 1)); + + //ensure it can be called w/ privilege + BOOST_REQUIRE_EQUAL(c.push_action(action({{ alice_account, permission_name("active") }}, alice_account, action_name(), {} ), alice_account.to_uint64_t()), c.success()); + + c.produce_block(); +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( producer_schedule_change_extension_test ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest(builtin_protocol_feature_t::wtmsig_block_signatures); + BOOST_REQUIRE(d); + + c.produce_blocks(2); + + // sync a remote node into this chain + tester remote( setup_policy::none ); + push_blocks(c, remote); + + // activate the feature + // there is a 1 block delay before header-only validation (which is responsible for extension validation) can be + // aware of the activation. So the expectation is that if it is activated in the _state_ at block N, block N + 1 + // will bear an extension making header-only validators aware of it, and therefore block N + 2 is the first block + // where a block may bear a downstream extension. 
+ c.preactivate_protocol_features( {*d} ); + remote.push_block(c.produce_block()); + + auto last_legacy_block = c.produce_block(); + + + { // ensure producer_schedule_change_extension is rejected + const auto& hbs = remote.control->head_block_state(); + + // create a bad block that has the producer schedule change extension before the feature upgrade + auto bad_block = std::make_shared(last_legacy_block->clone()); + emplace_extension( + bad_block->header_extensions, + producer_schedule_change_extension::extension_id(), + fc::raw::pack(std::make_pair(hbs->active_schedule.version + 1, std::vector{})) + ); + + // re-sign the bad block + auto header_bmroot = digest_type::hash( std::make_pair( bad_block->digest(), remote.control->head_block_state()->blockroot_merkle ) ); + auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule.schedule_hash) ); + bad_block->producer_signature = remote.get_private_key(N(eosio), "active").sign(sig_digest); + + // ensure it is rejected as an unknown extension + BOOST_REQUIRE_EXCEPTION( + remote.push_block(bad_block), producer_schedule_exception, + fc_exception_message_is( "Block header producer_schedule_change_extension before activation of WTMsig Block Signatures" ) + ); + } + + { // ensure that non-null new_producers is accepted (and fails later in validation) + const auto& hbs = remote.control->head_block_state(); + + // create a bad block that has the producer schedule change extension before the feature upgrade + auto bad_block = std::make_shared(last_legacy_block->clone()); + bad_block->new_producers = legacy::producer_schedule_type{hbs->active_schedule.version + 1, {}}; + + // re-sign the bad block + auto header_bmroot = digest_type::hash( std::make_pair( bad_block->digest(), remote.control->head_block_state()->blockroot_merkle ) ); + auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule.schedule_hash) 
); + bad_block->producer_signature = remote.get_private_key(N(eosio), "active").sign(sig_digest); + + // ensure it is accepted (but rejected because it doesn't match expected state) + BOOST_REQUIRE_EXCEPTION( + remote.push_block(bad_block), wrong_signing_key, + fc_exception_message_is( "block signed by unexpected key" ) + ); + } + + remote.push_block(last_legacy_block); + + // propagate header awareness of the activation. + auto first_new_block = c.produce_block(); + + { + const auto& hbs = remote.control->head_block_state(); + + // create a bad block that has the producer schedule change extension that is valid but not warranted by actions in the block + auto bad_block = std::make_shared(first_new_block->clone()); + emplace_extension( + bad_block->header_extensions, + producer_schedule_change_extension::extension_id(), + fc::raw::pack(std::make_pair(hbs->active_schedule.version + 1, std::vector{})) + ); + + // re-sign the bad block + auto header_bmroot = digest_type::hash( std::make_pair( bad_block->digest(), remote.control->head_block_state()->blockroot_merkle ) ); + auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule.schedule_hash) ); + bad_block->producer_signature = remote.get_private_key(N(eosio), "active").sign(sig_digest); + + // ensure it is rejected because it doesn't match expected state (but the extention was accepted) + BOOST_REQUIRE_EXCEPTION( + remote.push_block(bad_block), wrong_signing_key, + fc_exception_message_is( "block signed by unexpected key" ) + ); + } + + { // ensure that non-null new_producers is rejected + const auto& hbs = remote.control->head_block_state(); + + // create a bad block that has the producer schedule change extension before the feature upgrade + auto bad_block = std::make_shared(first_new_block->clone()); + bad_block->new_producers = legacy::producer_schedule_type{hbs->active_schedule.version + 1, {}}; + + // re-sign the bad block + auto header_bmroot = 
digest_type::hash( std::make_pair( bad_block->digest(), remote.control->head_block_state()->blockroot_merkle ) ); + auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule.schedule_hash) ); + bad_block->producer_signature = remote.get_private_key(N(eosio), "active").sign(sig_digest); + + // ensure it is rejected because the new_producers field is not null + BOOST_REQUIRE_EXCEPTION( + remote.push_block(bad_block), producer_schedule_exception, + fc_exception_message_is( "Block header contains legacy producer schedule outdated by activation of WTMsig Block Signatures" ) + ); + } + + remote.push_block(first_new_block); + remote.push_block(c.produce_block()); +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( wtmsig_block_signing_inflight_legacy_test ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest(builtin_protocol_feature_t::wtmsig_block_signatures); + BOOST_REQUIRE(d); + + c.produce_blocks(2); + + // activate the feature, and start an in-flight producer schedule change with the legacy format + c.preactivate_protocol_features( {*d} ); + vector sched = {{N(eosio), c.get_public_key(N(eosio), "bsk")}}; + c.push_action(config::system_account_name, N(setprods), config::system_account_name, fc::mutable_variant_object()("schedule", sched)); + c.produce_block(); + + // ensure the last legacy block contains a new_producers + auto last_legacy_block = c.produce_block(); + BOOST_REQUIRE_EQUAL(last_legacy_block->new_producers.valid(), true); + + // promote to active schedule + c.produce_block(); + + // ensure that the next block is updated to the new schedule + BOOST_REQUIRE_EXCEPTION( c.produce_block(), no_block_signatures, fc_exception_message_is( "Signer returned no signatures" )); + c.control->abort_block(); + + c.block_signing_private_keys.emplace(get_public_key(N(eosio), "bsk"), 
get_private_key(N(eosio), "bsk")); + c.produce_block(); + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE( wtmsig_block_signing_inflight_extension_test ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest(builtin_protocol_feature_t::wtmsig_block_signatures); + BOOST_REQUIRE(d); + + c.produce_blocks(2); + + // activate the feature + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + // start an in-flight producer schedule change before the activation is availble to header only validators + vector sched = {{N(eosio), c.get_public_key(N(eosio), "bsk")}}; + c.push_action(config::system_account_name, N(setprods), config::system_account_name, fc::mutable_variant_object()("schedule", sched)); + c.produce_block(); + + // ensure the first possible new block contains a producer_schedule_change_extension + auto first_new_block = c.produce_block(); + BOOST_REQUIRE_EQUAL(first_new_block->new_producers.valid(), false); + BOOST_REQUIRE_EQUAL(first_new_block->header_extensions.size(), 1); + BOOST_REQUIRE_EQUAL(first_new_block->header_extensions.at(0).first, producer_schedule_change_extension::extension_id()); + + // promote to active schedule + c.produce_block(); + + // ensure that the next block is updated to the new schedule + BOOST_REQUIRE_EXCEPTION( c.produce_block(), no_block_signatures, fc_exception_message_is( "Signer returned no signatures" )); + c.control->abort_block(); + + c.block_signing_private_keys.emplace(get_public_key(N(eosio), "bsk"), get_private_key(N(eosio), "bsk")); + c.produce_block(); + +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/ram_tests.cpp b/unittests/ram_tests.cpp index e73eb979339..bacf4c656bd 100644 --- a/unittests/ram_tests.cpp +++ b/unittests/ram_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file api_tests.cpp - * @copyright defined in eos/LICENSE - */ #pragma GCC diagnostic push 
#pragma GCC diagnostic ignored "-Wsign-compare" #include @@ -36,7 +32,7 @@ BOOST_FIXTURE_TEST_CASE(ram_tests, eosio_system::eosio_system_tester) { try { create_account_with_resources(N(testram11111),config::system_account_name, init_request_bytes + 40); create_account_with_resources(N(testram22222),config::system_account_name, init_request_bytes + 1190); produce_blocks(10); - BOOST_REQUIRE_EQUAL( success(), stake( "eosio.stake", "testram11111", core_from_string("10.0000"), core_from_string("5.0000") ) ); + BOOST_REQUIRE_EQUAL( success(), stake( name("eosio.stake"), name("testram11111"), core_from_string("10.0000"), core_from_string("5.0000") ) ); produce_blocks(10); for (auto i = 0; i < 10; ++i) { diff --git a/unittests/resource_limits_test.cpp b/unittests/resource_limits_test.cpp index d4ce66e5c2a..a32190c55ae 100644 --- a/unittests/resource_limits_test.cpp +++ b/unittests/resource_limits_test.cpp @@ -1,7 +1,3 @@ -/** - * @file api_tests.cpp - * @copyright defined in eos/LICENSE.txt - */ #include #include diff --git a/unittests/restart_chain_tests.cpp b/unittests/restart_chain_tests.cpp new file mode 100644 index 00000000000..06958d25532 --- /dev/null +++ b/unittests/restart_chain_tests.cpp @@ -0,0 +1,81 @@ +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include + +using namespace eosio; +using namespace testing; +using namespace chain; + +void remove_existing_blocks(controller::config& config) { + auto block_log_path = config.blocks_dir / "blocks.log"; + remove(block_log_path); + auto block_index_path = config.blocks_dir / "blocks.index"; + remove(block_index_path); +} + +BOOST_AUTO_TEST_SUITE(restart_chain_tests) + +BOOST_AUTO_TEST_CASE(test_existing_state_without_block_log) +{ + tester chain; + + std::vector blocks; + blocks.push_back(chain.produce_block()); + blocks.push_back(chain.produce_block()); + blocks.push_back(chain.produce_block()); + + tester other; + for (auto new_block : blocks) { + 
other.push_block(new_block); + } + blocks.clear(); + + other.close(); + auto cfg = other.get_config(); + remove_existing_blocks(cfg); + // restarting chain with no block log and no genesis + other.open(); + + blocks.push_back(chain.produce_block()); + blocks.push_back(chain.produce_block()); + blocks.push_back(chain.produce_block()); + chain.control->abort_block(); + + for (auto new_block : blocks) { + other.push_block(new_block); + } +} + +BOOST_AUTO_TEST_CASE(test_restart_with_different_chain_id) +{ + tester chain; + + std::vector blocks; + blocks.push_back(chain.produce_block()); + blocks.push_back(chain.produce_block()); + blocks.push_back(chain.produce_block()); + + tester other; + for (auto new_block : blocks) { + other.push_block(new_block); + } + blocks.clear(); + + other.close(); + genesis_state genesis; + genesis.initial_timestamp = fc::time_point::from_iso_string("2020-01-01T00:00:01.000"); + genesis.initial_key = eosio::testing::base_tester::get_public_key( config::system_account_name, "active" ); + fc::optional chain_id = genesis.compute_chain_id(); + BOOST_REQUIRE_EXCEPTION(other.open(chain_id), chain_id_type_exception, fc_exception_message_starts_with("chain ID in state ")); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp index a3749f9656a..cd1a2c3b2c1 100644 --- a/unittests/snapshot_tests.cpp +++ b/unittests/snapshot_tests.cpp @@ -1,9 +1,7 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include +#include +#include #include #include @@ -11,44 +9,58 @@ #include #include +#include using namespace eosio; using namespace testing; using namespace chain; -class snapshotted_tester : public base_tester { -public: - snapshotted_tester(controller::config config, const snapshot_reader_ptr& snapshot, int ordinal) { - FC_ASSERT(config.blocks_dir.filename().generic_string() != "." 
- && config.state_dir.filename().generic_string() != ".", "invalid path names in controller::config"); +chainbase::bfs::path get_parent_path(chainbase::bfs::path blocks_dir, int ordinal) { + chainbase::bfs::path leaf_dir = blocks_dir.filename(); + if (leaf_dir.generic_string() == std::string("blocks")) { + blocks_dir = blocks_dir.parent_path(); + leaf_dir = blocks_dir.filename(); + try { + auto ordinal_for_config = boost::lexical_cast(leaf_dir.generic_string()); + blocks_dir = blocks_dir.parent_path(); + } + catch(const boost::bad_lexical_cast& ) { + // no extra ordinal directory added to path + } + } + return blocks_dir / std::to_string(ordinal); +} - controller::config copied_config = config; - copied_config.blocks_dir = - config.blocks_dir.parent_path() / std::to_string(ordinal).append(config.blocks_dir.filename().generic_string()); - copied_config.state_dir = - config.state_dir.parent_path() / std::to_string(ordinal).append(config.state_dir.filename().generic_string()); +controller::config copy_config(const controller::config& config, int ordinal) { + controller::config copied_config = config; + auto parent_path = get_parent_path(config.blocks_dir, ordinal); + copied_config.blocks_dir = parent_path / config.blocks_dir.filename().generic_string(); + copied_config.state_dir = parent_path / config.state_dir.filename().generic_string(); + return copied_config; +} - init(copied_config, snapshot); - } +controller::config copy_config_and_files(const controller::config& config, int ordinal) { + controller::config copied_config = copy_config(config, ordinal); + fc::create_directories(copied_config.blocks_dir); + fc::copy(config.blocks_dir / "blocks.log", copied_config.blocks_dir / "blocks.log"); + fc::copy(config.blocks_dir / config::reversible_blocks_dir_name, copied_config.blocks_dir / config::reversible_blocks_dir_name ); + return copied_config; +} - snapshotted_tester(controller::config config, const snapshot_reader_ptr& snapshot, int ordinal, int 
copy_block_log_from_ordinal) { +class snapshotted_tester : public base_tester { +public: + enum config_file_handling { dont_copy_config_files, copy_config_files }; + snapshotted_tester(controller::config config, const snapshot_reader_ptr& snapshot, int ordinal, + config_file_handling copy_files_from_config = config_file_handling::dont_copy_config_files) { FC_ASSERT(config.blocks_dir.filename().generic_string() != "." - && config.state_dir.filename().generic_string() != ".", "invalid path names in controller::config"); - - controller::config copied_config = config; - copied_config.blocks_dir = - config.blocks_dir.parent_path() / std::to_string(ordinal).append(config.blocks_dir.filename().generic_string()); - copied_config.state_dir = - config.state_dir.parent_path() / std::to_string(ordinal).append(config.state_dir.filename().generic_string()); + && config.state_dir.filename().generic_string() != ".", "invalid path names in controller::config"); - // create a copy of the desired block log and reversible - auto block_log_path = config.blocks_dir.parent_path() / std::to_string(copy_block_log_from_ordinal).append(config.blocks_dir.filename().generic_string()); - fc::create_directories(copied_config.blocks_dir); - fc::copy(block_log_path / "blocks.log", copied_config.blocks_dir / "blocks.log"); - fc::copy(block_log_path / config::reversible_blocks_dir_name, copied_config.blocks_dir / config::reversible_blocks_dir_name ); + controller::config copied_config = (copy_files_from_config == copy_config_files) + ? 
copy_config_and_files(config, ordinal) : copy_config(config, ordinal); init(copied_config, snapshot); } + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { return _produce_block(skip_time, false); } @@ -102,6 +114,10 @@ struct variant_snapshot_suite { return std::make_shared(buffer); } + template + static snapshot_t load_from_file() { + return Snapshot::json(); + } }; struct buffered_snapshot_suite { @@ -145,12 +161,113 @@ struct buffered_snapshot_suite { return std::make_shared(std::make_shared(buffer)); } + template + static snapshot_t load_from_file() { + return Snapshot::bin(); + } }; BOOST_AUTO_TEST_SUITE(snapshot_tests) using snapshot_suites = boost::mpl::list; +namespace { + void variant_diff_helper(const fc::variant& lhs, const fc::variant& rhs, std::function&& out){ + if (lhs.get_type() != rhs.get_type()) { + out("", lhs, rhs); + } else if (lhs.is_object() ) { + const auto& l_obj = lhs.get_object(); + const auto& r_obj = rhs.get_object(); + static const std::string sep = "."; + + // test keys from LHS + std::set keys; + for (const auto& entry: l_obj) { + const auto& l_val = entry.value(); + const auto& r_iter = r_obj.find(entry.key()); + if (r_iter == r_obj.end()) { + out(sep + entry.key(), l_val, fc::variant()); + } else { + const auto& r_val = r_iter->value(); + variant_diff_helper(l_val, r_val, [&out, &entry](const std::string& path, const fc::variant& lhs, const fc::variant& rhs){ + out(sep + entry.key() + path, lhs, rhs); + }); + } + + keys.insert(entry.key()); + } + + // print keys in RHS that were not tested + for (const auto& entry: r_obj) { + if (keys.find(entry.key()) != keys.end()) { + continue; + } + const auto& r_val = entry.value(); + out(sep + entry.key(), fc::variant(), r_val); + } + } else if (lhs.is_array()) { + const auto& l_arr = lhs.get_array(); + const auto& r_arr = rhs.get_array(); + + // diff common + auto common_count = std::min(l_arr.size(), r_arr.size()); + for 
(size_t idx = 0; idx < common_count; idx++) { + const auto& l_val = l_arr.at(idx); + const auto& r_val = r_arr.at(idx); + variant_diff_helper(l_val, r_val, [&](const std::string& path, const fc::variant& lhs, const fc::variant& rhs){ + out( std::string("[") + std::to_string(idx) + std::string("]") + path, lhs, rhs); + }); + } + + // print lhs additions + for (size_t idx = common_count; idx < lhs.size(); idx++) { + const auto& l_val = l_arr.at(idx); + out( std::string("[") + std::to_string(idx) + std::string("]"), l_val, fc::variant()); + } + + // print rhs additions + for (size_t idx = common_count; idx < rhs.size(); idx++) { + const auto& r_val = r_arr.at(idx); + out( std::string("[") + std::to_string(idx) + std::string("]"), fc::variant(), r_val); + } + + } else if (!(lhs == rhs)) { + out("", lhs, rhs); + } + } + + void print_variant_diff(const fc::variant& lhs, const fc::variant& rhs) { + variant_diff_helper(lhs, rhs, [](const std::string& path, const fc::variant& lhs, const fc::variant& rhs){ + std::cout << path << std::endl; + if (!lhs.is_null()) { + std::cout << " < " << fc::json::to_pretty_string(lhs) << std::endl; + } + + if (!rhs.is_null()) { + std::cout << " > " << fc::json::to_pretty_string(rhs) << std::endl; + } + }); + } + + template + void verify_integrity_hash(const controller& lhs, const controller& rhs) { + const auto lhs_integrity_hash = lhs.calculate_integrity_hash(); + const auto rhs_integrity_hash = rhs.calculate_integrity_hash(); + if (std::is_same_v && lhs_integrity_hash.str() != rhs_integrity_hash.str()) { + auto lhs_latest_writer = SNAPSHOT_SUITE::get_writer(); + lhs.write_snapshot(lhs_latest_writer); + auto lhs_latest = SNAPSHOT_SUITE::finalize(lhs_latest_writer); + + auto rhs_latest_writer = SNAPSHOT_SUITE::get_writer(); + rhs.write_snapshot(rhs_latest_writer); + auto rhs_latest = SNAPSHOT_SUITE::finalize(rhs_latest_writer); + + print_variant_diff(lhs_latest, rhs_latest); + } + BOOST_REQUIRE_EQUAL(lhs_integrity_hash.str(), 
rhs_integrity_hash.str()); + } +} + BOOST_AUTO_TEST_CASE_TEMPLATE(test_exhaustive_snapshot, SNAPSHOT_SUITE, snapshot_suites) { tester chain; @@ -198,6 +315,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_exhaustive_snapshot, SNAPSHOT_SUITE, snapshot BOOST_AUTO_TEST_CASE_TEMPLATE(test_replay_over_snapshot, SNAPSHOT_SUITE, snapshot_suites) { tester chain; + const chainbase::bfs::path parent_path = chain.get_config().blocks_dir.parent_path(); chain.create_account(N(snapshot)); chain.produce_blocks(1); @@ -220,7 +338,6 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_replay_over_snapshot, SNAPSHOT_SUITE, snapsho } chain.control->abort_block(); - auto expected_pre_integrity_hash = chain.control->calculate_integrity_hash(); // create a new snapshot child auto writer = SNAPSHOT_SUITE::get_writer(); @@ -228,8 +345,9 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_replay_over_snapshot, SNAPSHOT_SUITE, snapsho auto snapshot = SNAPSHOT_SUITE::finalize(writer); // create a new child at this snapshot - snapshotted_tester snap_chain(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), 1); - BOOST_REQUIRE_EQUAL(expected_pre_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); + int ordinal = 1; + snapshotted_tester snap_chain(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), ordinal++); + verify_integrity_hash(*chain.control, *snap_chain.control); // push more blocks to build up a block log for (int itr = 0; itr < post_snapshot_block_count; itr++) { @@ -244,12 +362,223 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_replay_over_snapshot, SNAPSHOT_SUITE, snapsho // verify the hash at the end chain.control->abort_block(); - auto expected_post_integrity_hash = chain.control->calculate_integrity_hash(); - BOOST_REQUIRE_EQUAL(expected_post_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); + verify_integrity_hash(*chain.control, *snap_chain.control); // replay the block log from the snapshot child, from the snapshot - snapshotted_tester 
replay_chain(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), 2, 1); - BOOST_REQUIRE_EQUAL(expected_post_integrity_hash.str(), snap_chain.control->calculate_integrity_hash().str()); + using config_file_handling = snapshotted_tester::config_file_handling; + snapshotted_tester replay_chain(snap_chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), ordinal++, config_file_handling::copy_config_files); + const auto replay_head = replay_chain.control->head_block_num(); + auto snap_head = snap_chain.control->head_block_num(); + BOOST_REQUIRE_EQUAL(replay_head, snap_chain.control->last_irreversible_block_num()); + for (auto block_num = replay_head + 1; block_num <= snap_head; ++block_num) { + auto block = snap_chain.control->fetch_block_by_number(block_num); + replay_chain.push_block(block); + } + verify_integrity_hash(*chain.control, *replay_chain.control); + + auto block = chain.produce_block(); + chain.control->abort_block(); + snap_chain.push_block(block); + replay_chain.push_block(block); + verify_integrity_hash(*chain.control, *snap_chain.control); + verify_integrity_hash(*chain.control, *replay_chain.control); + + snapshotted_tester replay2_chain(snap_chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), ordinal++, config_file_handling::copy_config_files); + const auto replay2_head = replay2_chain.control->head_block_num(); + snap_head = snap_chain.control->head_block_num(); + BOOST_REQUIRE_EQUAL(replay2_head, snap_chain.control->last_irreversible_block_num()); + for (auto block_num = replay2_head + 1; block_num <= snap_head; ++block_num) { + auto block = snap_chain.control->fetch_block_by_number(block_num); + replay2_chain.push_block(block); + } + verify_integrity_hash(*chain.control, *replay2_chain.control); + + // verifies that chain's block_log has a genesis_state (and blocks starting at 1) + controller::config copied_config = copy_config_and_files(chain.get_config(), ordinal++); + auto genesis = 
chain::block_log::extract_genesis_state(chain.get_config().blocks_dir); + BOOST_REQUIRE(genesis); + tester from_block_log_chain(copied_config, *genesis); + const auto from_block_log_head = from_block_log_chain.control->head_block_num(); + BOOST_REQUIRE_EQUAL(from_block_log_head, snap_chain.control->last_irreversible_block_num()); + for (auto block_num = from_block_log_head + 1; block_num <= snap_head; ++block_num) { + auto block = snap_chain.control->fetch_block_by_number(block_num); + from_block_log_chain.push_block(block); + } + verify_integrity_hash(*chain.control, *from_block_log_chain.control); +} + +BOOST_AUTO_TEST_CASE_TEMPLATE(test_chain_id_in_snapshot, SNAPSHOT_SUITE, snapshot_suites) +{ + tester chain; + const chainbase::bfs::path parent_path = chain.get_config().blocks_dir.parent_path(); + + chain.create_account(N(snapshot)); + chain.produce_blocks(1); + chain.set_code(N(snapshot), contracts::snapshot_test_wasm()); + chain.set_abi(N(snapshot), contracts::snapshot_test_abi().data()); + chain.produce_blocks(1); + chain.control->abort_block(); + + // create a new snapshot child + auto writer = SNAPSHOT_SUITE::get_writer(); + chain.control->write_snapshot(writer); + auto snapshot = SNAPSHOT_SUITE::finalize(writer); + + snapshotted_tester snap_chain(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), 0); + BOOST_REQUIRE_EQUAL(chain.control->get_chain_id(), snap_chain.control->get_chain_id()); + verify_integrity_hash(*chain.control, *snap_chain.control); +} + +BOOST_AUTO_TEST_CASE_TEMPLATE(test_compatible_versions, SNAPSHOT_SUITE, snapshot_suites) +{ + tester chain(setup_policy::preactivate_feature_and_new_bios); + + ///< Begin deterministic code to generate blockchain for comparison + // TODO: create a utility that will write new bin/json gzipped files based on this + chain.create_account(N(snapshot)); + chain.produce_blocks(1); + chain.set_code(N(snapshot), contracts::snapshot_test_wasm()); + chain.set_abi(N(snapshot), 
contracts::snapshot_test_abi().data()); + chain.produce_blocks(1); + chain.control->abort_block(); + + { + static_assert(chain_snapshot_header::minimum_compatible_version <= 2, "version 2 unit test is no longer needed. Please clean up data files"); + auto v2 = SNAPSHOT_SUITE::template load_from_file(); + int ordinal = 0; + snapshotted_tester v2_tester(chain.get_config(), SNAPSHOT_SUITE::get_reader(v2), ordinal++); + verify_integrity_hash(*chain.control, *v2_tester.control); + + // create a latest snapshot + auto latest_writer = SNAPSHOT_SUITE::get_writer(); + v2_tester.control->write_snapshot(latest_writer); + auto latest = SNAPSHOT_SUITE::finalize(latest_writer); + + // load the latest snapshot + snapshotted_tester latest_tester(chain.get_config(), SNAPSHOT_SUITE::get_reader(latest), ordinal++); + verify_integrity_hash(*v2_tester.control, *latest_tester.control); + } +} + +BOOST_AUTO_TEST_CASE_TEMPLATE(test_pending_schedule_snapshot, SNAPSHOT_SUITE, snapshot_suites) +{ + tester chain(setup_policy::preactivate_feature_and_new_bios); + auto genesis = chain::block_log::extract_genesis_state(chain.get_config().blocks_dir); + BOOST_REQUIRE(genesis); + BOOST_REQUIRE_EQUAL(genesis->compute_chain_id(), chain.control->get_chain_id()); + const auto& gpo = chain.control->get_global_properties(); + BOOST_REQUIRE_EQUAL(gpo.chain_id, chain.control->get_chain_id()); + auto block = chain.produce_block(); + BOOST_REQUIRE_EQUAL(block->block_num(), 3); // ensure that test setup stays consistent with original snapshot setup + chain.create_account(N(snapshot)); + block = chain.produce_block(); + BOOST_REQUIRE_EQUAL(block->block_num(), 4); + + BOOST_REQUIRE_EQUAL(gpo.proposed_schedule.version, 0); + BOOST_REQUIRE_EQUAL(gpo.proposed_schedule.producers.size(), 0); + + auto res = chain.set_producers_legacy( {N(snapshot)} ); + block = chain.produce_block(); + BOOST_REQUIRE_EQUAL(block->block_num(), 5); + chain.control->abort_block(); + ///< End deterministic code to generate blockchain for 
comparison + + BOOST_REQUIRE_EQUAL(gpo.proposed_schedule.version, 1); + BOOST_REQUIRE_EQUAL(gpo.proposed_schedule.producers.size(), 1); + BOOST_REQUIRE_EQUAL(gpo.proposed_schedule.producers[0].producer_name.to_string(), "snapshot"); + + static_assert(chain_snapshot_header::minimum_compatible_version <= 2, "version 2 unit test is no longer needed. Please clean up data files"); + auto v2 = SNAPSHOT_SUITE::template load_from_file(); + int ordinal = 0; + + //////////////////////////////////////////////////////////////////////// + // Verify that the controller gets its chain_id from the snapshot + //////////////////////////////////////////////////////////////////////// + + auto reader = SNAPSHOT_SUITE::get_reader(v2); + snapshotted_tester v2_tester(chain.get_config(), reader, ordinal++); + auto chain_id = chain::controller::extract_chain_id(*reader); + BOOST_REQUIRE_EQUAL(chain_id, v2_tester.control->get_chain_id()); + BOOST_REQUIRE_EQUAL(chain.control->get_chain_id(), v2_tester.control->get_chain_id()); + verify_integrity_hash(*chain.control, *v2_tester.control); + + // create a latest version snapshot from the loaded v2 snapthos + auto latest_from_v2_writer = SNAPSHOT_SUITE::get_writer(); + v2_tester.control->write_snapshot(latest_from_v2_writer); + auto latest_from_v2 = SNAPSHOT_SUITE::finalize(latest_from_v2_writer); + + // load the latest snapshot in a new tester and compare integrity + snapshotted_tester latest_from_v2_tester(chain.get_config(), SNAPSHOT_SUITE::get_reader(latest_from_v2), ordinal++); + verify_integrity_hash(*v2_tester.control, *latest_from_v2_tester.control); + + const auto& v2_gpo = v2_tester.control->get_global_properties(); + BOOST_REQUIRE_EQUAL(v2_gpo.proposed_schedule.version, 1); + BOOST_REQUIRE_EQUAL(v2_gpo.proposed_schedule.producers.size(), 1); + BOOST_REQUIRE_EQUAL(v2_gpo.proposed_schedule.producers[0].producer_name.to_string(), "snapshot"); + + // produce block + auto new_block = chain.produce_block(); + // undo the auto-pending from 
tester + chain.control->abort_block(); + + // push that block to all sub testers and validate the integrity of the database after it. + v2_tester.push_block(new_block); + verify_integrity_hash(*chain.control, *v2_tester.control); + + latest_from_v2_tester.push_block(new_block); + verify_integrity_hash(*chain.control, *latest_from_v2_tester.control); +} + +BOOST_AUTO_TEST_CASE_TEMPLATE(test_restart_with_existing_state_and_truncated_block_log, SNAPSHOT_SUITE, snapshot_suites) +{ + tester chain; + const chainbase::bfs::path parent_path = chain.get_config().blocks_dir.parent_path(); + + chain.create_account(N(snapshot)); + chain.produce_blocks(1); + chain.set_code(N(snapshot), contracts::snapshot_test_wasm()); + chain.set_abi(N(snapshot), contracts::snapshot_test_abi().data()); + chain.produce_blocks(1); + chain.control->abort_block(); + + static const int pre_snapshot_block_count = 12; + + for (int itr = 0; itr < pre_snapshot_block_count; itr++) { + // increment the contract + chain.push_action(N(snapshot), N(increment), N(snapshot), mutable_variant_object() + ( "value", 1 ) + ); + + // produce block + chain.produce_block(); + } + + chain.control->abort_block(); + + // create a new snapshot child + auto writer = SNAPSHOT_SUITE::get_writer(); + chain.control->write_snapshot(writer); + auto snapshot = SNAPSHOT_SUITE::finalize(writer); + + // create a new child at this snapshot + int ordinal = 1; + snapshotted_tester snap_chain(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), ordinal++); + verify_integrity_hash(*chain.control, *snap_chain.control); + auto block = chain.produce_block(); + chain.control->abort_block(); + snap_chain.push_block(block); + verify_integrity_hash(*chain.control, *snap_chain.control); + + snap_chain.close(); + auto cfg = snap_chain.get_config(); + // restart chain with truncated block log and existing state, but no genesis state (chain_id) + snap_chain.open(); + verify_integrity_hash(*chain.control, *snap_chain.control); + + block = 
chain.produce_block(); + chain.control->abort_block(); + snap_chain.push_block(block); + verify_integrity_hash(*chain.control, *snap_chain.control); } BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/snapshots.hpp.in b/unittests/snapshots.hpp.in new file mode 100644 index 00000000000..2af307f6c3f --- /dev/null +++ b/unittests/snapshots.hpp.in @@ -0,0 +1,17 @@ +#pragma once + +#define MAKE_READ_SNAPSHOT(NAME) \ + struct NAME {\ + static fc::variant json() { return read_json_snapshot ("${CMAKE_BINARY_DIR}/unittests/snapshots/" #NAME ".json.gz"); } \ + static std::string bin() { return read_binary_snapshot("${CMAKE_BINARY_DIR}/unittests/snapshots/" #NAME ".bin.gz" ); } \ + };\ + +namespace eosio { + namespace testing { + struct snapshots { + // v2 + MAKE_READ_SNAPSHOT(snap_v2) + MAKE_READ_SNAPSHOT(snap_v2_prod_sched) + }; + } /// eosio::testing +} /// eosio diff --git a/unittests/snapshots/CMakeLists.txt b/unittests/snapshots/CMakeLists.txt new file mode 100644 index 00000000000..8b7b147e494 --- /dev/null +++ b/unittests/snapshots/CMakeLists.txt @@ -0,0 +1,4 @@ +configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/snap_v2.bin.gz ${CMAKE_CURRENT_BINARY_DIR}/snap_v2.bin.gz COPYONLY ) +configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/snap_v2.json.gz ${CMAKE_CURRENT_BINARY_DIR}/snap_v2.json.gz COPYONLY ) +configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/snap_v2_prod_sched.bin.gz ${CMAKE_CURRENT_BINARY_DIR}/snap_v2_prod_sched.bin.gz COPYONLY ) +configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/snap_v2_prod_sched.json.gz ${CMAKE_CURRENT_BINARY_DIR}/snap_v2_prod_sched.json.gz COPYONLY ) diff --git a/unittests/snapshots/snap_v2.bin.gz b/unittests/snapshots/snap_v2.bin.gz new file mode 100644 index 00000000000..6c2818acee1 Binary files /dev/null and b/unittests/snapshots/snap_v2.bin.gz differ diff --git a/unittests/snapshots/snap_v2.json.gz b/unittests/snapshots/snap_v2.json.gz new file mode 100644 index 00000000000..6689f080f00 Binary files /dev/null and b/unittests/snapshots/snap_v2.json.gz differ 
diff --git a/unittests/snapshots/snap_v2_prod_sched.bin.gz b/unittests/snapshots/snap_v2_prod_sched.bin.gz new file mode 100644 index 00000000000..3516218f216 Binary files /dev/null and b/unittests/snapshots/snap_v2_prod_sched.bin.gz differ diff --git a/unittests/snapshots/snap_v2_prod_sched.json.gz b/unittests/snapshots/snap_v2_prod_sched.json.gz new file mode 100644 index 00000000000..6975eedea85 Binary files /dev/null and b/unittests/snapshots/snap_v2_prod_sched.json.gz differ diff --git a/unittests/special_accounts_tests.cpp b/unittests/special_accounts_tests.cpp index aa769bcacaa..34f06836daa 100644 --- a/unittests/special_accounts_tests.cpp +++ b/unittests/special_accounts_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include @@ -65,7 +61,7 @@ BOOST_FIXTURE_TEST_CASE(accounts_exists, tester) for (size_t i = 0; i < std::max(active_auth.size(), active_producers.producers.size()); ++i) { account_name n1 = i < active_auth.size() ? active_auth[i] : (account_name)0; account_name n2 = i < active_producers.producers.size() ? 
active_producers.producers[i].producer_name : (account_name)0; - if (n1 != n2) diff.push_back((uint64_t)n2 - (uint64_t)n1); + if (n1 != n2) diff.push_back(name(n2.to_uint64_t() - n1.to_uint64_t())); } BOOST_CHECK_EQUAL(diff.size(), 0); diff --git a/unittests/test-contracts/CMakeLists.txt b/unittests/test-contracts/CMakeLists.txt index 1aded520712..7c48729a524 100644 --- a/unittests/test-contracts/CMakeLists.txt +++ b/unittests/test-contracts/CMakeLists.txt @@ -10,6 +10,7 @@ endif() add_subdirectory( asserter ) add_subdirectory( deferred_test ) add_subdirectory( get_sender_test ) +add_subdirectory( get_table_test ) add_subdirectory( integration_test ) add_subdirectory( noop ) add_subdirectory( payloadless ) diff --git a/unittests/test-contracts/asserter/asserter.cpp b/unittests/test-contracts/asserter/asserter.cpp index 3402e9d9940..f8710e4c556 100644 --- a/unittests/test-contracts/asserter/asserter.cpp +++ b/unittests/test-contracts/asserter/asserter.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include "asserter.hpp" using namespace eosio; diff --git a/unittests/test-contracts/asserter/asserter.hpp b/unittests/test-contracts/asserter/asserter.hpp index 625dc2e4cd7..2261a96e237 100644 --- a/unittests/test-contracts/asserter/asserter.hpp +++ b/unittests/test-contracts/asserter/asserter.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/deferred_test/deferred_test.cpp b/unittests/test-contracts/deferred_test/deferred_test.cpp index 1ff307d8426..d70720b18bf 100644 --- a/unittests/test-contracts/deferred_test/deferred_test.cpp +++ b/unittests/test-contracts/deferred_test/deferred_test.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include "deferred_test.hpp" #include #include diff --git a/unittests/test-contracts/deferred_test/deferred_test.hpp b/unittests/test-contracts/deferred_test/deferred_test.hpp index 
ba7b5c26730..f5a322337e4 100644 --- a/unittests/test-contracts/deferred_test/deferred_test.hpp +++ b/unittests/test-contracts/deferred_test/deferred_test.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/get_sender_test/get_sender_test.cpp b/unittests/test-contracts/get_sender_test/get_sender_test.cpp index a3574dfae4c..5f89aa228cf 100644 --- a/unittests/test-contracts/get_sender_test/get_sender_test.cpp +++ b/unittests/test-contracts/get_sender_test/get_sender_test.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include "get_sender_test.hpp" #include diff --git a/unittests/test-contracts/get_sender_test/get_sender_test.hpp b/unittests/test-contracts/get_sender_test/get_sender_test.hpp index 632c2905326..0e001437e11 100644 --- a/unittests/test-contracts/get_sender_test/get_sender_test.hpp +++ b/unittests/test-contracts/get_sender_test/get_sender_test.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/get_table_test/CMakeLists.txt b/unittests/test-contracts/get_table_test/CMakeLists.txt new file mode 100644 index 00000000000..846bf453406 --- /dev/null +++ b/unittests/test-contracts/get_table_test/CMakeLists.txt @@ -0,0 +1,6 @@ +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( get_table_test get_table_test get_table_test.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/get_table_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/get_table_test.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/get_table_test.abi ${CMAKE_CURRENT_BINARY_DIR}/get_table_test.abi COPYONLY ) +endif() diff --git a/unittests/test-contracts/get_table_test/get_table_test.abi b/unittests/test-contracts/get_table_test/get_table_test.abi new file mode 100644 index 00000000000..1a7cdcc2137 --- /dev/null +++ b/unittests/test-contracts/get_table_test/get_table_test.abi @@ -0,0 +1,105 @@ 
+{ + "____comment": "This file was generated with eosio-abigen. DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "addhashobj", + "base": "", + "fields": [ + { + "name": "hashinput", + "type": "string" + } + ] + }, + { + "name": "addnumobj", + "base": "", + "fields": [ + { + "name": "input", + "type": "uint64" + } + ] + }, + { + "name": "hashobj", + "base": "", + "fields": [ + { + "name": "key", + "type": "uint64" + }, + { + "name": "hash_input", + "type": "string" + }, + { + "name": "sec256", + "type": "checksum256" + }, + { + "name": "sec160", + "type": "checksum160" + } + ] + }, + { + "name": "numobj", + "base": "", + "fields": [ + { + "name": "key", + "type": "uint64" + }, + { + "name": "sec64", + "type": "uint64" + }, + { + "name": "sec128", + "type": "uint128" + }, + { + "name": "secdouble", + "type": "float64" + }, + { + "name": "secldouble", + "type": "float128" + } + ] + } + ], + "actions": [ + { + "name": "addhashobj", + "type": "addhashobj", + "ricardian_contract": "" + }, + { + "name": "addnumobj", + "type": "addnumobj", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "hashobjs", + "type": "hashobj", + "index_type": "i64", + "key_names": [], + "key_types": [] + }, + { + "name": "numobjs", + "type": "numobj", + "index_type": "i64", + "key_names": [], + "key_types": [] + } + ], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/test-contracts/get_table_test/get_table_test.cpp b/unittests/test-contracts/get_table_test/get_table_test.cpp new file mode 100644 index 00000000000..c35409f2ac2 --- /dev/null +++ b/unittests/test-contracts/get_table_test/get_table_test.cpp @@ -0,0 +1,26 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include "get_table_test.hpp" + +void get_table_test::addnumobj(uint64_t input) { + numobjs numobjs_table( _self, _self.value ); + numobjs_table.emplace(_self, [&]( auto& obj ) { + obj.key = 
numobjs_table.available_primary_key(); + obj.sec64 = input; + obj.sec128 = input; + obj.secdouble = input; + obj.secldouble = input; + }); +} + +void get_table_test::addhashobj(std::string hashinput) { + hashobjs hashobjs_table( _self, _self.value ); + hashobjs_table.emplace(_self, [&]( auto& obj ) { + obj.key = hashobjs_table.available_primary_key(); + obj.hash_input = hashinput; + obj.sec256 = sha256(hashinput.data(), hashinput.size()); + obj.sec160 = ripemd160(hashinput.data(), hashinput.size()); + }); +} \ No newline at end of file diff --git a/unittests/test-contracts/get_table_test/get_table_test.hpp b/unittests/test-contracts/get_table_test/get_table_test.hpp new file mode 100644 index 00000000000..a777bfd63ee --- /dev/null +++ b/unittests/test-contracts/get_table_test/get_table_test.hpp @@ -0,0 +1,63 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include +#include + +using namespace eosio; + +class [[eosio::contract]] get_table_test : public eosio::contract { + public: + using eosio::contract::contract; + + // Number object + struct [[eosio::table]] numobj { + uint64_t key; + uint64_t sec64; + uint128_t sec128; + double secdouble; + long double secldouble; + + uint64_t primary_key() const { return key; } + uint64_t sec64_key() const { return sec64; } + uint128_t sec128_key() const { return sec128; } + double secdouble_key() const { return secdouble; } + long double secldouble_key() const { return secldouble; } + }; + + // Hash object + struct [[eosio::table]] hashobj { + uint64_t key; + std::string hash_input; + checksum256 sec256; + checksum160 sec160; + + uint64_t primary_key() const { return key; } + checksum256 sec256_key() const { return sec256; } + checksum256 sec160_key() const { return checksum256(sec160.get_array()); } + }; + + typedef eosio::multi_index< "numobjs"_n, numobj, + indexed_by<"bysec1"_n, const_mem_fun>, + indexed_by<"bysec2"_n, const_mem_fun>, + indexed_by<"bysec3"_n, const_mem_fun>, + 
indexed_by<"bysec4"_n, const_mem_fun> + > numobjs; + + typedef eosio::multi_index< "hashobjs"_n, hashobj, + indexed_by<"bysec1"_n, const_mem_fun>, + indexed_by<"bysec2"_n, const_mem_fun> + > hashobjs; + + [[eosio::action]] + void addnumobj(uint64_t input); + + + [[eosio::action]] + void addhashobj(std::string hashinput); + + +}; \ No newline at end of file diff --git a/unittests/test-contracts/get_table_test/get_table_test.wasm b/unittests/test-contracts/get_table_test/get_table_test.wasm new file mode 100755 index 00000000000..cc1b5399505 Binary files /dev/null and b/unittests/test-contracts/get_table_test/get_table_test.wasm differ diff --git a/unittests/test-contracts/integration_test/integration_test.cpp b/unittests/test-contracts/integration_test/integration_test.cpp index ec8543caafb..061fbce8798 100644 --- a/unittests/test-contracts/integration_test/integration_test.cpp +++ b/unittests/test-contracts/integration_test/integration_test.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include "integration_test.hpp" using namespace eosio; diff --git a/unittests/test-contracts/integration_test/integration_test.hpp b/unittests/test-contracts/integration_test/integration_test.hpp index cbdc02295b2..408b469a535 100644 --- a/unittests/test-contracts/integration_test/integration_test.hpp +++ b/unittests/test-contracts/integration_test/integration_test.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/noop/noop.cpp b/unittests/test-contracts/noop/noop.cpp index 94fd344495f..564749e56a9 100644 --- a/unittests/test-contracts/noop/noop.cpp +++ b/unittests/test-contracts/noop/noop.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include "noop.hpp" using namespace eosio; diff --git a/unittests/test-contracts/noop/noop.hpp b/unittests/test-contracts/noop/noop.hpp index f6966c1ef1a..5065c951f9a 100644 --- 
a/unittests/test-contracts/noop/noop.hpp +++ b/unittests/test-contracts/noop/noop.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/payloadless/payloadless.cpp b/unittests/test-contracts/payloadless/payloadless.cpp index 70939590e50..16637efa2b4 100644 --- a/unittests/test-contracts/payloadless/payloadless.cpp +++ b/unittests/test-contracts/payloadless/payloadless.cpp @@ -1,8 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - #include "payloadless.hpp" using namespace eosio; diff --git a/unittests/test-contracts/payloadless/payloadless.hpp b/unittests/test-contracts/payloadless/payloadless.hpp index 0a4e88affa1..945a0d74177 100644 --- a/unittests/test-contracts/payloadless/payloadless.hpp +++ b/unittests/test-contracts/payloadless/payloadless.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/proxy/proxy.cpp b/unittests/test-contracts/proxy/proxy.cpp index c9fc324cad5..41ef9b96c8a 100644 --- a/unittests/test-contracts/proxy/proxy.cpp +++ b/unittests/test-contracts/proxy/proxy.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include "proxy.hpp" #include diff --git a/unittests/test-contracts/proxy/proxy.hpp b/unittests/test-contracts/proxy/proxy.hpp index b913143fed2..eb0c336ac5f 100644 --- a/unittests/test-contracts/proxy/proxy.hpp +++ b/unittests/test-contracts/proxy/proxy.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.cpp b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.cpp index d24cdbdbc24..95752f81c9c 100644 --- a/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.cpp +++ b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.cpp @@ -1,7 +1,3 @@ -/** - * 
@file - * @copyright defined in eos/LICENSE - */ #include "ram_restrictions_test.hpp" #include diff --git a/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.hpp b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.hpp index 34a988fe5d5..df8d6f9aee6 100644 --- a/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.hpp +++ b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/reject_all/reject_all.cpp b/unittests/test-contracts/reject_all/reject_all.cpp index 40f26fd827b..ae9a3765384 100644 --- a/unittests/test-contracts/reject_all/reject_all.cpp +++ b/unittests/test-contracts/reject_all/reject_all.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include using namespace eosio; diff --git a/unittests/test-contracts/restrict_action_test/restrict_action_test.cpp b/unittests/test-contracts/restrict_action_test/restrict_action_test.cpp index 5c8f2b596b5..a21c13e6cc3 100644 --- a/unittests/test-contracts/restrict_action_test/restrict_action_test.cpp +++ b/unittests/test-contracts/restrict_action_test/restrict_action_test.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include "restrict_action_test.hpp" #include diff --git a/unittests/test-contracts/restrict_action_test/restrict_action_test.hpp b/unittests/test-contracts/restrict_action_test/restrict_action_test.hpp index f5ab48e385b..e17d5c4b226 100644 --- a/unittests/test-contracts/restrict_action_test/restrict_action_test.hpp +++ b/unittests/test-contracts/restrict_action_test/restrict_action_test.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/snapshot_test/snapshot_test.cpp b/unittests/test-contracts/snapshot_test/snapshot_test.cpp index 84e28e53ec7..c8b74b1b25b 
100644 --- a/unittests/test-contracts/snapshot_test/snapshot_test.cpp +++ b/unittests/test-contracts/snapshot_test/snapshot_test.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include "snapshot_test.hpp" using namespace eosio; diff --git a/unittests/test-contracts/snapshot_test/snapshot_test.hpp b/unittests/test-contracts/snapshot_test/snapshot_test.hpp index dce23efb538..b134f4a4bee 100644 --- a/unittests/test-contracts/snapshot_test/snapshot_test.hpp +++ b/unittests/test-contracts/snapshot_test/snapshot_test.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/test_api/test_action.cpp b/unittests/test-contracts/test_api/test_action.cpp index 65a8d31554c..1b20249fc0e 100644 --- a/unittests/test-contracts/test_api/test_action.cpp +++ b/unittests/test-contracts/test_api/test_action.cpp @@ -1,7 +1,3 @@ -/** - * @file action_test.cpp - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/unittests/test-contracts/test_api/test_api.cpp b/unittests/test-contracts/test_api/test_api.cpp index 417c89a5da4..f7d66ae8de3 100644 --- a/unittests/test-contracts/test_api/test_api.cpp +++ b/unittests/test-contracts/test_api/test_api.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include @@ -92,6 +88,7 @@ extern "C" { //test crypto WASM_TEST_HANDLER( test_crypto, test_recover_key ); + WASM_TEST_HANDLER( test_crypto, test_recover_key_partial ); WASM_TEST_HANDLER( test_crypto, test_recover_key_assert_true ); WASM_TEST_HANDLER( test_crypto, test_recover_key_assert_false ); WASM_TEST_HANDLER( test_crypto, test_sha1 ); diff --git a/unittests/test-contracts/test_api/test_api.hpp b/unittests/test-contracts/test_api/test_api.hpp index bbcf9965352..0fff16cf778 100644 --- a/unittests/test-contracts/test_api/test_api.hpp +++ b/unittests/test-contracts/test_api/test_api.hpp @@ -1,7 +1,3 @@ -/** - * @file - * 
@copyright defined in eos/LICENSE - */ #pragma once #include @@ -132,6 +128,7 @@ struct test_multi_index { struct test_crypto { static void test_recover_key(); + static void test_recover_key_partial(); static void test_recover_key_assert_true(); static void test_recover_key_assert_false(); static void test_sha1(); diff --git a/unittests/test-contracts/test_api/test_api.wasm b/unittests/test-contracts/test_api/test_api.wasm index 9d81f87f7aa..5fd5ff870c3 100755 Binary files a/unittests/test-contracts/test_api/test_api.wasm and b/unittests/test-contracts/test_api/test_api.wasm differ diff --git a/unittests/test-contracts/test_api/test_api_common.hpp b/unittests/test-contracts/test_api/test_api_common.hpp index bfad0146b46..1b4ef7db137 100644 --- a/unittests/test-contracts/test_api/test_api_common.hpp +++ b/unittests/test-contracts/test_api/test_api_common.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/test_api/test_chain.cpp b/unittests/test-contracts/test_api/test_chain.cpp index b9e35f870ee..28cfa2ac485 100644 --- a/unittests/test-contracts/test_api/test_chain.cpp +++ b/unittests/test-contracts/test_api/test_chain.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/unittests/test-contracts/test_api/test_checktime.cpp b/unittests/test-contracts/test_api/test_checktime.cpp index 4475f9fb873..4a14544125e 100644 --- a/unittests/test-contracts/test_api/test_checktime.cpp +++ b/unittests/test-contracts/test_api/test_checktime.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include diff --git a/unittests/test-contracts/test_api/test_crypto.cpp b/unittests/test-contracts/test_api/test_crypto.cpp index c310b2a2778..bc2ff120e15 100644 --- a/unittests/test-contracts/test_api/test_crypto.cpp +++ b/unittests/test-contracts/test_api/test_crypto.cpp @@ -1,7 +1,3 @@ -/** - * @file - * 
@copyright defined in eos/LICENSE - */ #include #include #include @@ -10,21 +6,21 @@ #define WASM_TEST_FAIL 1 -static const char test1[] = "abc"; -static const unsigned char test1_ok_1[] = { +static constexpr char test1[] = "abc"; +static constexpr unsigned char test1_ok_1[] = { 0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e, 0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d }; -static const unsigned char test1_ok_256[] = { - 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, - 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, - 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, - 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad +static constexpr unsigned char test1_ok_256[] = { + 0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, + 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, + 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, + 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad }; -static const unsigned char test1_ok_512[] = { +static constexpr unsigned char test1_ok_512[] = { 0xdd, 0xaf, 0x35, 0xa1, 0x93, 0x61, 0x7a, 0xba, 0xcc, 0x41, 0x73, 0x49, 0xae, 0x20, 0x41, 0x31, 0x12, 0xe6, 0xfa, 0x4e, 0x89, 0xa9, 0x7e, 0xa2, @@ -35,27 +31,27 @@ static const unsigned char test1_ok_512[] = { 0x2a, 0x9a, 0xc9, 0x4f, 0xa5, 0x4c, 0xa4, 0x9f }; -static const unsigned char test1_ok_ripe[] = { +static constexpr unsigned char test1_ok_ripe[] = { 0x8e, 0xb2, 0x08, 0xf7, 0xe0, 0x5d, 0x98, 0x7a, 0x9b, 0x04, 0x4a, 0x8e, 0x98, 0xc6, 0xb0, 0x87, 0xf1, 0x5a, 0x0b, 0xfc }; -const char test2[] = ""; -static const unsigned char test2_ok_1[] = { +static constexpr char test2[] = ""; +static constexpr unsigned char test2_ok_1[] = { 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09 }; -const unsigned char test2_ok_256[] = { - 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, - 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, - 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, - 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 
+static constexpr unsigned char test2_ok_256[] = { + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, + 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, + 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55 }; -const unsigned char test2_ok_512[] = { +static constexpr unsigned char test2_ok_512[] = { 0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd, 0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07, 0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc, @@ -66,27 +62,27 @@ const unsigned char test2_ok_512[] = { 0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e }; -const unsigned char test2_ok_ripe[] = { +static constexpr unsigned char test2_ok_ripe[] = { 0x9c, 0x11, 0x85, 0xa5, 0xc5, 0xe9, 0xfc, 0x54, 0x61, 0x28, 0x08, 0x97, 0x7e, 0xe8, 0xf5, 0x48, 0xb2, 0x25, 0x8d, 0x31 }; -static const char test3[] = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"; -static const unsigned char test3_ok_1[] = { +static constexpr char test3[] = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"; +static constexpr unsigned char test3_ok_1[] = { 0x84, 0x98, 0x3e, 0x44, 0x1c, 0x3b, 0xd2, 0x6e, 0xba, 0xae, 0x4a, 0xa1, 0xf9, 0x51, 0x29, 0xe5, 0xe5, 0x46, 0x70, 0xf1 }; -static const unsigned char test3_ok_256[] = { - 0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8, - 0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39, - 0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67, - 0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1 +static constexpr unsigned char test3_ok_256[] = { + 0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8, + 0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39, + 0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67, + 0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1 }; -static const unsigned char test3_ok_512[] = { +static constexpr unsigned char test3_ok_512[] = { 0x20, 0x4a, 0x8f, 0xc6, 0xdd, 0xa8, 0x2f, 0x0a, 0x0c, 0xed, 0x7b, 0xeb, 0x8e, 0x08, 0xa4, 0x16, 0x57, 0xc1, 0x6e, 0xf4, 0x68, 0xb2, 0x28, 0xa8, @@ -97,27 +93,27 @@ static const unsigned char 
test3_ok_512[] = { 0x54, 0xec, 0x63, 0x12, 0x38, 0xca, 0x34, 0x45 }; -static const unsigned char test3_ok_ripe[] = { +static constexpr unsigned char test3_ok_ripe[] = { 0x12, 0xa0, 0x53, 0x38, 0x4a, 0x9c, 0x0c, 0x88, 0xe4, 0x05, 0xa0, 0x6c, 0x27, 0xdc, 0xf4, 0x9a, 0xda, 0x62, 0xeb, 0x2b }; -static const char test4[] = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"; -static const unsigned char test4_ok_1[] = { +static constexpr char test4[] = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"; +static constexpr unsigned char test4_ok_1[] = { 0xa4, 0x9b, 0x24, 0x46, 0xa0, 0x2c, 0x64, 0x5b, 0xf4, 0x19, 0xf9, 0x95, 0xb6, 0x70, 0x91, 0x25, 0x3a, 0x04, 0xa2, 0x59 }; -static const unsigned char test4_ok_256[] = { - 0xcf, 0x5b, 0x16, 0xa7, 0x78, 0xaf, 0x83, 0x80, - 0x03, 0x6c, 0xe5, 0x9e, 0x7b, 0x04, 0x92, 0x37, - 0x0b, 0x24, 0x9b, 0x11, 0xe8, 0xf0, 0x7a, 0x51, - 0xaf, 0xac, 0x45, 0x03, 0x7a, 0xfe, 0xe9, 0xd1 +static constexpr unsigned char test4_ok_256[] = { + 0xcf, 0x5b, 0x16, 0xa7, 0x78, 0xaf, 0x83, 0x80, + 0x03, 0x6c, 0xe5, 0x9e, 0x7b, 0x04, 0x92, 0x37, + 0x0b, 0x24, 0x9b, 0x11, 0xe8, 0xf0, 0x7a, 0x51, + 0xaf, 0xac, 0x45, 0x03, 0x7a, 0xfe, 0xe9, 0xd1 }; -static const unsigned char test4_ok_512[] = { +static constexpr unsigned char test4_ok_512[] = { 0x8e, 0x95, 0x9b, 0x75, 0xda, 0xe3, 0x13, 0xda, 0x8c, 0xf4, 0xf7, 0x28, 0x14, 0xfc, 0x14, 0x3f, 0x8f, 0x77, 0x79, 0xc6, 0xeb, 0x9f, 0x7f, 0xa1, @@ -128,27 +124,27 @@ static const unsigned char test4_ok_512[] = { 0x5e, 0x96, 0xe5, 0x5b, 0x87, 0x4b, 0xe9, 0x09 }; -static const unsigned char test4_ok_ripe[] = { +static constexpr unsigned char test4_ok_ripe[] = { 0x6f, 0x3f, 0xa3, 0x9b, 0x6b, 0x50, 0x3c, 0x38, 0x4f, 0x91, 0x9a, 0x49, 0xa7, 0xaa, 0x5c, 0x2c, 0x08, 0xbd, 0xfb, 0x45 }; -static const char test5[] = "message digest"; -static const unsigned char test5_ok_1[] = { +static constexpr char 
test5[] = "message digest"; +static constexpr unsigned char test5_ok_1[] = { 0xc1, 0x22, 0x52, 0xce, 0xda, 0x8b, 0xe8, 0x99, 0x4d, 0x5f, 0xa0, 0x29, 0x0a, 0x47, 0x23, 0x1c, 0x1d, 0x16, 0xaa, 0xe3 }; -static const unsigned char test5_ok_256[] = { - 0xf7, 0x84, 0x6f, 0x55, 0xcf, 0x23, 0xe1, 0x4e, - 0xeb, 0xea, 0xb5, 0xb4, 0xe1, 0x55, 0x0c, 0xad, - 0x5b, 0x50, 0x9e, 0x33, 0x48, 0xfb, 0xc4, 0xef, - 0xa3, 0xa1, 0x41, 0x3d, 0x39, 0x3c, 0xb6, 0x50 +static constexpr unsigned char test5_ok_256[] = { + 0xf7, 0x84, 0x6f, 0x55, 0xcf, 0x23, 0xe1, 0x4e, + 0xeb, 0xea, 0xb5, 0xb4, 0xe1, 0x55, 0x0c, 0xad, + 0x5b, 0x50, 0x9e, 0x33, 0x48, 0xfb, 0xc4, 0xef, + 0xa3, 0xa1, 0x41, 0x3d, 0x39, 0x3c, 0xb6, 0x50 }; -static const unsigned char test5_ok_512[] = { +static constexpr unsigned char test5_ok_512[] = { 0x10, 0x7d, 0xbf, 0x38, 0x9d, 0x9e, 0x9f, 0x71, 0xa3, 0xa9, 0x5f, 0x6c, 0x05, 0x5b, 0x92, 0x51, 0xbc, 0x52, 0x68, 0xc2, 0xbe, 0x16, 0xd6, 0xc1, @@ -159,264 +155,288 @@ static const unsigned char test5_ok_512[] = { 0x6d, 0xe6, 0x66, 0x87, 0xbb, 0x42, 0x0e, 0x7c }; -static const unsigned char test5_ok_ripe[] = { +static constexpr unsigned char test5_ok_ripe[] = { 0x5d, 0x06, 0x89, 0xef, 0x49, 0xd2, 0xfa, 0xe5, 0x72, 0xb8, 0x81, 0xb1, 0x23, 0xa8, 0x5f, 0xfa, 0x21, 0x59, 0x5f, 0x36 }; extern "C" { - uint32_t my_strlen( const char *str ) { - uint32_t len = 0; - while(str[len]) ++len; - return len; - } - - bool my_memcmp( void *s1, void *s2, uint32_t n ) - { - unsigned char *c1 = (unsigned char *)s1; - unsigned char *c2 = (unsigned char *)s2; - for (uint32_t i = 0; i < n; ++i) { - if ( c1[i] != c2[i] ) { - return false; + uint32_t my_strlen( const char *str ) { + uint32_t len = 0; + while(str[len]) ++len; + return len; + } + + bool my_memcmp( void *s1, void *s2, uint32_t n ) + { + unsigned char *c1 = (unsigned char *)s1; + unsigned char *c2 = (unsigned char *)s2; + for (uint32_t i = 0; i < n; ++i) { + if ( c1[i] != c2[i] ) { + return false; + } } - } - return true; - } - + return true; + } 
} -struct sig_hash_key { +struct sig_hash_key_header { capi_checksum256 hash; - capi_public_key pk; - capi_signature sig; + uint32_t pk_len; + uint32_t sig_len; + + auto pk_base() const { + return ((const char *)this) + 40; + } + + auto sig_base() const { + return ((const char *)this) + 40 + pk_len; + } }; void test_crypto::test_recover_key_assert_true() { - sig_hash_key sh; - read_action_data( (char*)&sh, sizeof(sh) ); - assert_recover_key( &sh.hash, (const char*)&sh.sig, sizeof(sh.sig), (const char*)&sh.pk, sizeof(sh.pk) ); + char buffer[action_data_size()]; + read_action_data( buffer, action_data_size() ); + auto sh = (const sig_hash_key_header*)buffer; + + assert_recover_key( &sh->hash, sh->sig_base(), sh->sig_len, sh->pk_base(), sh->pk_len ); } void test_crypto::test_recover_key_assert_false() { - sig_hash_key sh; - read_action_data( (char*)&sh, sizeof(sh) ); - assert_recover_key( &sh.hash, (const char*)&sh.sig, sizeof(sh.sig), (const char*)&sh.pk, sizeof(sh.pk) ); + char buffer[action_data_size()]; + read_action_data( buffer, action_data_size() ); + auto sh = (const sig_hash_key_header*)buffer; + + assert_recover_key( &sh->hash, sh->sig_base(), sh->sig_len, sh->pk_base(), sh->pk_len ); eosio_assert( false, "should have thrown an error" ); } void test_crypto::test_recover_key() { - sig_hash_key sh; - read_action_data( (char*)&sh, sizeof(sh) ); - capi_public_key pk; - recover_key( &sh.hash, (const char*)&sh.sig, sizeof(sh.sig), pk.data, sizeof(pk) ); - for ( uint32_t i=0; i < sizeof(pk); i++ ) - if ( pk.data[i] != sh.pk.data[i] ) + char buffer[action_data_size()]; + read_action_data( buffer, action_data_size() ); + auto sh = (const sig_hash_key_header*)buffer; + + char recovered[sh->pk_len]; + auto result = recover_key( &sh->hash, sh->sig_base(), sh->sig_len, recovered, sh->pk_len ); + eosio_assert(result == sh->pk_len, "public key does not match"); + for ( uint32_t i=0; i < sh->pk_len; i++ ) + if ( recovered[i] != sh->pk_base()[i] ) eosio_assert( false, 
"public key does not match" ); } +void test_crypto::test_recover_key_partial() { + char buffer[action_data_size()]; + read_action_data( buffer, action_data_size() ); + auto sh = (const sig_hash_key_header*)buffer; + + auto recover_size = std::max(sh->pk_len / 2, 33); + char recovered[recover_size]; + auto result = recover_key( &sh->hash, sh->sig_base(), sh->sig_len, recovered, recover_size ); + eosio_assert(result == sh->pk_len, "recoverable key is not as long as provided key"); + for ( uint32_t i=0; i < recover_size; i++ ) + if ( recovered[i] != sh->pk_base()[i] ) + eosio_assert( false, "partial public key does not match" ); +} + void test_crypto::test_sha1() { - eosio::checksum160 tmp; + unsigned char tmp[20]; - tmp = eosio::sha1( test1, my_strlen(test1) ); - eosio_assert( my_memcmp((void *)test1_ok_1, &tmp, sizeof(eosio::checksum160)), "sha1 test1" ); + ::sha1( test1, my_strlen(test1), (capi_checksum160*)tmp ); + eosio_assert( my_memcmp((void *)test1_ok_1, &tmp, sizeof(tmp)), "sha1 test1" ); - tmp = eosio::sha1( test3, my_strlen(test3) ); - eosio_assert( my_memcmp((void *)test3_ok_1, &tmp, sizeof(eosio::checksum160)), "sha1 test3" ); + ::sha1( test3, my_strlen(test3), (capi_checksum160*)tmp ); + eosio_assert( my_memcmp((void *)test3_ok_1, &tmp, sizeof(tmp)), "sha1 test3" ); - tmp = eosio::sha1( test4, my_strlen(test4) ); - eosio_assert( my_memcmp((void *)test4_ok_1, &tmp, sizeof(eosio::checksum160)), "sha1 test4" ); + ::sha1( test4, my_strlen(test4), (capi_checksum160*)tmp ); + eosio_assert( my_memcmp((void *)test4_ok_1, &tmp, sizeof(tmp)), "sha1 test4" ); - tmp = eosio::sha1( test5, my_strlen(test5) ); - eosio_assert( my_memcmp((void *)test5_ok_1, &tmp, sizeof(eosio::checksum160)), "sha1 test5" ); + ::sha1( test5, my_strlen(test5), (capi_checksum160*)tmp ); + eosio_assert( my_memcmp((void *)test5_ok_1, &tmp, sizeof(tmp)), "sha1 test5" ); } void test_crypto::test_sha256() { - eosio::checksum256 tmp; + unsigned char tmp[32]; - tmp = eosio::sha256( test1, 
my_strlen(test1) ); - eosio_assert( my_memcmp((void *)test1_ok_256, &tmp, sizeof(eosio::checksum256)), "sha256 test1" ); + ::sha256( test1, my_strlen(test1), (capi_checksum256*)tmp); + eosio_assert( my_memcmp((void *)test1_ok_256, &tmp, sizeof(tmp)), "sha256 test1" ); - tmp = eosio::sha256( test3, my_strlen(test3) ); - eosio_assert( my_memcmp((void *)test3_ok_256, &tmp, sizeof(eosio::checksum256)), "sha256 test3" ); + ::sha256( test3, my_strlen(test3), (capi_checksum256*)tmp); + eosio_assert( my_memcmp((void *)test3_ok_256, &tmp, sizeof(tmp)), "sha256 test3" ); - tmp = eosio::sha256( test4, my_strlen(test4) ); - eosio_assert( my_memcmp((void *)test4_ok_256, &tmp, sizeof(eosio::checksum256)), "sha256 test4" ); + ::sha256( test4, my_strlen(test4), (capi_checksum256*)tmp); + eosio_assert( my_memcmp((void *)test4_ok_256, &tmp, sizeof(tmp)), "sha256 test4" ); - tmp = eosio::sha256( test5, my_strlen(test5) ); - eosio_assert( my_memcmp((void *)test5_ok_256, &tmp, sizeof(eosio::checksum256)), "sha256 test5" ); + ::sha256( test5, my_strlen(test5), (capi_checksum256*)tmp); + eosio_assert( my_memcmp((void *)test5_ok_256, &tmp, sizeof(tmp)), "sha256 test5" ); } void test_crypto::test_sha512() { - eosio::checksum512 tmp; + unsigned char tmp[64]; - tmp = eosio::sha512( test1, my_strlen(test1) ); - eosio_assert( my_memcmp((void *)test1_ok_512, &tmp, sizeof(eosio::checksum512)), "sha512 test1" ); + ::sha512( test1, my_strlen(test1), (capi_checksum512*)tmp ); + eosio_assert( my_memcmp((void *)test1_ok_512, &tmp, sizeof(tmp)), "sha512 test1" ); - tmp = eosio::sha512( test3, my_strlen(test3) ); - eosio_assert( my_memcmp((void *)test3_ok_512, &tmp, sizeof(eosio::checksum512)), "sha512 test3" ); + ::sha512( test3, my_strlen(test3), (capi_checksum512*)tmp ); + eosio_assert( my_memcmp((void *)test3_ok_512, &tmp, sizeof(tmp)), "sha512 test3" ); - tmp = eosio::sha512( test4, my_strlen(test4) ); - eosio_assert( my_memcmp((void *)test4_ok_512, &tmp, sizeof(eosio::checksum512)), "sha512 
test4" ); + ::sha512( test4, my_strlen(test4), (capi_checksum512*)tmp ); + eosio_assert( my_memcmp((void *)test4_ok_512, &tmp, sizeof(tmp)), "sha512 test4" ); - tmp = eosio::sha512( test5, my_strlen(test5) ); - eosio_assert( my_memcmp((void *)test5_ok_512, &tmp, sizeof(eosio::checksum512)), "sha512 test5" ); + ::sha512( test5, my_strlen(test5), (capi_checksum512*)tmp ); + eosio_assert( my_memcmp((void *)test5_ok_512, &tmp, sizeof(tmp)), "sha512 test5" ); } void test_crypto::test_ripemd160() { - eosio::checksum160 tmp; + unsigned char tmp[20]; - tmp = eosio::ripemd160( test1, my_strlen(test1) ); - eosio_assert( my_memcmp((void *)test1_ok_ripe, &tmp, sizeof(eosio::checksum160)), "ripemd160 test1" ); + ::ripemd160( test1, my_strlen(test1), (capi_checksum160*)tmp ); + eosio_assert( my_memcmp((void *)test1_ok_ripe, &tmp, sizeof(tmp)), "ripemd160 test1" ); - tmp = eosio::ripemd160( test3, my_strlen(test3) ); - eosio_assert( my_memcmp((void *)test3_ok_ripe, &tmp, sizeof(eosio::checksum160)), "ripemd160 test3" ); + ::ripemd160( test3, my_strlen(test3), (capi_checksum160*)tmp ); + eosio_assert( my_memcmp((void *)test3_ok_ripe, &tmp, sizeof(tmp)), "ripemd160 test3" ); - tmp = eosio::ripemd160( test4, my_strlen(test4) ); - eosio_assert( my_memcmp((void *)test4_ok_ripe, &tmp, sizeof(eosio::checksum160)), "ripemd160 test4" ); + ::ripemd160( test4, my_strlen(test4), (capi_checksum160*)tmp ); + eosio_assert( my_memcmp((void *)test4_ok_ripe, &tmp, sizeof(tmp)), "ripemd160 test4" ); - tmp = eosio::ripemd160( test5, my_strlen(test5) ); - eosio_assert( my_memcmp((void *)test5_ok_ripe, &tmp, sizeof(eosio::checksum160)), "ripemd160 test5" ); + ::ripemd160( test5, my_strlen(test5), (capi_checksum160*)tmp ); + eosio_assert( my_memcmp((void *)test5_ok_ripe, &tmp, sizeof(tmp)), "ripemd160 test5" ); } void test_crypto::sha256_null() { - eosio::checksum256 tmp; - tmp = eosio::sha256( nullptr, 100); - eosio_assert( false, "should've thrown an error" ); + unsigned char tmp[32]; + ::sha256( 
nullptr, 100, (capi_checksum256*)tmp); + eosio_assert( false, "should've thrown an error" ); } void test_crypto::sha1_no_data() { - eosio::checksum160 tmp; - - tmp = eosio::sha1( test2, my_strlen(test2) ); - eosio_assert( my_memcmp((void *)test2_ok_1, &tmp, sizeof(eosio::checksum160)), "sha1 test2" ); + unsigned char tmp[20]; + ::sha1( test2, my_strlen(test2), (capi_checksum160*)tmp ); + eosio_assert( my_memcmp((void *)test2_ok_1, &tmp, sizeof(tmp)), "sha1 test2" ); } void test_crypto::sha256_no_data() { - eosio::checksum256 tmp; - - tmp = eosio::sha256( test2, my_strlen(test2) ); - eosio_assert( my_memcmp((void *)test2_ok_256, &tmp, sizeof(eosio::checksum256)), "sha256 test2" ); + unsigned char tmp[32]; + ::sha256( test2, my_strlen(test2), (capi_checksum256*)tmp ); + eosio_assert( my_memcmp((void *)test2_ok_256, &tmp, sizeof(tmp)), "sha256 test2" ); } void test_crypto::sha512_no_data() { - eosio::checksum512 tmp; - - tmp = eosio::sha512( test2, my_strlen(test2) ); - eosio_assert( my_memcmp((void *)test2_ok_512, &tmp, sizeof(eosio::checksum512)), "sha512 test2" ); + unsigned char tmp[64]; + ::sha512( test2, my_strlen(test2), (capi_checksum512*)tmp ); + eosio_assert( my_memcmp((void *)test2_ok_512, &tmp, sizeof(tmp)), "sha512 test2" ); } void test_crypto::ripemd160_no_data() { - eosio::checksum160 tmp; - - tmp = eosio::ripemd160( test2, my_strlen(test2) ); - eosio_assert( my_memcmp((void *)test2_ok_ripe, &tmp, sizeof(eosio::checksum160)), "ripemd160 test2" ); + unsigned char tmp[20]; + ::ripemd160( test2, my_strlen(test2), (capi_checksum160*)tmp ); + eosio_assert( my_memcmp((void *)test2_ok_ripe, &tmp, sizeof(tmp)), "ripemd160 test2" ); } void test_crypto::assert_sha256_false() { - eosio::checksum256 tmp; + unsigned char tmp[32]; - tmp = eosio::sha256( test1, my_strlen(test1) ); - tmp.data()[0] ^= (uint64_t)(-1); - assert_sha256( test1, my_strlen(test1), tmp ); - - eosio_assert( false, "should have failed" ); + ::sha256( test1, my_strlen(test1), 
(capi_checksum256*)tmp ); + tmp[0] ^= (uint64_t)(-1); + assert_sha256( test1, my_strlen(test1), tmp ); + + eosio_assert( false, "should have failed" ); } void test_crypto::assert_sha256_true() { - eosio::checksum256 tmp; - - tmp = eosio::sha256( test1, my_strlen(test1) ); - assert_sha256( test1, my_strlen(test1), tmp ); - - tmp = eosio::sha256( test3, my_strlen(test3) ); - assert_sha256( test3, my_strlen(test3), tmp ); - - tmp = eosio::sha256( test4, my_strlen(test4) ); - assert_sha256( test4, my_strlen(test4), tmp ); - - tmp = eosio::sha256( test5, my_strlen(test5) ); - assert_sha256( test5, my_strlen(test5), tmp ); + unsigned char tmp[32]; + + ::sha256( test1, my_strlen(test1), (capi_checksum256*)tmp ); + assert_sha256( test1, my_strlen(test1), tmp ); + + ::sha256( test3, my_strlen(test3), (capi_checksum256*)tmp ); + assert_sha256( test3, my_strlen(test3), tmp ); + + ::sha256( test4, my_strlen(test4), (capi_checksum256*)tmp ); + assert_sha256( test4, my_strlen(test4), tmp ); + + ::sha256( test5, my_strlen(test5), (capi_checksum256*)tmp ); + assert_sha256( test5, my_strlen(test5), tmp ); } void test_crypto::assert_sha1_false() { - eosio::checksum160 tmp; + unsigned char tmp[20]; - tmp = eosio::sha1( test1, my_strlen(test1) ); - tmp.data()[0] ^= (uint64_t)(-1); - assert_sha1( test1, my_strlen(test1), tmp ); + ::sha1( test1, my_strlen(test1), (capi_checksum160*)tmp ); + tmp[0] ^= (uint64_t)(-1); + assert_sha1( test1, my_strlen(test1), tmp ); - eosio_assert( false, "should have failed" ); + eosio_assert( false, "should have failed" ); } void test_crypto::assert_sha1_true() { - eosio::checksum160 tmp; + unsigned char tmp[20]; - tmp = eosio::sha1( test1, my_strlen(test1) ); - assert_sha1( test1, my_strlen(test1), tmp ); + ::sha1( test1, my_strlen(test1), (capi_checksum160*)tmp ); + assert_sha1( test1, my_strlen(test1), tmp ); - tmp = eosio::sha1( test3, my_strlen(test3) ); - assert_sha1( test3, my_strlen(test3), tmp ); + ::sha1( test3, my_strlen(test3), 
(capi_checksum160*)tmp ); + assert_sha1( test3, my_strlen(test3), tmp ); - tmp = eosio::sha1( test4, my_strlen(test4) ); - assert_sha1( test4, my_strlen(test4), tmp ); + ::sha1( test4, my_strlen(test4), (capi_checksum160*)tmp); + assert_sha1( test4, my_strlen(test4), tmp ); - tmp = eosio::sha1( test5, my_strlen(test5) ); - assert_sha1( test5, my_strlen(test5), tmp ); + ::sha1( test5, my_strlen(test5), (capi_checksum160*)tmp); + assert_sha1( test5, my_strlen(test5), tmp ); } void test_crypto::assert_sha512_false() { - eosio::checksum512 tmp; + unsigned char tmp[64]; - tmp = eosio::sha512( test1, my_strlen(test1) ); - tmp.data()[0] ^= (uint64_t)(-1); - assert_sha512( test1, my_strlen(test1), tmp ); + ::sha512( test1, my_strlen(test1), (capi_checksum512*)tmp); + tmp[0] ^= (uint64_t)(-1); + assert_sha512( test1, my_strlen(test1), tmp ); - eosio_assert(false, "should have failed"); + eosio_assert(false, "should have failed"); } void test_crypto::assert_sha512_true() { - eosio::checksum512 tmp; + unsigned char tmp[64]; - tmp = eosio::sha512( test1, my_strlen(test1) ); - assert_sha512( test1, my_strlen(test1), tmp ); + ::sha512( test1, my_strlen(test1), (capi_checksum512*)tmp); + assert_sha512( test1, my_strlen(test1), tmp ); - tmp = eosio::sha512( test3, my_strlen(test3) ); - assert_sha512( test3, my_strlen(test3), tmp ); + ::sha512( test3, my_strlen(test3), (capi_checksum512*)tmp); + assert_sha512( test3, my_strlen(test3), tmp ); - tmp = eosio::sha512( test4, my_strlen(test4) ); - assert_sha512( test4, my_strlen(test4), tmp ); + ::sha512( test4, my_strlen(test4), (capi_checksum512*)tmp); + assert_sha512( test4, my_strlen(test4), tmp ); - tmp = eosio::sha512( test5, my_strlen(test5) ); - assert_sha512( test5, my_strlen(test5), tmp ); + ::sha512( test5, my_strlen(test5), (capi_checksum512*)tmp); + assert_sha512( test5, my_strlen(test5), tmp ); } void test_crypto::assert_ripemd160_false() { - eosio::checksum160 tmp; + unsigned char tmp[20]; - tmp = eosio::ripemd160( test1, 
my_strlen(test1) ); - tmp.data()[0] ^= (uint64_t)(-1); - assert_ripemd160( test1, my_strlen(test1), tmp ); + ::ripemd160( test1, my_strlen(test1), (capi_checksum160*)tmp); + tmp[0] ^= (uint64_t)(-1); + assert_ripemd160( test1, my_strlen(test1), tmp ); - eosio_assert( false, "should have failed" ); + eosio_assert( false, "should have failed" ); } void test_crypto::assert_ripemd160_true() { - eosio::checksum160 tmp; + unsigned char tmp[20]; - tmp = eosio::ripemd160( test1, my_strlen(test1) ); - assert_ripemd160( test1, my_strlen(test1), tmp ); + ::ripemd160( test1, my_strlen(test1), (capi_checksum160*)tmp); + assert_ripemd160( test1, my_strlen(test1), tmp ); - tmp = eosio::ripemd160( test3, my_strlen(test3) ); - assert_ripemd160( test3, my_strlen(test3), tmp ); + ::ripemd160( test3, my_strlen(test3), (capi_checksum160*)tmp); + assert_ripemd160( test3, my_strlen(test3), tmp ); - tmp = eosio::ripemd160( test4, my_strlen(test4) ); - assert_ripemd160( test4, my_strlen(test4), tmp ); + ::ripemd160( test4, my_strlen(test4), (capi_checksum160*)tmp); + assert_ripemd160( test4, my_strlen(test4), tmp ); - tmp = eosio::ripemd160( test5, my_strlen(test5) ); - assert_ripemd160( test5, my_strlen(test5), tmp ); + ::ripemd160( test5, my_strlen(test5), (capi_checksum160*)tmp); + assert_ripemd160( test5, my_strlen(test5), tmp ); } diff --git a/unittests/test-contracts/test_api/test_datastream.cpp b/unittests/test-contracts/test_api/test_datastream.cpp index ca0e00982f6..e81dbf7c1c7 100644 --- a/unittests/test-contracts/test_api/test_datastream.cpp +++ b/unittests/test-contracts/test_api/test_datastream.cpp @@ -1,7 +1,3 @@ -/** - * @file action_test.cpp - * @copyright defined in eos/LICENSE.txt - */ #include #include diff --git a/unittests/test-contracts/test_api/test_permission.cpp b/unittests/test-contracts/test_api/test_permission.cpp index 7650d8f40cd..c078a65be8e 100644 --- a/unittests/test-contracts/test_api/test_permission.cpp +++ 
b/unittests/test-contracts/test_api/test_permission.cpp @@ -1,7 +1,3 @@ -/** - * @file action_test.cpp - * @copyright defined in eos/LICENSE - */ #include #include diff --git a/unittests/test-contracts/test_api/test_print.cpp b/unittests/test-contracts/test_api/test_print.cpp index c0b5ab07e00..3b1f89b3199 100644 --- a/unittests/test-contracts/test_api/test_print.cpp +++ b/unittests/test-contracts/test_api/test_print.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include "test_api.hpp" diff --git a/unittests/test-contracts/test_api/test_transaction.cpp b/unittests/test-contracts/test_api/test_transaction.cpp index 535f9896e9b..801a06b26d9 100644 --- a/unittests/test-contracts/test_api/test_transaction.cpp +++ b/unittests/test-contracts/test_api/test_transaction.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/unittests/test-contracts/test_api/test_types.cpp b/unittests/test-contracts/test_api/test_types.cpp index a74026c33fc..0ba34a5e1af 100644 --- a/unittests/test-contracts/test_api/test_types.cpp +++ b/unittests/test-contracts/test_api/test_types.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include "test_api.hpp" diff --git a/unittests/test-contracts/test_api_db/test_api_db.cpp b/unittests/test-contracts/test_api_db/test_api_db.cpp index 7ab5ddf9ce5..1c3bc358d77 100644 --- a/unittests/test-contracts/test_api_db/test_api_db.cpp +++ b/unittests/test-contracts/test_api_db/test_api_db.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include "test_api_db.hpp" using namespace eosio; diff --git a/unittests/test-contracts/test_api_db/test_api_db.hpp b/unittests/test-contracts/test_api_db/test_api_db.hpp index 9487434d518..6196e9db103 100644 --- a/unittests/test-contracts/test_api_db/test_api_db.hpp +++ b/unittests/test-contracts/test_api_db/test_api_db.hpp @@ -1,7 +1,3 @@ -/** - * @file - * 
@copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/test_api_multi_index/test_api_multi_index.cpp b/unittests/test-contracts/test_api_multi_index/test_api_multi_index.cpp index 72e7d4e6a5b..14a819932d9 100644 --- a/unittests/test-contracts/test_api_multi_index/test_api_multi_index.cpp +++ b/unittests/test-contracts/test_api_multi_index/test_api_multi_index.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include "test_api_multi_index.hpp" using namespace eosio; diff --git a/unittests/test-contracts/test_api_multi_index/test_api_multi_index.hpp b/unittests/test-contracts/test_api_multi_index/test_api_multi_index.hpp index 73f48ea8e4c..03253e3fa67 100644 --- a/unittests/test-contracts/test_api_multi_index/test_api_multi_index.hpp +++ b/unittests/test-contracts/test_api_multi_index/test_api_multi_index.hpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #pragma once #include diff --git a/unittests/test-contracts/test_ram_limit/test_ram_limit.cpp b/unittests/test-contracts/test_ram_limit/test_ram_limit.cpp index 0bc9c72c052..a7ea39e31ed 100644 --- a/unittests/test-contracts/test_ram_limit/test_ram_limit.cpp +++ b/unittests/test-contracts/test_ram_limit/test_ram_limit.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ #include #include #include diff --git a/unittests/unapplied_transaction_queue_tests.cpp b/unittests/unapplied_transaction_queue_tests.cpp new file mode 100644 index 00000000000..bad4628d4fa --- /dev/null +++ b/unittests/unapplied_transaction_queue_tests.cpp @@ -0,0 +1,310 @@ +#include +#include +#include +#include + +using namespace eosio; +using namespace eosio::chain; + +BOOST_AUTO_TEST_SUITE(unapplied_transaction_queue_tests) + +auto unique_trx_meta_data() { + + static uint64_t nextid = 0; + ++nextid; + + signed_transaction trx; + account_name creator = config::system_account_name; + trx.actions.emplace_back( 
vector{{creator,config::active_name}}, + onerror{ nextid, "test", 4 }); + return transaction_metadata::create_no_recover_keys( packed_transaction( trx ), transaction_metadata::trx_type::input ); +} + +auto next( unapplied_transaction_queue& q ) { + transaction_metadata_ptr trx; + auto itr = q.begin(); + if( itr != q.end() ) { + trx = itr->trx_meta; + q.erase( itr ); + } + return trx; +} + +auto create_test_block_state( std::vector trx_metas ) { + signed_block_ptr block = std::make_shared(); + for( auto& trx_meta : trx_metas ) { + block->transactions.emplace_back( *trx_meta->packed_trx() ); + } + + block->producer = eosio::chain::config::system_account_name; + + auto priv_key = eosio::testing::base_tester::get_private_key( block->producer, "active" ); + auto pub_key = eosio::testing::base_tester::get_public_key( block->producer, "active" ); + + auto prev = std::make_shared(); + auto header_bmroot = digest_type::hash( std::make_pair( block->digest(), prev->blockroot_merkle.get_root() ) ); + auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, prev->pending_schedule.schedule_hash) ); + block->producer_signature = priv_key.sign( sig_digest ); + + vector signing_keys; + signing_keys.emplace_back( std::move( priv_key ) ); + + auto signer = [&]( digest_type d ) { + std::vector result; + result.reserve(signing_keys.size()); + for (const auto& k: signing_keys) + result.emplace_back(k.sign(d)); + return result; + }; + pending_block_header_state pbhs; + pbhs.producer = block->producer; + producer_authority_schedule schedule = { 0, { producer_authority{block->producer, block_signing_authority_v0{ 1, {{pub_key, 1}} } } } }; + pbhs.active_schedule = schedule; + pbhs.valid_block_signing_authority = block_signing_authority_v0{ 1, {{pub_key, 1}} }; + auto bsp = std::make_shared( + std::move( pbhs ), + std::move( block ), + std::move( trx_metas ), + protocol_feature_set(), + []( block_timestamp_type timestamp, + const flat_set& cur_features, + const vector& 
new_features ) + {}, + signer + ); + + return bsp; +} + +BOOST_AUTO_TEST_CASE( unapplied_transaction_queue_test ) try { + + unapplied_transaction_queue q; + BOOST_CHECK( q.empty() ); + BOOST_CHECK( q.size() == 0 ); + + auto trx1 = unique_trx_meta_data(); + auto trx2 = unique_trx_meta_data(); + auto trx3 = unique_trx_meta_data(); + auto trx4 = unique_trx_meta_data(); + auto trx5 = unique_trx_meta_data(); + auto trx6 = unique_trx_meta_data(); + auto trx7 = unique_trx_meta_data(); + auto trx8 = unique_trx_meta_data(); + auto trx9 = unique_trx_meta_data(); + + // empty + auto p = next( q ); + BOOST_CHECK( p == nullptr ); + + // 1 persisted + q.add_persisted( trx1 ); + BOOST_CHECK( q.size() == 1 ); + BOOST_REQUIRE( next( q ) == trx1 ); + BOOST_CHECK( q.size() == 0 ); + BOOST_REQUIRE( next( q ) == nullptr ); + BOOST_REQUIRE( next( q ) == nullptr ); + BOOST_CHECK( q.empty() ); + + // fifo persisted + q.add_persisted( trx1 ); + q.add_persisted( trx2 ); + q.add_persisted( trx3 ); + BOOST_CHECK( q.size() == 3 ); + BOOST_REQUIRE( next( q ) == trx1 ); + BOOST_CHECK( q.size() == 2 ); + BOOST_REQUIRE( next( q ) == trx2 ); + BOOST_CHECK( q.size() == 1 ); + BOOST_REQUIRE( next( q ) == trx3 ); + BOOST_CHECK( q.size() == 0 ); + BOOST_REQUIRE( next( q ) == nullptr ); + BOOST_CHECK( q.empty() ); + + // fifo aborted + q.add_aborted( { trx1, trx2, trx3 } ); + q.add_aborted( { trx1, trx2, trx3 } ); // duplicates ignored + BOOST_CHECK( q.size() == 3 ); + BOOST_REQUIRE( next( q ) == trx1 ); + BOOST_CHECK( q.size() == 2 ); + BOOST_REQUIRE( next( q ) == trx2 ); + BOOST_CHECK( q.size() == 1 ); + BOOST_REQUIRE( next( q ) == trx3 ); + BOOST_CHECK( q.size() == 0 ); + BOOST_REQUIRE( next( q ) == nullptr ); + BOOST_CHECK( q.empty() ); + + // clear applied + q.add_aborted( { trx1, trx2, trx3 } ); + q.clear_applied( create_test_block_state( { trx1, trx3, trx4 } ) ); + BOOST_CHECK( q.size() == 1 ); + BOOST_REQUIRE( next( q ) == trx2 ); + BOOST_CHECK( q.size() == 0 ); + BOOST_REQUIRE( next( q ) == 
nullptr ); + BOOST_CHECK( q.empty() ); + + // order: persisted, aborted + q.add_persisted( trx6 ); + q.add_aborted( { trx1, trx2, trx3 } ); + q.add_aborted( { trx4, trx5 } ); + q.add_persisted( trx7 ); + BOOST_CHECK( q.size() == 7 ); + BOOST_REQUIRE( next( q ) == trx6 ); + BOOST_CHECK( q.size() == 6 ); + BOOST_REQUIRE( next( q ) == trx7 ); + BOOST_CHECK( q.size() == 5 ); + BOOST_REQUIRE( next( q ) == trx1 ); + BOOST_CHECK( q.size() == 4 ); + BOOST_REQUIRE( next( q ) == trx2 ); + BOOST_CHECK( q.size() == 3 ); + BOOST_REQUIRE( next( q ) == trx3 ); + BOOST_CHECK( q.size() == 2 ); + BOOST_REQUIRE( next( q ) == trx4 ); + BOOST_CHECK( q.size() == 1 ); + BOOST_REQUIRE( next( q ) == trx5 ); + BOOST_CHECK( q.size() == 0 ); + BOOST_REQUIRE( next( q ) == nullptr ); + BOOST_CHECK( q.empty() ); + + // fifo forked, one fork + auto bs1 = create_test_block_state( { trx1, trx2 } ); + auto bs2 = create_test_block_state( { trx3, trx4, trx5 } ); + auto bs3 = create_test_block_state( { trx6 } ); + q.add_forked( { bs3, bs2, bs1, bs1 } ); // bs1 duplicate ignored + BOOST_CHECK( q.size() == 6 ); + BOOST_REQUIRE( next( q ) == trx1 ); + BOOST_CHECK( q.size() == 5 ); + BOOST_REQUIRE( next( q ) == trx2 ); + BOOST_CHECK( q.size() == 4 ); + BOOST_REQUIRE_EQUAL( next( q ), trx3 ); + BOOST_CHECK( q.size() == 3 ); + BOOST_REQUIRE( next( q ) == trx4 ); + BOOST_CHECK( q.size() == 2 ); + BOOST_REQUIRE( next( q ) == trx5 ); + BOOST_CHECK( q.size() == 1 ); + BOOST_REQUIRE( next( q ) == trx6 ); + BOOST_CHECK( q.size() == 0 ); + BOOST_REQUIRE( next( q ) == nullptr ); + BOOST_CHECK( q.empty() ); + + // fifo forked + auto bs4 = create_test_block_state( { trx7 } ); + q.add_forked( { bs1 } ); + q.add_forked( { bs3, bs2 } ); + q.add_forked( { bs4 } ); + BOOST_CHECK( q.size() == 7 ); + BOOST_REQUIRE( next( q ) == trx1 ); + BOOST_CHECK( q.size() == 6 ); + BOOST_REQUIRE( next( q ) == trx2 ); + BOOST_CHECK( q.size() == 5 ); + BOOST_REQUIRE_EQUAL( next( q ), trx3 ); + BOOST_CHECK( q.size() == 4 ); + BOOST_REQUIRE( 
next( q ) == trx4 ); + BOOST_CHECK( q.size() == 3 ); + BOOST_REQUIRE( next( q ) == trx5 ); + BOOST_CHECK( q.size() == 2 ); + BOOST_REQUIRE( next( q ) == trx6 ); + BOOST_CHECK( q.size() == 1 ); + BOOST_REQUIRE( next( q ) == trx7 ); + BOOST_CHECK( q.size() == 0 ); + BOOST_REQUIRE( next( q ) == nullptr ); + BOOST_CHECK( q.empty() ); + + auto trx11 = unique_trx_meta_data(); + auto trx12 = unique_trx_meta_data(); + auto trx13 = unique_trx_meta_data(); + auto trx14 = unique_trx_meta_data(); + auto trx15 = unique_trx_meta_data(); + auto trx16 = unique_trx_meta_data(); + auto trx17 = unique_trx_meta_data(); + auto trx18 = unique_trx_meta_data(); + auto trx19 = unique_trx_meta_data(); + + // fifo forked, multi forks + auto bs5 = create_test_block_state( { trx11, trx12, trx13 } ); + auto bs6 = create_test_block_state( { trx11, trx15 } ); + q.add_forked( { bs3, bs2, bs1 } ); + q.add_forked( { bs4 } ); + q.add_forked( { bs3, bs2 } ); // dups ignored + q.add_forked( { bs6, bs5 } ); + BOOST_CHECK_EQUAL( q.size(), 11 ); + BOOST_REQUIRE( next( q ) == trx1 ); + BOOST_CHECK( q.size() == 10 ); + BOOST_REQUIRE( next( q ) == trx2 ); + BOOST_CHECK( q.size() == 9 ); + BOOST_REQUIRE_EQUAL( next( q ), trx3 ); + BOOST_CHECK( q.size() == 8 ); + BOOST_REQUIRE( next( q ) == trx4 ); + BOOST_CHECK( q.size() == 7 ); + BOOST_REQUIRE( next( q ) == trx5 ); + BOOST_CHECK( q.size() == 6 ); + BOOST_REQUIRE( next( q ) == trx6 ); + BOOST_CHECK( q.size() == 5 ); + BOOST_REQUIRE( next( q ) == trx7 ); + BOOST_CHECK( q.size() == 4 ); + BOOST_REQUIRE_EQUAL( next( q ), trx11 ); + BOOST_CHECK( q.size() == 3 ); + BOOST_REQUIRE( next( q ) == trx12 ); + BOOST_CHECK( q.size() == 2 ); + BOOST_REQUIRE( next( q ) == trx13 ); + BOOST_CHECK( q.size() == 1 ); + BOOST_REQUIRE( next( q ) == trx15 ); + BOOST_CHECK( q.size() == 0 ); + BOOST_REQUIRE( next( q ) == nullptr ); + BOOST_CHECK( q.empty() ); + + // altogether, order fifo: persisted, forked, aborted + q.add_forked( { bs3, bs2, bs1 } ); + q.add_persisted( trx16 ); + 
q.add_aborted( { trx9, trx14 } ); + q.add_persisted( trx8 ); + q.add_aborted( { trx18, trx19 } ); + q.add_forked( { bs6, bs5, bs4 } ); + BOOST_CHECK( q.size() == 17 ); + BOOST_REQUIRE( next( q ) == trx16 ); + BOOST_CHECK( q.size() == 16 ); + BOOST_REQUIRE( next( q ) == trx8 ); + BOOST_CHECK( q.size() == 15 ); + BOOST_REQUIRE( next( q ) == trx1 ); + BOOST_CHECK( q.size() == 14 ); + BOOST_REQUIRE( next( q ) == trx2 ); + BOOST_CHECK( q.size() == 13 ); + BOOST_REQUIRE_EQUAL( next( q ), trx3 ); + BOOST_CHECK( q.size() == 12 ); + BOOST_REQUIRE( next( q ) == trx4 ); + BOOST_CHECK( q.size() == 11 ); + BOOST_REQUIRE( next( q ) == trx5 ); + BOOST_CHECK( q.size() == 10 ); + BOOST_REQUIRE( next( q ) == trx6 ); + BOOST_CHECK( q.size() == 9 ); + BOOST_REQUIRE( next( q ) == trx7 ); + BOOST_CHECK( q.size() == 8 ); + BOOST_REQUIRE( next( q ) == trx11 ); + BOOST_CHECK( q.size() == 7 ); + BOOST_REQUIRE_EQUAL( next( q ), trx12 ); + BOOST_CHECK( q.size() == 6 ); + BOOST_REQUIRE( next( q ) == trx13 ); + BOOST_CHECK( q.size() == 5 ); + BOOST_REQUIRE( next( q ) == trx15 ); + BOOST_CHECK( q.size() == 4 ); + BOOST_REQUIRE( next( q ) == trx9 ); + BOOST_CHECK( q.size() == 3 ); + BOOST_REQUIRE( next( q ) == trx14 ); + BOOST_CHECK( q.size() == 2 ); + BOOST_REQUIRE( next( q ) == trx18 ); + BOOST_CHECK( q.size() == 1 ); + BOOST_REQUIRE( next( q ) == trx19 ); + BOOST_CHECK( q.size() == 0 ); + BOOST_REQUIRE( next( q ) == nullptr ); + BOOST_CHECK( q.empty() ); + + q.add_forked( { bs3, bs2, bs1 } ); + q.add_aborted( { trx9, trx11 } ); + q.add_persisted( trx8 ); + q.clear(); + BOOST_CHECK( q.empty() ); + BOOST_CHECK( q.size() == 0 ); + BOOST_REQUIRE( next( q ) == nullptr ); + +} FC_LOG_AND_RETHROW() /// unapplied_transaction_queue_test + + +BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/wasm_tests.cpp b/unittests/wasm_tests.cpp index a0744439ccc..da482058b07 100644 --- a/unittests/wasm_tests.cpp +++ b/unittests/wasm_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt 
- */ #include #include @@ -12,7 +8,10 @@ #include #include +#include +#include #include +#include #include #include @@ -383,23 +382,23 @@ BOOST_FIXTURE_TEST_CASE( f32_f64_overflow_tests, tester ) try { int count = 0; auto check = [&](const char *wast_template, const char *op, const char *param) -> bool { count+=16; - create_accounts( {N(f_tests)+count} ); + create_accounts( {name(N(f_tests).to_uint64_t()+count)} ); produce_blocks(1); std::vector wast; wast.resize(strlen(wast_template) + 128); sprintf(&(wast[0]), wast_template, op, param); - set_code(N(f_tests)+count, &(wast[0])); + set_code(name(N(f_tests).to_uint64_t()+count), &(wast[0])); produce_blocks(10); signed_transaction trx; action act; - act.account = N(f_tests)+count; + act.account = name(N(f_tests).to_uint64_t()+count); act.name = N(); - act.authorization = vector{{N(f_tests)+count,config::active_name}}; + act.authorization = vector{{name(N(f_tests).to_uint64_t()+count),config::active_name}}; trx.actions.push_back(act); set_transaction_headers(trx); - trx.sign(get_private_key( N(f_tests)+count, "active" ), control->get_chain_id()); + trx.sign(get_private_key( name(N(f_tests).to_uint64_t()+count), "active" ), control->get_chain_id()); try { push_transaction(trx); @@ -561,6 +560,23 @@ BOOST_FIXTURE_TEST_CASE( check_entry_behavior_2, TESTER ) try { BOOST_CHECK_EQUAL(transaction_receipt::executed, receipt.status); } FC_LOG_AND_RETHROW() +BOOST_FIXTURE_TEST_CASE( entry_import, TESTER ) try { + create_accounts( {N(enterimport)} ); + produce_block(); + + set_code(N(enterimport), entry_import_wast); + + signed_transaction trx; + action act; + act.account = N(enterimport); + act.name = N(); + act.authorization = vector{{N(enterimport),config::active_name}}; + trx.actions.push_back(act); + + set_transaction_headers(trx); + trx.sign(get_private_key( N(enterimport), "active" ), control->get_chain_id()); + BOOST_CHECK_THROW(push_transaction(trx), abort_called); +} FC_LOG_AND_RETHROW() /** * Ensure we can load a wasm 
w/o memory @@ -608,7 +624,7 @@ BOOST_FIXTURE_TEST_CASE( check_global_reset, TESTER ) try { { action act; act.account = N(globalreset); - act.name = 1ULL; + act.name = name(1ULL); act.authorization = vector{{N(globalreset),config::active_name}}; trx.actions.push_back(act); } @@ -669,6 +685,42 @@ BOOST_FIXTURE_TEST_CASE( table_init_tests, TESTER ) try { } FC_LOG_AND_RETHROW() +BOOST_FIXTURE_TEST_CASE( table_init_oob, TESTER ) try { + create_accounts( {N(tableinitoob)} ); + produce_block(); + + signed_transaction trx; + trx.actions.emplace_back(vector{{N(tableinitoob),config::active_name}}, N(tableinitoob), N(), bytes{}); + trx.actions[0].authorization = vector{{N(tableinitoob),config::active_name}}; + + auto pushit_and_expect_fail = [&]() { + produce_block(); + trx.signatures.clear(); + set_transaction_headers(trx); + trx.sign(get_private_key(N(tableinitoob), "active"), control->get_chain_id()); + + //the unspecified_exception_code comes from WAVM, which manages to throw a WAVM specific exception + // up to where exec_one captures it and doesn't understand it + BOOST_CHECK_THROW(push_transaction(trx), eosio::chain::wasm_execution_error); + }; + + set_code(N(tableinitoob), table_init_oob_wast); + produce_block(); + + pushit_and_expect_fail(); + //make sure doing it again didn't lodge something funky in to a cache + pushit_and_expect_fail(); + + set_code(N(tableinitoob), table_init_oob_smaller_wast); + produce_block(); + pushit_and_expect_fail(); + pushit_and_expect_fail(); + + //an elem w/o a table is a setcode fail though + BOOST_CHECK_THROW(set_code(N(tableinitoob), table_init_oob_no_table_wast), eosio::chain::wasm_exception); + +} FC_LOG_AND_RETHROW() + BOOST_FIXTURE_TEST_CASE( memory_init_border, TESTER ) try { produce_blocks(2); @@ -834,7 +886,6 @@ BOOST_FIXTURE_TEST_CASE( lotso_globals, TESTER ) try { BOOST_CHECK_THROW(set_code(N(globals), string(ss.str() + "(global $z (mut i64) (i64.const -12)))") .c_str()), eosio::chain::wasm_execution_error); - } 
FC_LOG_AND_RETHROW() BOOST_FIXTURE_TEST_CASE( offset_check, TESTER ) try { @@ -1087,7 +1138,7 @@ BOOST_FIXTURE_TEST_CASE( check_table_maximum, TESTER ) try { { signed_transaction trx; action act; - act.name = 555ULL<<32 | 0ULL; //top 32 is what we assert against, bottom 32 is indirect call index + act.name = name(555ULL<<32 | 0ULL); //top 32 is what we assert against, bottom 32 is indirect call index act.account = N(tbl); act.authorization = vector{{N(tbl),config::active_name}}; trx.actions.push_back(act); @@ -1101,7 +1152,7 @@ BOOST_FIXTURE_TEST_CASE( check_table_maximum, TESTER ) try { { signed_transaction trx; action act; - act.name = 555ULL<<32 | 1022ULL; //top 32 is what we assert against, bottom 32 is indirect call index + act.name = name(555ULL<<32 | 1022ULL); //top 32 is what we assert against, bottom 32 is indirect call index act.account = N(tbl); act.authorization = vector{{N(tbl),config::active_name}}; trx.actions.push_back(act); @@ -1115,7 +1166,7 @@ BOOST_FIXTURE_TEST_CASE( check_table_maximum, TESTER ) try { { signed_transaction trx; action act; - act.name = 7777ULL<<32 | 1023ULL; //top 32 is what we assert against, bottom 32 is indirect call index + act.name = name(7777ULL<<32 | 1023ULL); //top 32 is what we assert against, bottom 32 is indirect call index act.account = N(tbl); act.authorization = vector{{N(tbl),config::active_name}}; trx.actions.push_back(act); @@ -1129,7 +1180,7 @@ BOOST_FIXTURE_TEST_CASE( check_table_maximum, TESTER ) try { { signed_transaction trx; action act; - act.name = 7778ULL<<32 | 1023ULL; //top 32 is what we assert against, bottom 32 is indirect call index + act.name = name(7778ULL<<32 | 1023ULL); //top 32 is what we assert against, bottom 32 is indirect call index act.account = N(tbl); act.authorization = vector{{N(tbl),config::active_name}}; trx.actions.push_back(act); @@ -1145,7 +1196,7 @@ BOOST_FIXTURE_TEST_CASE( check_table_maximum, TESTER ) try { { signed_transaction trx; action act; - act.name = 133ULL<<32 | 5ULL; 
//top 32 is what we assert against, bottom 32 is indirect call index + act.name = name(133ULL<<32 | 5ULL); //top 32 is what we assert against, bottom 32 is indirect call index act.account = N(tbl); act.authorization = vector{{N(tbl),config::active_name}}; trx.actions.push_back(act); @@ -1161,7 +1212,7 @@ BOOST_FIXTURE_TEST_CASE( check_table_maximum, TESTER ) try { { signed_transaction trx; action act; - act.name = eosio::chain::wasm_constraints::maximum_table_elements+54334; + act.name = name(eosio::chain::wasm_constraints::maximum_table_elements+54334); act.account = N(tbl); act.authorization = vector{{N(tbl),config::active_name}}; trx.actions.push_back(act); @@ -1181,7 +1232,7 @@ BOOST_FIXTURE_TEST_CASE( check_table_maximum, TESTER ) try { { signed_transaction trx; action act; - act.name = 555ULL<<32 | 1022ULL; //top 32 is what we assert against, bottom 32 is indirect call index + act.name = name(555ULL<<32 | 1022ULL); //top 32 is what we assert against, bottom 32 is indirect call index act.account = N(tbl); act.authorization = vector{{N(tbl),config::active_name}}; trx.actions.push_back(act); @@ -1194,7 +1245,7 @@ BOOST_FIXTURE_TEST_CASE( check_table_maximum, TESTER ) try { { signed_transaction trx; action act; - act.name = 7777ULL<<32 | 1023ULL; //top 32 is what we assert against, bottom 32 is indirect call index + act.name = name(7777ULL<<32 | 1023ULL); //top 32 is what we assert against, bottom 32 is indirect call index act.account = N(tbl); act.authorization = vector{{N(tbl),config::active_name}}; trx.actions.push_back(act); @@ -1208,7 +1259,7 @@ BOOST_FIXTURE_TEST_CASE( check_table_maximum, TESTER ) try { { signed_transaction trx; action act; - act.name = 888ULL; + act.name = name(888ULL); act.account = N(tbl); act.authorization = vector{{N(tbl),config::active_name}}; trx.actions.push_back(act); @@ -1333,7 +1384,7 @@ BOOST_FIXTURE_TEST_CASE( lotso_stack_4, TESTER ) try { ss << "(local i32)"; ss << " )"; ss << ")"; - BOOST_CHECK_THROW(set_code(N(stackz), 
ss.str().c_str()), fc::exception); + BOOST_CHECK_THROW(set_code(N(stackz), ss.str().c_str()), wasm_serialization_error); produce_blocks(1); } } FC_LOG_AND_RETHROW() @@ -1396,7 +1447,7 @@ BOOST_FIXTURE_TEST_CASE( lotso_stack_7, TESTER ) try { ss << "(param i32)"; ss << " )"; ss << ")"; - BOOST_CHECK_THROW(set_code(N(stackz), ss.str().c_str()), fc::exception); + BOOST_CHECK_THROW(set_code(N(stackz), ss.str().c_str()), wasm_execution_error); produce_blocks(1); } } FC_LOG_AND_RETHROW() @@ -1437,7 +1488,7 @@ BOOST_FIXTURE_TEST_CASE( lotso_stack_9, TESTER ) try { ss << "(local f32)"; ss << " )"; ss << ")"; - BOOST_CHECK_THROW(set_code(N(stackz), ss.str().c_str()), fc::exception); + BOOST_CHECK_THROW(set_code(N(stackz), ss.str().c_str()), wasm_execution_error); produce_blocks(1); } } FC_LOG_AND_RETHROW() @@ -1742,6 +1793,106 @@ BOOST_FIXTURE_TEST_CASE( big_maligned_host_ptr, TESTER ) try { produce_blocks(1); } FC_LOG_AND_RETHROW() +BOOST_FIXTURE_TEST_CASE( depth_tests, TESTER ) try { + produce_block(); + create_accounts( {N(depth)} ); + produce_block(); + + signed_transaction trx; + trx.actions.emplace_back(vector{{N(depth),config::active_name}}, N(depth), N(), bytes{}); + trx.actions[0].authorization = vector{{N(depth),config::active_name}}; + + auto pushit = [&]() { + produce_block(); + trx.signatures.clear(); + set_transaction_headers(trx); + trx.sign(get_private_key(N(depth), "active"), control->get_chain_id()); + push_transaction(trx); + }; + + //strictly wasm recursion to maximum_call_depth & maximum_call_depth+1 + string wasm_depth_okay = fc::format_string(depth_assert_wasm, fc::mutable_variant_object() + ("MAX_DEPTH", eosio::chain::wasm_constraints::maximum_call_depth)); + set_code(N(depth), wasm_depth_okay.c_str()); + pushit(); + + string wasm_depth_one_over = fc::format_string(depth_assert_wasm, fc::mutable_variant_object() + ("MAX_DEPTH", eosio::chain::wasm_constraints::maximum_call_depth+1)); + set_code(N(depth), wasm_depth_one_over.c_str()); + 
BOOST_CHECK_THROW(pushit(), wasm_execution_error); + + //wasm recursion but call an intrinsic as the last function instead + string intrinsic_depth_okay = fc::format_string(depth_assert_intrinsic, fc::mutable_variant_object() + ("MAX_DEPTH", eosio::chain::wasm_constraints::maximum_call_depth)); + set_code(N(depth), intrinsic_depth_okay.c_str()); + pushit(); + + string intrinsic_depth_one_over = fc::format_string(depth_assert_intrinsic, fc::mutable_variant_object() + ("MAX_DEPTH", eosio::chain::wasm_constraints::maximum_call_depth+1)); + set_code(N(depth), intrinsic_depth_one_over.c_str()); + BOOST_CHECK_THROW(pushit(), wasm_execution_error); + + //add a float operation in the mix to ensure any injected softfloat call doesn't count against limit + string wasm_float_depth_okay = fc::format_string(depth_assert_wasm_float, fc::mutable_variant_object() + ("MAX_DEPTH", eosio::chain::wasm_constraints::maximum_call_depth)); + set_code(N(depth), wasm_float_depth_okay.c_str()); + pushit(); + + string wasm_float_depth_one_over = fc::format_string(depth_assert_wasm_float, fc::mutable_variant_object() + ("MAX_DEPTH", eosio::chain::wasm_constraints::maximum_call_depth+1)); + set_code(N(depth), wasm_float_depth_one_over.c_str()); + BOOST_CHECK_THROW(pushit(), wasm_execution_error); + +} FC_LOG_AND_RETHROW() + +BOOST_FIXTURE_TEST_CASE( varuint_memory_flags_tests, TESTER ) try { + produce_block(); + create_accounts( {N(memflags)} ); + produce_block(); + + set_code(N(memflags), varuint_memory_flags); + produce_block(); + + signed_transaction trx; + action act; + act.account = N(memflags); + act.name = N(); + act.authorization = vector{{N(memflags),config::active_name}}; + trx.actions.push_back(act); + set_transaction_headers(trx); + trx.sign(get_private_key( N(memflags), "active" ), control->get_chain_id()); + push_transaction(trx); + produce_block(); +} FC_LOG_AND_RETHROW() + +// TODO: Update to use eos-vm once merged +BOOST_AUTO_TEST_CASE( code_size ) try { + using namespace IR; + 
using namespace Runtime; + using namespace Serialization; + std::vector code_start = { + 0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00, 0x01, 0x07, 0x01, 0x60, + 0x03, 0x7e, 0x7e, 0x7e, 0x00, 0x03, 0x02, 0x01, 0x00, 0x07, 0x09, 0x01, + 0x05, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x00, 0x00, 0x0a, 0x8b, 0x80, 0x80, + 0x0a, 0x01, 0x86, 0x80, 0x80, 0x0a, 0x00 + }; + + std::vector code_end = { 0x0b }; + + std::vector code_function_body; + code_function_body.insert(code_function_body.end(), wasm_constraints::maximum_code_size + 4, 0x01); + + std::vector code; + code.insert(code.end(), code_start.begin(), code_start.end()); + code.insert(code.end(), code_function_body.begin(), code_function_body.end()); + code.insert(code.end(), code_end.begin(), code_end.end()); + + Module module; + Serialization::MemoryInputStream stream((const U8*)code.data(), code.size()); + BOOST_CHECK_THROW(WASM::serialize(stream, module), FatalSerializationException); + +} FC_LOG_AND_RETHROW() + // TODO: restore net_usage_tests #if 0 BOOST_FIXTURE_TEST_CASE(net_usage_tests, tester ) try { diff --git a/unittests/whitelist_blacklist_tests.cpp b/unittests/whitelist_blacklist_tests.cpp index 770f8f6ea34..faabea39220 100644 --- a/unittests/whitelist_blacklist_tests.cpp +++ b/unittests/whitelist_blacklist_tests.cpp @@ -1,7 +1,3 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ #include #include #include @@ -30,41 +26,17 @@ class whitelist_blacklist_tester { public: whitelist_blacklist_tester() {} - static controller::config get_default_chain_configuration( const fc::path& p ) { - controller::config cfg; - cfg.blocks_dir = p / config::default_blocks_dir_name; - cfg.state_dir = p / config::default_state_dir_name; - cfg.state_size = 1024*1024*8; - cfg.state_guard_size = 0; - cfg.reversible_cache_size = 1024*1024*8; - cfg.reversible_guard_size = 0; - cfg.contracts_console = true; - - cfg.genesis.initial_timestamp = fc::time_point::from_iso_string("2020-01-01T00:00:00.000"); - cfg.genesis.initial_key 
= base_tester::get_public_key( config::system_account_name, "active" ); - - for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) { - if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wavm")) - cfg.wasm_runtime = chain::wasm_interface::vm_type::wavm; - else if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--wabt")) - cfg.wasm_runtime = chain::wasm_interface::vm_type::wabt; - } - - return cfg; - } - void init( bool bootstrap = true ) { FC_ASSERT( !chain, "chain is already up" ); - auto cfg = get_default_chain_configuration( tempdir.path() ); - cfg.sender_bypass_whiteblacklist = sender_bypass_whiteblacklist; - cfg.actor_whitelist = actor_whitelist; - cfg.actor_blacklist = actor_blacklist; - cfg.contract_whitelist = contract_whitelist; - cfg.contract_blacklist = contract_blacklist; - cfg.action_blacklist = action_blacklist; - - chain.emplace(cfg); + chain.emplace(tempdir, [&](controller::config& cfg) { + cfg.sender_bypass_whiteblacklist = sender_bypass_whiteblacklist; + cfg.actor_whitelist = actor_whitelist; + cfg.actor_blacklist = actor_blacklist; + cfg.contract_whitelist = contract_whitelist; + cfg.contract_blacklist = contract_blacklist; + cfg.action_blacklist = action_blacklist; + }, !shutdown_called); wdump((last_produced_block)); chain->set_last_produced_block_map( last_produced_block ); @@ -90,6 +62,7 @@ class whitelist_blacklist_tester { last_produced_block = chain->get_last_produced_block_map(); wdump((last_produced_block)); chain.reset(); + shutdown_called = true; } transaction_trace_ptr transfer( account_name from, account_name to, string quantity = "1.00 TOK" ) { @@ -101,7 +74,9 @@ class whitelist_blacklist_tester { ); } + private: fc::temp_directory tempdir; // Must come before chain + public: fc::optional chain; flat_set sender_bypass_whiteblacklist; flat_set actor_whitelist; @@ -110,6 +85,7 @@ class whitelist_blacklist_tester { flat_set contract_blacklist; flat_set< pair > 
action_blacklist; map last_produced_block; + bool shutdown_called = false; }; struct transfer_args { @@ -453,7 +429,7 @@ BOOST_AUTO_TEST_CASE( actor_blacklist_inline_deferred ) { try { tester1.chain->set_abi( N(charlie), contracts::deferred_test_abi().data() ); tester1.chain->produce_blocks(); - auto auth = authority(eosio::testing::base_tester::get_public_key("alice", "active")); + auto auth = authority(eosio::testing::base_tester::get_public_key(name("alice"), "active")); auth.accounts.push_back( permission_level_weight{{N(alice), config::eosio_code_name}, 1} ); tester1.chain->push_action( N(eosio), N(updateauth), N(alice), mvo() @@ -463,7 +439,7 @@ BOOST_AUTO_TEST_CASE( actor_blacklist_inline_deferred ) { try { ( "auth", auth ) ); - auth = authority(eosio::testing::base_tester::get_public_key("bob", "active")); + auth = authority(eosio::testing::base_tester::get_public_key(name("bob"), "active")); auth.accounts.push_back( permission_level_weight{{N(alice), config::eosio_code_name}, 1} ); auth.accounts.push_back( permission_level_weight{{N(bob), config::eosio_code_name}, 1} ); @@ -474,7 +450,7 @@ BOOST_AUTO_TEST_CASE( actor_blacklist_inline_deferred ) { try { ( "auth", auth ) ); - auth = authority(eosio::testing::base_tester::get_public_key("charlie", "active")); + auth = authority(eosio::testing::base_tester::get_public_key(name("charlie"), "active")); auth.accounts.push_back( permission_level_weight{{N(charlie), config::eosio_code_name}, 1} ); tester1.chain->push_action( N(eosio), N(updateauth), N(charlie), mvo() @@ -597,7 +573,7 @@ BOOST_AUTO_TEST_CASE( blacklist_sender_bypass ) { try { tester1.chain->set_abi( N(charlie), contracts::deferred_test_abi().data() ); tester1.chain->produce_blocks(); - auto auth = authority(eosio::testing::base_tester::get_public_key("alice", "active")); + auto auth = authority(eosio::testing::base_tester::get_public_key(name("alice"), "active")); auth.accounts.push_back( permission_level_weight{{N(alice), config::eosio_code_name}, 
1} ); tester1.chain->push_action( N(eosio), N(updateauth), N(alice), mvo() @@ -607,7 +583,7 @@ BOOST_AUTO_TEST_CASE( blacklist_sender_bypass ) { try { ( "auth", auth ) ); - auth = authority(eosio::testing::base_tester::get_public_key("bob", "active")); + auth = authority(eosio::testing::base_tester::get_public_key(name("bob"), "active")); auth.accounts.push_back( permission_level_weight{{N(bob), config::eosio_code_name}, 1} ); tester1.chain->push_action( N(eosio), N(updateauth), N(bob), mvo() @@ -617,7 +593,7 @@ BOOST_AUTO_TEST_CASE( blacklist_sender_bypass ) { try { ( "auth", auth ) ); - auth = authority(eosio::testing::base_tester::get_public_key("charlie", "active")); + auth = authority(eosio::testing::base_tester::get_public_key(name("charlie"), "active")); auth.accounts.push_back( permission_level_weight{{N(charlie), config::eosio_code_name}, 1} ); tester1.chain->push_action( N(eosio), N(updateauth), N(charlie), mvo() @@ -738,18 +714,10 @@ BOOST_AUTO_TEST_CASE( blacklist_sender_bypass ) { try { } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE( greylist_limit_tests ) { try { - controller::config contrl_config; - { - // Hack to get the default controller config used in tester (since v1.8.x does not have the base_tester::default_config helper function). 
- tester temp( setup_policy::none ); - contrl_config = temp.get_config(); - } - fc::temp_directory tempdir; - contrl_config.blocks_dir = tempdir.path() / config::default_blocks_dir_name; - contrl_config.state_dir = tempdir.path() / config::default_state_dir_name; + auto conf_genesis = tester::default_config( tempdir ); - auto& cfg = contrl_config.genesis.initial_configuration; + auto& cfg = conf_genesis.second.initial_configuration; cfg.max_block_net_usage = 128 * 1024; // 64 KiB max block size cfg.target_block_net_usage_pct = config::percent_1/10; @@ -762,13 +730,13 @@ BOOST_AUTO_TEST_CASE( greylist_limit_tests ) { try { cfg.min_transaction_cpu_usage = 100; // Empty blocks (consisting of only onblock) would be below the target. // But all it takes is one transaction in the block to be above the target. - tester c( contrl_config ); + tester c( conf_genesis.first, conf_genesis.second ); c.execute_setup_policy( setup_policy::full ); const resource_limits_manager& rm = c.control->get_resource_limits_manager(); - const auto& user_account = name(N(user)); - const auto& other_account = name(N(other)); + const auto& user_account = N(user); + const auto& other_account = N(other); c.create_accounts( {user_account, other_account} );