From 75d214a3b5fb213e708bd7f51ea47136c0af9ff2 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 7 Jan 2021 11:17:42 -0500 Subject: [PATCH 01/52] llvm 11 support for EOS VM OC --- CMakeLists.txt | 4 +-- .../webassembly/eos-vm-oc/LLVMEmitIR.cpp | 31 ++++++++++++------- .../chain/webassembly/eos-vm-oc/LLVMJIT.cpp | 1 + 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index de23e74c7d3..a268bcc4e10 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -73,8 +73,8 @@ if(CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT WIN32) # EOS VM OC requires LLVM, but move the check up here to a central location so that the EosioTester.cmakes # can be created with the exact version found find_package(LLVM REQUIRED CONFIG) - if(LLVM_VERSION_MAJOR VERSION_LESS 7 OR LLVM_VERSION_MAJOR VERSION_GREATER 10) - message(FATAL_ERROR "EOSIO requires an LLVM version 7.0 to 10.0") + if(LLVM_VERSION_MAJOR VERSION_LESS 7 OR LLVM_VERSION_MAJOR VERSION_GREATER_EQUAL 12) + message(FATAL_ERROR "EOSIO requires an LLVM version 7 through 11") endif() endif() endif() diff --git a/libraries/chain/webassembly/eos-vm-oc/LLVMEmitIR.cpp b/libraries/chain/webassembly/eos-vm-oc/LLVMEmitIR.cpp index 5423207b00f..0f4d953f30f 100644 --- a/libraries/chain/webassembly/eos-vm-oc/LLVMEmitIR.cpp +++ b/libraries/chain/webassembly/eos-vm-oc/LLVMEmitIR.cpp @@ -345,6 +345,13 @@ namespace LLVMJIT "eosvmoc_internal.div0_or_overflow",FunctionType::get(),{}); } + //llvm11 removed inferring the function type automatically, plumb CreateCalls through here as done for 10 & earlier + llvm::CallInst* createCall(llvm::Value* Callee, llvm::ArrayRef<llvm::Value*> Args) { + auto* PTy = llvm::cast<llvm::PointerType>(Callee->getType()); + auto* FTy = llvm::cast<llvm::FunctionType>(PTy->getElementType()); + return irBuilder.CreateCall(FTy, Callee, Args); + } + llvm::Value* getLLVMIntrinsic(const std::initializer_list<llvm::Type*>& argTypes,llvm::Intrinsic::ID id) { return llvm::Intrinsic::getDeclaration(moduleContext.llvmModule,id,llvm::ArrayRef<llvm::Type*>(argTypes.begin(),argTypes.end())); } @@ -356,7 +363,7 @@ namespace LLVMJIT const eosio::chain::eosvmoc::intrinsic_entry& ie = eosio::chain::eosvmoc::get_intrinsic_map().at(intrinsicName); llvm::Value* ic = irBuilder.CreateLoad( emitLiteralPointer((void*)(OFFSET_OF_FIRST_INTRINSIC-ie.ordinal*8), llvmI64Type->getPointerTo(256)) ); llvm::Value* itp = irBuilder.CreateIntToPtr(ic, asLLVMType(ie.type)->getPointerTo()); - return irBuilder.CreateCall(itp,llvm::ArrayRef<llvm::Value*>(args.begin(),args.end())); + return createCall(itp,llvm::ArrayRef<llvm::Value*>(args.begin(),args.end())); } // A helper function to emit a conditional call to a non-returning intrinsic function. @@ -707,7 +714,7 @@ namespace LLVMJIT popMultiple(llvmArgs,calleeType->parameters.size()); // Call the function. 
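For context on the createCall shim added in the hunk above: through LLVM 10, IRBuilder::CreateCall accepted a bare llvm::Value* callee and inferred the llvm::FunctionType from the callee's pointer type; LLVM 11 dropped that inference, so the function type has to be recovered and passed explicitly. A minimal standalone sketch of the two call shapes, assuming a pre-opaque-pointer LLVM (where getElementType() on the pointer type is still available) — the emitCall name and its parameters are illustrative, not part of the patch:

    #include "llvm/IR/IRBuilder.h"

    // Sketch only: the CreateCall API before and after the LLVM 11 break.
    llvm::CallInst* emitCall(llvm::IRBuilder<>& irBuilder, llvm::Value* callee,
                             llvm::ArrayRef<llvm::Value*> args) {
    #if LLVM_VERSION_MAJOR < 11
       // 10 and earlier: the function type is inferred from callee's type.
       return irBuilder.CreateCall(callee, args);
    #else
       // 11 and later: recover the function type by hand and pass it explicitly.
       auto* ptrTy = llvm::cast<llvm::PointerType>(callee->getType());
       auto* fnTy  = llvm::cast<llvm::FunctionType>(ptrTy->getElementType());
       return irBuilder.CreateCall(fnTy, callee, args);
    #endif
    }

Routing every call site through one such helper keeps the emitter source identical across all the LLVM versions the build accepts, which is why the remaining hunks simply swap irBuilder.CreateCall(...) for createCall(...).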
- auto result = irBuilder.CreateCall(callee,llvm::ArrayRef<llvm::Value*>(llvmArgs,calleeType->parameters.size())); + auto result = createCall(callee,llvm::ArrayRef<llvm::Value*>(llvmArgs,calleeType->parameters.size())); if(isExit) { irBuilder.CreateUnreachable(); enterUnreachable(); } @@ -758,7 +765,7 @@ namespace LLVMJIT llvm::Value* running_code_start = irBuilder.CreateLoad(emitLiteralPointer((void*)OFFSET_OF_CONTROL_BLOCK_MEMBER(running_code_base), llvmI64Type->getPointerTo(256))); llvm::Value* offset_from_start = irBuilder.CreateAdd(running_code_start, functionInfo); llvm::Value* ptr_cast = irBuilder.CreateIntToPtr(offset_from_start, functionPointerType); - auto result = irBuilder.CreateCall(ptr_cast,llvm::ArrayRef<llvm::Value*>(llvmArgs,calleeType->parameters.size())); + auto result = createCall(ptr_cast,llvm::ArrayRef<llvm::Value*>(llvmArgs,calleeType->parameters.size())); // Push the result on the operand stack. if(calleeType->ret != ResultType::none) { push(result); } @@ -795,7 +802,7 @@ namespace LLVMJIT PN->addIncoming(offset_from_start, is_code_offset_block); llvm::Value* ptr_cast = irBuilder.CreateIntToPtr(PN, functionPointerType); - auto result = irBuilder.CreateCall(ptr_cast,llvm::ArrayRef<llvm::Value*>(llvmArgs,calleeType->parameters.size())); + auto result = createCall(ptr_cast,llvm::ArrayRef<llvm::Value*>(llvmArgs,calleeType->parameters.size())); // Push the result on the operand stack. if(calleeType->ret != ResultType::none) { push(result); } @@ -900,8 +907,10 @@ namespace LLVMJIT #if LLVM_VERSION_MAJOR < 10 #define LOAD_STORE_ALIGNMENT_PARAM 1 -#else +#elif LLVM_VERSION_MAJOR == 10 #define LOAD_STORE_ALIGNMENT_PARAM llvm::MaybeAlign(1) +#else + #define LOAD_STORE_ALIGNMENT_PARAM llvm::Align(1) #endif EMIT_LOAD_OP(i32,load8_s,llvmI8Type,0,irBuilder.CreateSExt,LOAD_STORE_ALIGNMENT_PARAM) EMIT_LOAD_OP(i32,load8_u,llvmI8Type,0,irBuilder.CreateZExt,LOAD_STORE_ALIGNMENT_PARAM) @@ -1072,9 +1081,9 @@ namespace LLVMJIT EMIT_INT_BINARY_OP(ge_u, coerceBoolToI32(irBuilder.CreateICmpUGE(left, right))) #endif - EMIT_INT_UNARY_OP(clz,irBuilder.CreateCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::ctlz),llvm::ArrayRef<llvm::Value*>({operand,emitLiteral(false)}))) - EMIT_INT_UNARY_OP(ctz,irBuilder.CreateCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::cttz),llvm::ArrayRef<llvm::Value*>({operand,emitLiteral(false)}))) - EMIT_INT_UNARY_OP(popcnt,irBuilder.CreateCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::ctpop),llvm::ArrayRef<llvm::Value*>({operand}))) + EMIT_INT_UNARY_OP(clz,createCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::ctlz),llvm::ArrayRef<llvm::Value*>({operand,emitLiteral(false)}))) + EMIT_INT_UNARY_OP(ctz,createCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::cttz),llvm::ArrayRef<llvm::Value*>({operand,emitLiteral(false)}))) + EMIT_INT_UNARY_OP(popcnt,createCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::ctpop),llvm::ArrayRef<llvm::Value*>({operand}))) EMIT_INT_UNARY_OP(eqz,coerceBoolToI32(irBuilder.CreateICmpEQ(operand,typedZeroConstants[(Uptr)type]))) // EMIT_FP_BINARY_OP(add,irBuilder.CreateFAdd(left,right)) EMIT_FP_BINARY_OP(sub,irBuilder.CreateFSub(left,right)) EMIT_FP_BINARY_OP(mul,irBuilder.CreateFMul(left,right)) EMIT_FP_BINARY_OP(div,irBuilder.CreateFDiv(left,right)) - EMIT_FP_BINARY_OP(copysign,irBuilder.CreateCall(getLLVMIntrinsic({left->getType()},llvm::Intrinsic::copysign),llvm::ArrayRef<llvm::Value*>({left,right}))) + EMIT_FP_BINARY_OP(copysign,createCall(getLLVMIntrinsic({left->getType()},llvm::Intrinsic::copysign),llvm::ArrayRef<llvm::Value*>({left,right}))) EMIT_FP_UNARY_OP(neg,irBuilder.CreateFNeg(operand)) - 
EMIT_FP_UNARY_OP(abs,irBuilder.CreateCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::fabs),llvm::ArrayRef<llvm::Value*>({operand}))) - EMIT_FP_UNARY_OP(sqrt,irBuilder.CreateCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::sqrt),llvm::ArrayRef<llvm::Value*>({operand}))) + EMIT_FP_UNARY_OP(abs,createCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::fabs),llvm::ArrayRef<llvm::Value*>({operand}))) + EMIT_FP_UNARY_OP(sqrt,createCall(getLLVMIntrinsic({operand->getType()},llvm::Intrinsic::sqrt),llvm::ArrayRef<llvm::Value*>({operand}))) EMIT_FP_BINARY_OP(eq,coerceBoolToI32(irBuilder.CreateFCmpOEQ(left,right))) EMIT_FP_BINARY_OP(ne,coerceBoolToI32(irBuilder.CreateFCmpUNE(left,right))) diff --git a/libraries/chain/webassembly/eos-vm-oc/LLVMJIT.cpp b/libraries/chain/webassembly/eos-vm-oc/LLVMJIT.cpp index 340e5bf5dca..8345beeb799 100644 --- a/libraries/chain/webassembly/eos-vm-oc/LLVMJIT.cpp +++ b/libraries/chain/webassembly/eos-vm-oc/LLVMJIT.cpp @@ -40,6 +40,7 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND #include "llvm/Object/SymbolSize.h" #include "llvm/Support/Debug.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/FileSystem.h" #include "llvm/Support/TargetSelect.h" #include "llvm/Support/Host.h" #include "llvm/Support/DynamicLibrary.h" From e14b2fc09a6a8d2d79fdcf8ab4febadeca41720a Mon Sep 17 00:00:00 2001 From: Edgar Gonzalez Date: Thu, 14 Jan 2021 12:44:09 -0600 Subject: [PATCH 02/52] add step to the pipeline to build and push to dockerhub on release branch 2.0.x --- .cicd/create-docker-from-binary.sh | 52 ++++++++++++++++++++++++++++++ .cicd/generate-pipeline.sh | 7 ++++ docker/dockerfile | 8 +++++ 3 files changed, 67 insertions(+) create mode 100755 .cicd/create-docker-from-binary.sh create mode 100644 docker/dockerfile diff --git a/.cicd/create-docker-from-binary.sh b/.cicd/create-docker-from-binary.sh new file mode 100755 index 00000000000..e7bdb07d43b --- /dev/null +++ b/.cicd/create-docker-from-binary.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +set -euo pipefail + +buildkite-agent artifact download '*.deb' --step ':ubuntu: Ubuntu 18.04 - Package Builder' . +echo ":done: download successful" + +SANITIZED_BRANCH=$(echo "$BUILDKITE_BRANCH" | sed 's.^/..' | sed 's/[:/]/_/g') +SANITIZED_TAG=$(echo "$BUILDKITE_TAG" | sed 's.^/..' | tr '/' '_') +echo "$SANITIZED_BRANCH" +echo "$SANITIZED_TAG" + +# do docker build +echo ":docker::build: Building image..." +DOCKERHUB_REGISTRY="docker.io/eosio/eos" + +BUILD_TAG=${BUILDKITE_BUILD_NUMBER:-latest} +DOCKER_BUILD_GEN="docker build -t eos_image:$BUILD_TAG -f ./docker/dockerfile ." +echo "$ $DOCKER_BUILD_GEN" +eval $DOCKER_BUILD_GEN + +#tag and push on each destination AWS & DOCKERHUB + +EOSIO_REGS=("$EOSIO_REGISTRY" "$DOCKERHUB_REGISTRY") +for REG in ${EOSIO_REGS[@]}; do + DOCKER_TAG_COMMIT="docker tag eos_image:$BUILD_TAG $REG:$BUILDKITE_COMMIT" + DOCKER_TAG_BRANCH="docker tag eos_image:$BUILD_TAG $REG:$SANITIZED_BRANCH" + echo -e "$ Tagging Images: \n$DOCKER_TAG_COMMIT \n$DOCKER_TAG_BRANCH" + eval $DOCKER_TAG_COMMIT + eval $DOCKER_TAG_BRANCH + DOCKER_PUSH_COMMIT="docker push $REG:$BUILDKITE_COMMIT" + DOCKER_PUSH_BRANCH="docker push $REG:$SANITIZED_BRANCH" + echo -e "$ Pushing Images: \n$DOCKER_PUSH_COMMIT \n$DOCKER_PUSH_BRANCH" + eval $DOCKER_PUSH_COMMIT + eval $DOCKER_PUSH_BRANCH + CLEAN_IMAGE_COMMIT="docker rmi $REG:$BUILDKITE_COMMIT" + CLEAN_IMAGE_BRANCH="docker rmi $REG:$SANITIZED_BRANCH" + echo -e "Cleaning Up: \n$CLEAN_IMAGE_COMMIT \n$CLEAN_IMAGE_BRANCH$" + eval $CLEAN_IMAGE_COMMIT + eval $CLEAN_IMAGE_BRANCH + if [[ ! 
-z "$SANITIZED_TAG" ]]; then + DOCKER_TAG="docker tag eos_image $REG:$SANITIZED_TAG" + DOCKER_REM="docker rmi $REG:$SANITIZED_TAG" + echo -e "$ \n Tagging Image: \n$DOCKER_TAG \n Cleaning Up: \n$DOCKER_REM" + eval $DOCKER_TAG + eval $DOCKER_REM + fi +done + +DOCKER_GEN="docker rmi eos_image:$BUILD_TAG" +echo "Clean up base image" +eval $DOCKER_GEN \ No newline at end of file diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index c94d88d4c04..4d8a5c52d3e 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -583,5 +583,12 @@ cat < Date: Tue, 19 Jan 2021 17:28:14 -0500 Subject: [PATCH 03/52] Add debug code to print in the generate-pipeline.sh --- .cicd/generate-pipeline.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 4d8a5c52d3e..59cb917a98c 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -91,6 +91,11 @@ nIFS=$IFS # fix array splitting (\n won't work) echo ' - wait' echo '' # build steps +if [[ "$DEBUG" == 'true' ]]; + echo '# PLATFORMS_JSON_ARRAY' + echo "# $(echo "$PLATFORMS_JSON_ARRAY" | jq -c '.')" + echo '' +fi echo ' # builds' echo $PLATFORMS_JSON_ARRAY | jq -cr '.[]' | while read -r PLATFORM_JSON; do if [[ ! "$(echo "$PLATFORM_JSON" | jq -r .FILE_NAME)" =~ 'macos' ]]; then From 4f0e2e8ca35db64be509d7f56ad4e5e5e381a3ae Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Tue, 19 Jan 2021 17:39:24 -0500 Subject: [PATCH 04/52] Fix syntax error --- .cicd/generate-pipeline.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 59cb917a98c..27b964ad41a 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -91,7 +91,7 @@ nIFS=$IFS # fix array splitting (\n won't work) echo ' - wait' echo '' # build steps -if [[ "$DEBUG" == 'true' ]]; +if [[ "$DEBUG" == 'true' ]]; then echo '# PLATFORMS_JSON_ARRAY' echo "# $(echo "$PLATFORMS_JSON_ARRAY" | jq -c '.')" echo '' From f9a5db80a41c2b0261a84a65d5ad81814d35f047 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Tue, 19 Jan 2021 19:48:54 -0500 Subject: [PATCH 05/52] Isolate latest Ubuntu version from PLATFORMS_JSON_ARRAY --- .cicd/generate-pipeline.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 27b964ad41a..b07645e1f3f 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -91,9 +91,12 @@ nIFS=$IFS # fix array splitting (\n won't work) echo ' - wait' echo '' # build steps +export LATEST_UBUNTU="$(echo "$PLATFORMS_JSON_ARRAY" | jq -c 'map(select(.PLATFORM_NAME == "ubuntu")) | sort_by(.VERSION_MAJOR) | .[-1]')" # isolate latest ubuntu from array if [[ "$DEBUG" == 'true' ]]; then echo '# PLATFORMS_JSON_ARRAY' echo "# $(echo "$PLATFORMS_JSON_ARRAY" | jq -c '.')" + echo '# LATEST_UBUNTU' + echo "# $(echo "$LATEST_UBUNTU" | jq -c '.')" echo '' fi echo ' # builds' From cf623f039f9f7eb244f8dac3939ce3d66cdde57d Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Tue, 19 Jan 2021 19:59:45 -0500 Subject: [PATCH 06/52] Support changing the DCMAKE_BUILD_TYPE parameter in build.sh --- .cicd/build.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.cicd/build.sh b/.cicd/build.sh index 585ae183344..1bfa3ff485e 100755 --- a/.cicd/build.sh +++ b/.cicd/build.sh @@ -3,7 +3,8 @@ set -eo pipefail [[ "$ENABLE_INSTALL" == 'true' ]] || echo '--- :evergreen_tree: Configuring Environment' . 
./.cicd/helpers/general.sh mkdir -p "$BUILD_DIR" -CMAKE_EXTRAS="-DCMAKE_BUILD_TYPE=\"Release\" -DENABLE_MULTIVERSION_PROTOCOL_TEST=\"true\" -DBUILD_MONGO_DB_PLUGIN=\"true\"" +[[ -z "$CMAKE_BUILD_TYPE" ]] && export CMAKE_BUILD_TYPE='Release' +CMAKE_EXTRAS="-DCMAKE_BUILD_TYPE=\"$CMAKE_BUILD_TYPE\" -DENABLE_MULTIVERSION_PROTOCOL_TEST=\"true\" -DBUILD_MONGO_DB_PLUGIN=\"true\"" if [[ "$(uname)" == 'Darwin' && "$FORCE_LINUX" != 'true' ]]; then # You can't use chained commands in execute if [[ "$GITHUB_ACTIONS" == 'true' ]]; then From f97fce73ade07f431860087737caa41b2e99e72b Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Tue, 19 Jan 2021 20:05:52 -0500 Subject: [PATCH 07/52] Add a debug build for the latest Ubuntu --- .cicd/generate-pipeline.sh | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index b07645e1f3f..e82d9fcd7ba 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -101,6 +101,7 @@ if [[ "$DEBUG" == 'true' ]]; then fi echo ' # builds' echo $PLATFORMS_JSON_ARRAY | jq -cr '.[]' | while read -r PLATFORM_JSON; do + # release build if [[ ! "$(echo "$PLATFORM_JSON" | jq -r .FILE_NAME)" =~ 'macos' ]]; then cat < Date: Wed, 20 Jan 2021 14:58:56 -0500 Subject: [PATCH 08/52] Add debug package builder step --- .cicd/generate-pipeline.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index e82d9fcd7ba..1b5881ae948 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -581,6 +581,20 @@ cat < Date: Wed, 20 Jan 2021 19:44:41 -0500 Subject: [PATCH 09/52] Provide mechanism to skip debug builds --- .cicd/generate-pipeline.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 1b5881ae948..bd40a1727a0 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -165,7 +165,7 @@ EOF agents: queue: "$BUILDKITE_BUILD_AGENT_QUEUE" timeout: ${TIMEOUT:-180} - skip: \${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_BUILD} + skip: \${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_BUILD}${SKIP_DEBUG_BUILD} EOF fi @@ -593,7 +593,7 @@ cat < Date: Wed, 20 Jan 2021 19:47:57 -0500 Subject: [PATCH 10/52] Add RelWithDebInfo build --- .cicd/generate-pipeline.sh | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index bd40a1727a0..3594c3f97da 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -156,6 +156,17 @@ EOF # debug build if [[ "$(echo "$PLATFORM_JSON" | jq -r .FILE_NAME)" == "$(echo "$LATEST_UBUNTU" | jq -r '.FILE_NAME')" ]]; then cat < Date: Fri, 22 Jan 2021 13:29:40 -0500 Subject: [PATCH 11/52] Disable debug builds for the unpinned pipeline because it excludes the package builder step anyways --- .cicd/generate-pipeline.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 3594c3f97da..4a130f03fe6 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -154,7 +154,7 @@ EOF EOF fi # debug build - if [[ "$(echo "$PLATFORM_JSON" | jq -r .FILE_NAME)" == "$(echo "$LATEST_UBUNTU" | jq -r '.FILE_NAME')" ]]; 
then + if [[ "$PLATFORM_TYPE" == 'pinned' && "$(echo "$PLATFORM_JSON" | jq -r .FILE_NAME)" == "$(echo "$LATEST_UBUNTU" | jq -r '.FILE_NAME')" ]]; then cat < Date: Fri, 22 Jan 2021 13:35:52 -0500 Subject: [PATCH 12/52] Older releases still don't have Ubuntu 20.04 --- .cicd/generate-pipeline.sh | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 4a130f03fe6..f7a41e4a782 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -592,33 +592,33 @@ cat < Date: Wed, 27 Jan 2021 10:12:34 -0500 Subject: [PATCH 13/52] initial fix for local build --- scripts/helpers/eosio.sh | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/scripts/helpers/eosio.sh b/scripts/helpers/eosio.sh index a7ace5f53f9..fa733aff5e0 100755 --- a/scripts/helpers/eosio.sh +++ b/scripts/helpers/eosio.sh @@ -302,27 +302,27 @@ function build-clang() { if [[ ! -d $CLANG_ROOT ]]; then execute bash -c "cd ${TEMP_DIR} \ && rm -rf clang8 \ - && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 \ - && cd clang8 && git checkout $PINNED_COMPILER_LLVM_COMMIT \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/llvm.git clang8 \ + && cd clang8 && git checkout $PINNED_COMPILER_LLVM_COMMIT && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config \ && cd tools \ - && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/lld.git \ && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \ - && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/polly.git \ && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \ - && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang \ - && git checkout $PINNED_COMPILER_CLANG_COMMIT \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/clang.git clang && cd clang \ + && git checkout $PINNED_COMPILER_CLANG_COMMIT && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config \ && patch -p2 < \"$REPO_ROOT/scripts/clang-devtoolset8-support.patch\" \ && cd tools && mkdir extra && cd extra \ - && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/clang-tools-extra.git \ && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. 
\ && cd ../../../../projects \ - && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/libcxx.git \ && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \ - && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/libcxxabi.git \ && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \ - && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/libunwind.git \ && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ - && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/compiler-rt.git \ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ && cd ${TEMP_DIR}/clang8 \ && mkdir build && cd build \ From 3e74fc851366823809fbaea291e9dd28d91bb89c Mon Sep 17 00:00:00 2001 From: Qing Yang Date: Wed, 27 Jan 2021 10:57:34 -0500 Subject: [PATCH 14/52] use llvm-mirror repo for CICD builds --- .../pinned/amazon_linux-2-pinned.dockerfile | 18 ++++++++--------- .../pinned/centos-7.7-pinned.dockerfile | 18 ++++++++--------- .cicd/platforms/pinned/macos-10.14-pinned.sh | 20 ++++++++++--------- .../pinned/ubuntu-16.04-pinned.dockerfile | 18 ++++++++--------- .../pinned/ubuntu-18.04-pinned.dockerfile | 18 ++++++++--------- scripts/helpers/eosio.sh | 2 +- 6 files changed, 48 insertions(+), 46 deletions(-) diff --git a/.cicd/platforms/pinned/amazon_linux-2-pinned.dockerfile b/.cicd/platforms/pinned/amazon_linux-2-pinned.dockerfile index 7611cdd62ac..c9fd9e7445b 100644 --- a/.cicd/platforms/pinned/amazon_linux-2-pinned.dockerfile +++ b/.cicd/platforms/pinned/amazon_linux-2-pinned.dockerfile @@ -16,15 +16,15 @@ RUN curl -LO https://cmake.org/files/v3.13/cmake-3.13.2.tar.gz && \ cd / && \ rm -rf cmake-3.13.2.tar.gz /cmake-3.13.2 # build clang8 -RUN git clone --single-branch --branch release_80 https://git.llvm.org/git/llvm.git clang8 && cd clang8 && git checkout 18e41dc && \ - cd tools && git clone --single-branch --branch release_80 https://git.llvm.org/git/lld.git && cd lld && git checkout d60a035 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/polly.git && cd polly && git checkout 1bc06e5 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang.git clang && cd clang && git checkout a03da8b && \ - cd tools && mkdir extra && cd extra && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ - cd /clang8/projects && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxx.git && cd libcxx && git checkout 1853712 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libunwind.git && cd libunwind && git checkout 57f6739 && \ - cd ../ && git clone --single-branch --branch release_80 
https://git.llvm.org/git/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ +RUN git clone --single-branch --branch release_80 https://github.com/llvm-mirror/llvm.git clang8 && cd clang8 && git checkout 18e41dc && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config && \ + cd tools && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/lld.git && cd lld && git checkout d60a035 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/polly.git && cd polly && git checkout 1bc06e5 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/clang.git clang && cd clang && git checkout a03da8b && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config && \ + cd tools && mkdir extra && cd extra && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ + cd /clang8/projects && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libcxx.git && cd libcxx && git checkout 1853712 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libunwind.git && cd libunwind && git checkout 57f6739 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ mkdir /clang8/build && cd /clang8/build && \ cmake -G 'Unix Makefiles' -DCMAKE_INSTALL_PREFIX='/usr/local' -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=X86 -DCMAKE_BUILD_TYPE=Release .. 
&& \ make -j $(nproc) && \ diff --git a/.cicd/platforms/pinned/centos-7.7-pinned.dockerfile b/.cicd/platforms/pinned/centos-7.7-pinned.dockerfile index 2d52a765584..4c825064195 100644 --- a/.cicd/platforms/pinned/centos-7.7-pinned.dockerfile +++ b/.cicd/platforms/pinned/centos-7.7-pinned.dockerfile @@ -22,16 +22,16 @@ RUN curl -LO https://cmake.org/files/v3.13/cmake-3.13.2.tar.gz && \ rm -rf cmake-3.13.2.tar.gz /cmake-3.13.2 COPY ./scripts/clang-devtoolset8-support.patch /tmp/clang-devtoolset8-support.patch # build clang8 -RUN git clone --single-branch --branch release_80 https://git.llvm.org/git/llvm.git clang8 && cd clang8 && git checkout 18e41dc && \ - cd tools && git clone --single-branch --branch release_80 https://git.llvm.org/git/lld.git && cd lld && git checkout d60a035 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/polly.git && cd polly && git checkout 1bc06e5 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang.git clang && cd clang && git checkout a03da8b && \ +RUN git clone --single-branch --branch release_80 https://github.com/llvm-mirror/llvm.git clang8 && cd clang8 && git checkout 18e41dc && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config && \ + cd tools && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/lld.git && cd lld && git checkout d60a035 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/polly.git && cd polly && git checkout 1bc06e5 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/clang.git clang && cd clang && git checkout a03da8b && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config && \ patch -p2 < /tmp/clang-devtoolset8-support.patch && \ - cd tools && mkdir extra && cd extra && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ - cd /clang8/projects && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxx.git && cd libcxx && git checkout 1853712 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libunwind.git && cd libunwind && git checkout 57f6739 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ + cd tools && mkdir extra && cd extra && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ + cd /clang8/projects && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libcxx.git && cd libcxx && git checkout 1853712 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libunwind.git && cd libunwind && git checkout 57f6739 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ mkdir /clang8/build && cd /clang8/build && \ source /opt/rh/devtoolset-8/enable && \ source /opt/rh/rh-python36/enable && \ diff --git 
a/.cicd/platforms/pinned/macos-10.14-pinned.sh b/.cicd/platforms/pinned/macos-10.14-pinned.sh index 40507b65ca9..c0ddf9efd48 100755 --- a/.cicd/platforms/pinned/macos-10.14-pinned.sh +++ b/.cicd/platforms/pinned/macos-10.14-pinned.sh @@ -4,41 +4,43 @@ VERSION=1 brew update brew install git cmake python libtool libusb graphviz automake wget gmp llvm@7 pkgconfig doxygen openssl@1.1 jq || : # install clang from source -git clone --single-branch --branch release_80 https://git.llvm.org/git/llvm.git clang8 +git clone --single-branch --branch release_80 https://github.com/llvm-mirror/llvm.git clang8 cd clang8 git checkout 18e41dc +sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config cd tools -git clone --single-branch --branch release_80 https://git.llvm.org/git/lld.git +git clone --single-branch --branch release_80 https://github.com/llvm-mirror/lld.git cd lld git checkout d60a035 cd ../ -git clone --single-branch --branch release_80 https://git.llvm.org/git/polly.git +git clone --single-branch --branch release_80 https://github.com/llvm-mirror/polly.git cd polly git checkout 1bc06e5 cd ../ -git clone --single-branch --branch release_80 https://git.llvm.org/git/clang.git clang +git clone --single-branch --branch release_80 https://github.com/llvm-mirror/clang.git clang cd clang git checkout a03da8b +sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config cd tools mkdir extra cd extra -git clone --single-branch --branch release_80 https://git.llvm.org/git/clang-tools-extra.git +git clone --single-branch --branch release_80 https://github.com/llvm-mirror/clang-tools-extra.git cd clang-tools-extra git checkout 6b34834 cd ../../../../../projects/ -git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxx.git +git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libcxx.git cd libcxx git checkout 1853712 cd ../ -git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxxabi.git +git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libcxxabi.git cd libcxxabi git checkout d7338a4 cd ../ -git clone --single-branch --branch release_80 https://git.llvm.org/git/libunwind.git +git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libunwind.git cd libunwind git checkout 57f6739 cd ../ -git clone --single-branch --branch release_80 https://git.llvm.org/git/compiler-rt.git +git clone --single-branch --branch release_80 https://github.com/llvm-mirror/compiler-rt.git cd compiler-rt git checkout 5bc7979 mkdir ../../build diff --git a/.cicd/platforms/pinned/ubuntu-16.04-pinned.dockerfile b/.cicd/platforms/pinned/ubuntu-16.04-pinned.dockerfile index 9682578d761..5bebe55a956 100644 --- a/.cicd/platforms/pinned/ubuntu-16.04-pinned.dockerfile +++ b/.cicd/platforms/pinned/ubuntu-16.04-pinned.dockerfile @@ -17,15 +17,15 @@ RUN curl -LO https://cmake.org/files/v3.13/cmake-3.13.2.tar.gz && \ cd / && \ rm -rf cmake-3.13.2.tar.gz /cmake-3.13.2 # build clang -RUN git clone --single-branch --branch release_80 https://git.llvm.org/git/llvm.git clang8 && cd clang8 && git checkout 18e41dc && \ - cd tools && git clone --single-branch --branch release_80 https://git.llvm.org/git/lld.git && cd lld && git checkout d60a035 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/polly.git && cd polly && git checkout 1bc06e5 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang.git clang && cd clang && 
git checkout a03da8b && \ - cd tools && mkdir extra && cd extra && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ - cd /clang8/projects && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxx.git && cd libcxx && git checkout 1853712 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libunwind.git && cd libunwind && git checkout 57f6739 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ +RUN git clone --single-branch --branch release_80 https://github.com/llvm-mirror/llvm.git clang8 && cd clang8 && git checkout 18e41dc && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config && \ + cd tools && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/lld.git && cd lld && git checkout d60a035 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/polly.git && cd polly && git checkout 1bc06e5 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/clang.git clang && cd clang && git checkout a03da8b && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config && \ + cd tools && mkdir extra && cd extra && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ + cd /clang8/projects && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libcxx.git && cd libcxx && git checkout 1853712 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libunwind.git && cd libunwind && git checkout 57f6739 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ mkdir /clang8/build && cd /clang8/build && \ cmake -G 'Unix Makefiles' -DCMAKE_INSTALL_PREFIX='/usr/local' -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=X86 -DCMAKE_BUILD_TYPE=Release .. 
&& \ make -j $(nproc) && \ diff --git a/.cicd/platforms/pinned/ubuntu-18.04-pinned.dockerfile b/.cicd/platforms/pinned/ubuntu-18.04-pinned.dockerfile index 9b166645ced..2f771cb7a78 100644 --- a/.cicd/platforms/pinned/ubuntu-18.04-pinned.dockerfile +++ b/.cicd/platforms/pinned/ubuntu-18.04-pinned.dockerfile @@ -19,15 +19,15 @@ RUN curl -LO https://cmake.org/files/v3.13/cmake-3.13.2.tar.gz && \ cd / && \ rm -rf cmake-3.13.2.tar.gz /cmake-3.13.2 # build clang8 -RUN git clone --single-branch --branch release_80 https://git.llvm.org/git/llvm.git clang8 && cd clang8 && git checkout 18e41dc && \ - cd tools && git clone --single-branch --branch release_80 https://git.llvm.org/git/lld.git && cd lld && git checkout d60a035 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/polly.git && cd polly && git checkout 1bc06e5 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang.git clang && cd clang && git checkout a03da8b && \ - cd tools && mkdir extra && cd extra && git clone --single-branch --branch release_80 https://git.llvm.org/git/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ - cd /clang8/projects && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxx.git && cd libcxx && git checkout 1853712 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/libunwind.git && cd libunwind && git checkout 57f6739 && \ - cd ../ && git clone --single-branch --branch release_80 https://git.llvm.org/git/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ +RUN git clone --single-branch --branch release_80 https://github.com/llvm-mirror/llvm.git clang8 && cd clang8 && git checkout 18e41dc && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config && \ + cd tools && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/lld.git && cd lld && git checkout d60a035 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/polly.git && cd polly && git checkout 1bc06e5 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/clang.git clang && cd clang && git checkout a03da8b && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config && \ + cd tools && mkdir extra && cd extra && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/clang-tools-extra.git && cd clang-tools-extra && git checkout 6b34834 && \ + cd /clang8/projects && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libcxx.git && cd libcxx && git checkout 1853712 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libcxxabi.git && cd libcxxabi && git checkout d7338a4 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/libunwind.git && cd libunwind && git checkout 57f6739 && \ + cd ../ && git clone --single-branch --branch release_80 https://github.com/llvm-mirror/compiler-rt.git && cd compiler-rt && git checkout 5bc7979 && \ mkdir /clang8/build && cd /clang8/build && \ cmake -G 'Unix Makefiles' -DCMAKE_INSTALL_PREFIX='/usr/local' -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF 
-DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=X86 -DCMAKE_BUILD_TYPE=Release .. && \ make -j $(nproc) && \ diff --git a/scripts/helpers/eosio.sh b/scripts/helpers/eosio.sh index fa733aff5e0..c1cc556dc6b 100755 --- a/scripts/helpers/eosio.sh +++ b/scripts/helpers/eosio.sh @@ -310,7 +310,7 @@ function build-clang() { && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/polly.git \ && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \ && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/clang.git clang && cd clang \ - && git checkout $PINNED_COMPILER_CLANG_COMMIT && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config \ + && git checkout $PINNED_COMPILER_CLANG_COMMIT && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config \ && patch -p2 < \"$REPO_ROOT/scripts/clang-devtoolset8-support.patch\" \ && cd tools && mkdir extra && cd extra \ && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/clang-tools-extra.git \ From 21176ab6a0b27bf3007c2469daba015ea30fa66a Mon Sep 17 00:00:00 2001 From: Qing Yang Date: Wed, 27 Jan 2021 12:13:23 -0500 Subject: [PATCH 15/52] fix sed error for macOS 10.14 --- .cicd/platforms/pinned/macos-10.14-pinned.sh | 4 ++-- scripts/helpers/eosio.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.cicd/platforms/pinned/macos-10.14-pinned.sh b/.cicd/platforms/pinned/macos-10.14-pinned.sh index c0ddf9efd48..7eea188b39c 100755 --- a/.cicd/platforms/pinned/macos-10.14-pinned.sh +++ b/.cicd/platforms/pinned/macos-10.14-pinned.sh @@ -7,7 +7,7 @@ brew install git cmake python libtool libusb graphviz automake wget gmp llvm@7 p git clone --single-branch --branch release_80 https://github.com/llvm-mirror/llvm.git clang8 cd clang8 git checkout 18e41dc -sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config +sed -i.bak 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config cd tools git clone --single-branch --branch release_80 https://github.com/llvm-mirror/lld.git cd lld @@ -20,7 +20,7 @@ cd ../ git clone --single-branch --branch release_80 https://github.com/llvm-mirror/clang.git clang cd clang git checkout a03da8b -sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config +sed -i.bak 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config cd tools mkdir extra cd extra diff --git a/scripts/helpers/eosio.sh b/scripts/helpers/eosio.sh index c1cc556dc6b..d68373fc1db 100755 --- a/scripts/helpers/eosio.sh +++ b/scripts/helpers/eosio.sh @@ -303,14 +303,14 @@ function build-clang() { execute bash -c "cd ${TEMP_DIR} \ && rm -rf clang8 \ && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/llvm.git clang8 \ - && cd clang8 && git checkout $PINNED_COMPILER_LLVM_COMMIT && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config \ + && cd clang8 && git checkout $PINNED_COMPILER_LLVM_COMMIT && sed -i.bak 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config \ && cd tools \ && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/lld.git \ && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \ && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/polly.git \ && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT 
&& cd ../ \ && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/clang.git clang && cd clang \ - && git checkout $PINNED_COMPILER_CLANG_COMMIT && sed -i 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config \ + && git checkout $PINNED_COMPILER_CLANG_COMMIT && sed -i.bak 's,https://github.com/llvm-mirror/,https://git.llvm.org/git/,g' .git/config \ && patch -p2 < \"$REPO_ROOT/scripts/clang-devtoolset8-support.patch\" \ && cd tools && mkdir extra && cd extra \ && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://github.com/llvm-mirror/clang-tools-extra.git \ From 71c1d12ff14ab2cc715f993c09628c0a8da359ce Mon Sep 17 00:00:00 2001 From: Edgar Gonzalez Date: Wed, 27 Jan 2021 13:22:36 -0600 Subject: [PATCH 16/52] Updating to the new Docker repo name EOSIO instead of EOS --- .cicd/create-docker-from-binary.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.cicd/create-docker-from-binary.sh b/.cicd/create-docker-from-binary.sh index e7bdb07d43b..5986e97f956 100755 --- a/.cicd/create-docker-from-binary.sh +++ b/.cicd/create-docker-from-binary.sh @@ -12,10 +12,10 @@ echo "$SANITIZED_TAG" # do docker build echo ":docker::build: Building image..." -DOCKERHUB_REGISTRY="docker.io/eosio/eos" +DOCKERHUB_REGISTRY="docker.io/eosio/eosio" BUILD_TAG=${BUILDKITE_BUILD_NUMBER:-latest} -DOCKER_BUILD_GEN="docker build -t eos_image:$BUILD_TAG -f ./docker/dockerfile ." +DOCKER_BUILD_GEN="docker build -t eosio_image:$BUILD_TAG -f ./docker/dockerfile ." echo "$ $DOCKER_BUILD_GEN" eval $DOCKER_BUILD_GEN @@ -23,8 +23,8 @@ eval $DOCKER_BUILD_GEN EOSIO_REGS=("$EOSIO_REGISTRY" "$DOCKERHUB_REGISTRY") for REG in ${EOSIO_REGS[@]}; do - DOCKER_TAG_COMMIT="docker tag eos_image:$BUILD_TAG $REG:$BUILDKITE_COMMIT" - DOCKER_TAG_BRANCH="docker tag eos_image:$BUILD_TAG $REG:$SANITIZED_BRANCH" + DOCKER_TAG_COMMIT="docker tag eosio_image:$BUILD_TAG $REG:$BUILDKITE_COMMIT" + DOCKER_TAG_BRANCH="docker tag eosio_image:$BUILD_TAG $REG:$SANITIZED_BRANCH" echo -e "$ Tagging Images: \n$DOCKER_TAG_COMMIT \n$DOCKER_TAG_BRANCH" eval $DOCKER_TAG_COMMIT eval $DOCKER_TAG_BRANCH @@ -39,7 +39,7 @@ for REG in ${EOSIO_REGS[@]}; do eval $CLEAN_IMAGE_COMMIT eval $CLEAN_IMAGE_BRANCH if [[ ! -z "$SANITIZED_TAG" ]]; then - DOCKER_TAG="docker tag eos_image $REG:$SANITIZED_TAG" + DOCKER_TAG="docker tag eosio_image $REG:$SANITIZED_TAG" DOCKER_REM="docker rmi $REG:$SANITIZED_TAG" echo -e "$ \n Tagging Image: \n$DOCKER_TAG \n Cleaning Up: \n$DOCKER_REM" eval $DOCKER_TAG eval $DOCKER_REM fi done -DOCKER_GEN="docker rmi eos_image:$BUILD_TAG" +DOCKER_GEN="docker rmi eosio_image:$BUILD_TAG" echo "Clean up base image" eval $DOCKER_GEN \ No newline at end of file From c4cd191e2e9f4cad4bf0142041cbe8a36265c97d Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Wed, 27 Jan 2021 22:32:56 -0500 Subject: [PATCH 17/52] Remove debug builds --- .cicd/generate-pipeline.sh | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index f7a41e4a782..a21c4c3de09 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -101,7 +101,6 @@ if [[ "$DEBUG" == 'true' ]]; then fi echo ' # builds' echo $PLATFORMS_JSON_ARRAY | jq -cr '.[]' | while read -r PLATFORM_JSON; do - # release build if [[ ! 
"$(echo "$PLATFORM_JSON" | jq -r .FILE_NAME)" =~ 'macos' ]]; then cat < Date: Wed, 27 Jan 2021 22:34:06 -0500 Subject: [PATCH 18/52] Make the build type configurable for all builds --- .cicd/generate-pipeline.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index a21c4c3de09..cbd2adb9041 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -91,6 +91,7 @@ nIFS=$IFS # fix array splitting (\n won't work) echo ' - wait' echo '' # build steps +[[ -z "$CMAKE_BUILD_TYPE" ]] && export CMAKE_BUILD_TYPE='Release' export LATEST_UBUNTU="$(echo "$PLATFORMS_JSON_ARRAY" | jq -c 'map(select(.PLATFORM_NAME == "ubuntu")) | sort_by(.VERSION_MAJOR) | .[-1]')" # isolate latest ubuntu from array if [[ "$DEBUG" == 'true' ]]; then echo '# PLATFORMS_JSON_ARRAY' @@ -106,6 +107,7 @@ echo $PLATFORMS_JSON_ARRAY | jq -cr '.[]' | while read -r PLATFORM_JSON; do - label: "$(echo "$PLATFORM_JSON" | jq -r .ICON) $(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_FULL) - Build" command: "./.cicd/build.sh" env: + CMAKE_BUILD_TYPE: $CMAKE_BUILD_TYPE IMAGE_TAG: $(echo "$PLATFORM_JSON" | jq -r .FILE_NAME) PLATFORM_TYPE: $PLATFORM_TYPE agents: @@ -139,6 +141,7 @@ EOF - EOSIO/skip-checkout#v0.1.1: cd: ~ env: + CMAKE_BUILD_TYPE: $CMAKE_BUILD_TYPE REPO: ${BUILDKITE_PULL_REQUEST_REPO:-$BUILDKITE_REPO} REPO_COMMIT: $BUILDKITE_COMMIT TEMPLATE: $MOJAVE_ANKA_TEMPLATE_NAME From d8ad4a13bc81016fac49f93b216074eaa99557d4 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Wed, 27 Jan 2021 22:37:03 -0500 Subject: [PATCH 19/52] Skip tests for non-optimized build types --- .cicd/generate-pipeline.sh | 154 +++++++++++++++++++------------------ 1 file changed, 78 insertions(+), 76 deletions(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index cbd2adb9041..7aa3de9ca55 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -173,14 +173,15 @@ cat < Date: Wed, 27 Jan 2021 22:37:56 -0500 Subject: [PATCH 20/52] Rename CMAKE_BUILD_TYPE to DCMAKE_BUILD_TYPE --- .cicd/build.sh | 4 ++-- .cicd/generate-pipeline.sh | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.cicd/build.sh b/.cicd/build.sh index 1bfa3ff485e..5ffcff277fb 100755 --- a/.cicd/build.sh +++ b/.cicd/build.sh @@ -3,8 +3,8 @@ set -eo pipefail [[ "$ENABLE_INSTALL" == 'true' ]] || echo '--- :evergreen_tree: Configuring Environment' . 
./.cicd/helpers/general.sh mkdir -p "$BUILD_DIR" -[[ -z "$CMAKE_BUILD_TYPE" ]] && export CMAKE_BUILD_TYPE='Release' -CMAKE_EXTRAS="-DCMAKE_BUILD_TYPE=\"$CMAKE_BUILD_TYPE\" -DENABLE_MULTIVERSION_PROTOCOL_TEST=\"true\" -DBUILD_MONGO_DB_PLUGIN=\"true\"" +[[ -z "$DCMAKE_BUILD_TYPE" ]] && export DCMAKE_BUILD_TYPE='Release' +CMAKE_EXTRAS="-DCMAKE_BUILD_TYPE=\"$DCMAKE_BUILD_TYPE\" -DENABLE_MULTIVERSION_PROTOCOL_TEST=\"true\" -DBUILD_MONGO_DB_PLUGIN=\"true\"" if [[ "$(uname)" == 'Darwin' && "$FORCE_LINUX" != 'true' ]]; then # You can't use chained commands in execute if [[ "$GITHUB_ACTIONS" == 'true' ]]; then diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 7aa3de9ca55..cf8e0d07682 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -91,7 +91,7 @@ nIFS=$IFS # fix array splitting (\n won't work) echo ' - wait' echo '' # build steps -[[ -z "$CMAKE_BUILD_TYPE" ]] && export CMAKE_BUILD_TYPE='Release' +[[ -z "$DCMAKE_BUILD_TYPE" ]] && export DCMAKE_BUILD_TYPE='Release' export LATEST_UBUNTU="$(echo "$PLATFORMS_JSON_ARRAY" | jq -c 'map(select(.PLATFORM_NAME == "ubuntu")) | sort_by(.VERSION_MAJOR) | .[-1]')" # isolate latest ubuntu from array if [[ "$DEBUG" == 'true' ]]; then echo '# PLATFORMS_JSON_ARRAY' @@ -107,7 +107,7 @@ echo $PLATFORMS_JSON_ARRAY | jq -cr '.[]' | while read -r PLATFORM_JSON; do - label: "$(echo "$PLATFORM_JSON" | jq -r .ICON) $(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_FULL) - Build" command: "./.cicd/build.sh" env: - CMAKE_BUILD_TYPE: $CMAKE_BUILD_TYPE + DCMAKE_BUILD_TYPE: $DCMAKE_BUILD_TYPE IMAGE_TAG: $(echo "$PLATFORM_JSON" | jq -r .FILE_NAME) PLATFORM_TYPE: $PLATFORM_TYPE agents: @@ -141,7 +141,7 @@ EOF - EOSIO/skip-checkout#v0.1.1: cd: ~ env: - CMAKE_BUILD_TYPE: $CMAKE_BUILD_TYPE + DCMAKE_BUILD_TYPE: $DCMAKE_BUILD_TYPE REPO: ${BUILDKITE_PULL_REQUEST_REPO:-$BUILDKITE_REPO} REPO_COMMIT: $BUILDKITE_COMMIT TEMPLATE: $MOJAVE_ANKA_TEMPLATE_NAME @@ -173,7 +173,7 @@ cat < Date: Wed, 27 Jan 2021 23:07:57 -0500 Subject: [PATCH 21/52] Don't forget to delete the extra package builder steps you created --- .cicd/generate-pipeline.sh | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index cf8e0d07682..edb1d014edc 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -569,34 +569,6 @@ cat < Date: Fri, 29 Jan 2021 10:59:28 -0500 Subject: [PATCH 22/52] Add logic so the eosio-debug-build pipeline can run older branches which have yet to receive Ubuntu 20.04 --- .cicd/generate-pipeline.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index edb1d014edc..04e3696a60c 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -13,6 +13,7 @@ if [[ $BUILDKITE_BRANCH =~ ^pull/[0-9]+/head: ]]; then PR_ID=$(echo $BUILDKITE_BRANCH | cut -d/ -f2) export GIT_FETCH="git fetch -v --prune origin refs/pull/$PR_ID/head &&" fi +[[ "$BUILDKITE_PIPELINE_SLUG" == 'eosio-debug-build' ]] && export SKIP_UBUNTU_18_04='false' # Determine which dockerfiles/scripts to use for the pipeline. 
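A note on how skip flags like the SKIP_UBUNTU_18_04 export above are consumed: the generator assembles a variable name per platform (a later patch in this series calls it PLATFORM_SKIP_VAR) and then needs the value of the variable that name refers to while emitting each step's skip: field. A toy sketch of that name-then-dereference pattern using bash indirect expansion — illustrative only; whether the final pipeline uses ${!var} or some other mechanism is not shown in this excerpt:

    #!/bin/bash
    # Toy sketch, not the pipeline's code: look up a skip flag by constructed name.
    export SKIP_UBUNTU_18_04='false'        # e.g. set above for the eosio-debug-build pipeline
    PLATFORM_SKIP_VAR='SKIP_UBUNTU_18_04'   # name assembled per platform by the generator
    echo "skip: ${!PLATFORM_SKIP_VAR}"      # indirect expansion prints: skip: false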
if [[ $PINNED == false ]]; then export PLATFORM_TYPE="unpinned" From e0c8f177ad1d56111fde08bcc5ee70132929c038 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 29 Jan 2021 13:01:57 -0500 Subject: [PATCH 23/52] Support overriding SKIP_OS variables from inside generate-pipeline.sh --- .cicd/generate-pipeline.sh | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 04e3696a60c..1397fff9380 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -114,7 +114,7 @@ echo $PLATFORMS_JSON_ARRAY | jq -cr '.[]' | while read -r PLATFORM_JSON; do agents: queue: "$BUILDKITE_BUILD_AGENT_QUEUE" timeout: ${TIMEOUT:-180} - skip: \${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_BUILD} + skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_BUILD} EOF else @@ -153,7 +153,7 @@ EOF PROJECT_TAG: $(echo "$PLATFORM_JSON" | jq -r .HASHED_IMAGE_TAG) timeout: ${TIMEOUT:-180} agents: "queue=mac-anka-large-node-fleet" - skip: \${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_BUILD} + skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_BUILD} EOF fi done @@ -196,7 +196,7 @@ if [[ "$DCMAKE_BUILD_TYPE" != 'Debug' ]]; then manual: permit_on_passed: true timeout: ${TIMEOUT:-30} - skip: \${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_UNIT_TESTS} + skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_UNIT_TESTS} EOF else @@ -225,7 +225,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-60} - skip: \${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_UNIT_TESTS} + skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_UNIT_TESTS} EOF fi @@ -249,7 +249,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-30} - skip: \${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_WASM_SPEC_TESTS} + skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_WASM_SPEC_TESTS} EOF else @@ -278,7 +278,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-60} - skip: \${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_WASM_SPEC_TESTS} + skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_WASM_SPEC_TESTS} EOF fi @@ -305,7 +305,7 @@ EOF manual: 
permit_on_passed: true timeout: ${TIMEOUT:-20} - skip: \${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_SERIAL_TESTS} + skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_SERIAL_TESTS} EOF else @@ -334,7 +334,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-60} - skip: \${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_SERIAL_TESTS} + skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_SERIAL_TESTS} EOF fi echo @@ -362,7 +362,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-180} - skip: \${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_LONG_RUNNING_TESTS:-true} + skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_LONG_RUNNING_TESTS:-true} EOF else @@ -391,7 +391,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-180} - skip: \${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_LONG_RUNNING_TESTS:-true} + skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_LONG_RUNNING_TESTS:-true} EOF fi echo From 604e404ec913b3ca6c7bd7213ce84c0719fd7d7b Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 29 Jan 2021 13:54:41 -0500 Subject: [PATCH 24/52] Re-work how the SKIP_PLATFORM variables are constructed and parsed --- .cicd/generate-pipeline.sh | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 1397fff9380..5c935c6fda4 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -57,9 +57,11 @@ for FILE in $(ls "$CICD_DIR/platforms/$PLATFORM_TYPE"); do [[ $FILE_NAME =~ 'centos' ]] && export ICON=':centos:' [[ $FILE_NAME =~ 'macos' ]] && export ICON=':darwin:' . "$HELPERS_DIR/file-hash.sh" "$CICD_DIR/platforms/$PLATFORM_TYPE/$FILE" # returns HASHED_IMAGE_TAG, etc + export PLATFORM_NAME_SKIP_SUFFIX="${PLATFORM_NAME_UPCASE}_${VERSION_MAJOR}${VERSION_MINOR}" export PLATFORMS_JSON_ARRAY=$(echo $PLATFORMS_JSON_ARRAY | jq -c '. 
+= [{ "FILE_NAME": env.FILE_NAME, "PLATFORM_NAME": env.PLATFORM_NAME, + "PLATFORM_NAME_SKIP_SUFFIX": env.PLATFORM_NAME_SKIP_SUFFIX, "PLATFORM_NAME_UPCASE": env.PLATFORM_NAME_UPCASE, "VERSION_MAJOR": env.VERSION_MAJOR, "VERSION_MINOR": env.VERSION_MINOR, @@ -114,7 +116,7 @@ echo $PLATFORMS_JSON_ARRAY | jq -cr '.[]' | while read -r PLATFORM_JSON; do agents: queue: "$BUILDKITE_BUILD_AGENT_QUEUE" timeout: ${TIMEOUT:-180} - skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_BUILD} + skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_BUILD} EOF else @@ -153,7 +155,7 @@ EOF PROJECT_TAG: $(echo "$PLATFORM_JSON" | jq -r .HASHED_IMAGE_TAG) timeout: ${TIMEOUT:-180} agents: "queue=mac-anka-large-node-fleet" - skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_BUILD} + skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_BUILD} EOF fi done @@ -196,7 +198,7 @@ if [[ "$DCMAKE_BUILD_TYPE" != 'Debug' ]]; then manual: permit_on_passed: true timeout: ${TIMEOUT:-30} - skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_UNIT_TESTS} + skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_UNIT_TESTS} EOF else @@ -225,7 +227,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-60} - skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_UNIT_TESTS} + skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_UNIT_TESTS} EOF fi @@ -249,7 +251,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-30} - skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_WASM_SPEC_TESTS} + skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_WASM_SPEC_TESTS} EOF else @@ -278,7 +280,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-60} - skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_WASM_SPEC_TESTS} + skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_WASM_SPEC_TESTS} EOF fi @@ -305,7 +307,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-20} - skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_SERIAL_TESTS} + skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_SERIAL_TESTS} EOF else @@ -334,7 +336,8 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-60} - skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_SERIAL_TESTS} + skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_SERIAL_TESTS} + EOF fi echo @@ -362,7 +365,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-180} - skip: 
${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_LONG_RUNNING_TESTS:-true} + skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_LONG_RUNNING_TESTS:-true} EOF else @@ -391,7 +394,8 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-180} - skip: ${SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_UPCASE)_$(echo "$PLATFORM_JSON" | jq -r .VERSION_MAJOR)$(echo "$PLATFORM_JSON" | jq -r .VERSION_MINOR)}${SKIP_LONG_RUNNING_TESTS:-true} + skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_LONG_RUNNING_TESTS:-true} + EOF fi echo From f5efd5d1fa8ef2b23f2c461621979432a0d4018f Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 29 Jan 2021 16:49:38 -0500 Subject: [PATCH 25/52] Fix whitespace --- .cicd/generate-pipeline.sh | 158 ++++++++++++++++++------------------- 1 file changed, 79 insertions(+), 79 deletions(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 5c935c6fda4..e87c1cf0faf 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -10,8 +10,8 @@ BUILDKITE_TEST_AGENT_QUEUE='automation-eks-eos-tester-fleet' [[ -z "$ROUNDS" ]] && export ROUNDS='1' # Determine if it's a forked PR and make sure to add git fetch so we don't have to git clone the forked repo's url if [[ $BUILDKITE_BRANCH =~ ^pull/[0-9]+/head: ]]; then - PR_ID=$(echo $BUILDKITE_BRANCH | cut -d/ -f2) - export GIT_FETCH="git fetch -v --prune origin refs/pull/$PR_ID/head &&" + PR_ID=$(echo $BUILDKITE_BRANCH | cut -d/ -f2) + export GIT_FETCH="git fetch -v --prune origin refs/pull/$PR_ID/head &&" fi [[ "$BUILDKITE_PIPELINE_SLUG" == 'eosio-debug-build' ]] && export SKIP_UBUNTU_18_04='false' # Determine which dockerfiles/scripts to use for the pipeline. @@ -177,14 +177,14 @@ EOF # tests IFS=$oIFS if [[ "$DCMAKE_BUILD_TYPE" != 'Debug' ]]; then - for ROUND in $(seq 1 $ROUNDS); do - IFS=$'' - echo " # round $ROUND of $ROUNDS" - # parallel tests - echo ' # parallel tests' - echo $PLATFORMS_JSON_ARRAY | jq -cr '.[]' | while read -r PLATFORM_JSON; do - if [[ ! "$(echo "$PLATFORM_JSON" | jq -r .FILE_NAME)" =~ 'macos' ]]; then - cat < Date: Fri, 29 Jan 2021 19:25:30 -0500 Subject: [PATCH 26/52] Re-work SKIP_PLATFORM variables....again --- .cicd/generate-pipeline.sh | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index e87c1cf0faf..a860bb0bcc3 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -57,11 +57,11 @@ for FILE in $(ls "$CICD_DIR/platforms/$PLATFORM_TYPE"); do [[ $FILE_NAME =~ 'centos' ]] && export ICON=':centos:' [[ $FILE_NAME =~ 'macos' ]] && export ICON=':darwin:' . "$HELPERS_DIR/file-hash.sh" "$CICD_DIR/platforms/$PLATFORM_TYPE/$FILE" # returns HASHED_IMAGE_TAG, etc - export PLATFORM_NAME_SKIP_SUFFIX="${PLATFORM_NAME_UPCASE}_${VERSION_MAJOR}${VERSION_MINOR}" + export PLATFORM_SKIP_VAR="SKIP_${PLATFORM_NAME_UPCASE}_${VERSION_MAJOR}${VERSION_MINOR}" export PLATFORMS_JSON_ARRAY=$(echo $PLATFORMS_JSON_ARRAY | jq -c '. 
+= [{ "FILE_NAME": env.FILE_NAME, "PLATFORM_NAME": env.PLATFORM_NAME, - "PLATFORM_NAME_SKIP_SUFFIX": env.PLATFORM_NAME_SKIP_SUFFIX, + "PLATFORM_SKIP_VAR": env.PLATFORM_SKIP_VAR, "PLATFORM_NAME_UPCASE": env.PLATFORM_NAME_UPCASE, "VERSION_MAJOR": env.VERSION_MAJOR, "VERSION_MINOR": env.VERSION_MINOR, @@ -116,7 +116,7 @@ echo $PLATFORMS_JSON_ARRAY | jq -cr '.[]' | while read -r PLATFORM_JSON; do agents: queue: "$BUILDKITE_BUILD_AGENT_QUEUE" timeout: ${TIMEOUT:-180} - skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_BUILD} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_BUILD} EOF else @@ -155,7 +155,7 @@ EOF PROJECT_TAG: $(echo "$PLATFORM_JSON" | jq -r .HASHED_IMAGE_TAG) timeout: ${TIMEOUT:-180} agents: "queue=mac-anka-large-node-fleet" - skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_BUILD} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_BUILD} EOF fi done @@ -198,7 +198,7 @@ if [[ "$DCMAKE_BUILD_TYPE" != 'Debug' ]]; then manual: permit_on_passed: true timeout: ${TIMEOUT:-30} - skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_UNIT_TESTS} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_UNIT_TESTS} EOF else @@ -227,7 +227,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-60} - skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_UNIT_TESTS} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_UNIT_TESTS} EOF fi @@ -251,7 +251,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-30} - skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_WASM_SPEC_TESTS} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_WASM_SPEC_TESTS} EOF else @@ -280,7 +280,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-60} - skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_WASM_SPEC_TESTS} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_WASM_SPEC_TESTS} EOF fi @@ -307,7 +307,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-20} - skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_SERIAL_TESTS} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_SERIAL_TESTS} EOF else @@ -336,8 +336,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-60} - skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_SERIAL_TESTS} - + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_SERIAL_TESTS} EOF fi echo @@ -365,7 +364,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-180} - skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_LONG_RUNNING_TESTS:-true} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_LONG_RUNNING_TESTS:-true} EOF else @@ -394,8 +393,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-180} - skip: $(echo "$SKIP_$(echo "$PLATFORM_JSON" | jq -r .PLATFORM_NAME_SKIP_SUFFIX)")${SKIP_LONG_RUNNING_TESTS:-true} - + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_LONG_RUNNING_TESTS:-true} EOF fi echo From 17c4e71f819403f97923626fb9660b7ccc6120c3 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 29 Jan 2021 19:33:32 -0500 Subject: [PATCH 27/52] Fix YAML whitespace -_- --- 
.cicd/generate-pipeline.sh | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh
index a860bb0bcc3..32111c1c6bd 100755
--- a/.cicd/generate-pipeline.sh
+++ b/.cicd/generate-pipeline.sh
@@ -156,6 +156,7 @@ EOF
         timeout: ${TIMEOUT:-180}
         agents: "queue=mac-anka-large-node-fleet"
         skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_BUILD}
+
 EOF
     fi
 done
@@ -337,6 +338,7 @@ EOF
             permit_on_passed: true
         timeout: ${TIMEOUT:-60}
         skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_SERIAL_TESTS}
+
 EOF
     fi
     echo
@@ -394,6 +396,7 @@ EOF
             permit_on_passed: true
         timeout: ${TIMEOUT:-180}
         skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_LONG_RUNNING_TESTS:-true}
+
 EOF
     fi
     echo
@@ -605,6 +608,5 @@ cat < Date: Fri, 29 Jan 2021 19:46:00 -0500
Subject: [PATCH 28/52] Another ****ing whitespace change

---
 .cicd/generate-pipeline.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh
index 32111c1c6bd..06762f8358e 100755
--- a/.cicd/generate-pipeline.sh
+++ b/.cicd/generate-pipeline.sh
@@ -411,7 +411,7 @@ EOF
 done
 # Execute multiversion test
 if [[ ! "$PINNED" == 'false' || "$SKIP_MULTIVERSION_TEST" == 'false' ]]; then
-    cat < Date: Tue, 2 Feb 2021 06:51:18 -0600
Subject: [PATCH 29/52] EPE-389 net_plugin stall during head catchup

This fix handles situations where a node is in head_catchup state,
receives the sync_last_requested_num, and then does a
sync_update_expected for the next block when that block has already
been received by the node. In this instance, the node will not be sent
the next block and will be effectively hung.
---
 plugins/net_plugin/net_plugin.cpp | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index b1c72bd768f..7d8177f70e5 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -147,7 +147,7 @@ namespace eosio {
    private:
       constexpr static auto stage_str( stages s );
-      void set_state( stages s );
+      bool set_state( stages s );
       bool is_sync_required( uint32_t fork_head_block_num );
       void request_next_chunk( std::unique_lock<std::mutex> g_sync, const connection_ptr& conn = connection_ptr() );
       void start_sync( const connection_ptr& c, uint32_t target );
@@ -1372,12 +1372,13 @@
       }
    }

-   void sync_manager::set_state(stages newstate) {
+   bool sync_manager::set_state(stages newstate) {
       if( sync_state == newstate ) {
-         return;
+         return false;
       }
       fc_ilog( logger, "old state ${os} becoming ${ns}", ("os", stage_str( sync_state ))( "ns", stage_str( newstate ) ) );
       sync_state = newstate;
+      return true;
    }

    void sync_manager::sync_reset_lib_num(const connection_ptr& c) {
@@ -1808,7 +1809,9 @@
       } );

       if( set_state_to_head_catchup ) {
-         set_state( head_catchup );
+         if( set_state( head_catchup ) ) {
+            send_handshakes();
+         }
       } else {
          set_state( in_sync );
          send_handshakes();
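The shape of that change is worth isolating: set_state now reports whether a transition actually occurred, so the caller fires handshakes only on a genuine entry into head_catchup. A self-contained sketch of the pattern (types and names below are simplified stand-ins, not the plugin's real interfaces):

```cpp
#include <iostream>

enum class stages { in_sync, lib_catchup, head_catchup };

struct sync_sketch {
   stages state = stages::in_sync;
   // Returns true only when the state really changed, so side effects
   // are not repeated while already in the target state.
   bool set_state( stages s ) {
      if( state == s ) return false;
      state = s;
      return true;
   }
};

void send_handshakes() { std::cout << "handshakes sent\n"; }

int main() {
   sync_sketch s;
   if( s.set_state( stages::head_catchup ) ) send_handshakes(); // fires once
   if( s.set_state( stages::head_catchup ) ) send_handshakes(); // no-op: already there
}
```

The handshake nudges peers to resend their status; without it, a node whose expected block had already arrived would sit in head_catchup waiting forever.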
From 18d6ed8d5a637c495534b2379180508e5d3e725a Mon Sep 17 00:00:00 2001
From: Luis Paris
Date: Thu, 28 Jan 2021 11:16:09 -0500
Subject: [PATCH 30/52] remove cleos how-tos index file to prevent empty page
 from showing :doc

---
 docs/02_cleos/02_how-to-guides/index.md | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 docs/02_cleos/02_how-to-guides/index.md

diff --git a/docs/02_cleos/02_how-to-guides/index.md b/docs/02_cleos/02_how-to-guides/index.md
deleted file mode 100644
index e69de29bb2d..00000000000

From f8e59867db6593ae18496890644309d18dbdd681 Mon Sep 17 00:00:00 2001
From: Luis Paris
Date: Thu, 4 Feb 2021 14:40:18 -0500
Subject: [PATCH 31/52] update how-to get block information :doc

---
 .../how-to-get-block-information.md | 76 +++++++++++++++++--
 1 file changed, 69 insertions(+), 7 deletions(-)

diff --git a/docs/02_cleos/02_how-to-guides/how-to-get-block-information.md b/docs/02_cleos/02_how-to-guides/how-to-get-block-information.md
index eed76c62bfb..7efd62427f6 100644
--- a/docs/02_cleos/02_how-to-guides/how-to-get-block-information.md
+++ b/docs/02_cleos/02_how-to-guides/how-to-get-block-information.md
@@ -1,18 +1,80 @@
 ## Goal

-Query infomation of a block
+Query information about a block.

 ## Before you begin

-* Install the currently supported version of `cleos`
+Make sure to meet the following requirements:

-* Understand the following:
-  * What is a block
+* Familiarize yourself with the [`cleos get block`](../03_command-reference/get/block.md) command and its parameters.
+* Install the currently supported version of `cleos`.
+
+[[info | Note]]
+| `cleos` is bundled with the EOSIO software. [Installing EOSIO](../../00_install/index.md) will also install `cleos`.
+
+* Understand what a [block](https://developers.eos.io/welcome/latest/glossary/index/#block) is and its role in the blockchain.
+* Understand the [block lifecycle](https://developers.eos.io/welcome/latest/protocol-guides/consensus_protocol/#5-block-lifecycle) in the EOSIO consensus protocol.

 ## Steps

-Execute the command below:
+Perform the step below:
+
+Retrieve full information about a block:
+
+```sh
+cleos get block <block_number_or_id>
+```
+
+Where `block_number_or_id` is the specified block number or block ID.
+
+Some examples are provided below:
+
+* Query the testnet to retrieve full block information about block number `48351112`:
+
+**Example Output**
+
+```sh
+cleos -u https://api.testnet.eos.io get block 48351112
+```
+```json
+{
+  "timestamp": "2021-01-28T17:58:59.500",
+  "producer": "inith",
+  "confirmed": 0,
+  "previous": "02e1c78787ff4d4ce6124831b936bb4ef6015e470868a535f1c6e04f3afed8a1",
+  "transaction_mroot": "0000000000000000000000000000000000000000000000000000000000000000",
+  "action_mroot": "1bf9d17b5a951cbb6d0a8324e4039744db4137df498abd53046ea26fa74d73c9",
+  "schedule_version": 1,
+  "new_producers": null,
+  "producer_signature": "SIG_K1_JxFfxGA1wZx9LCVjbrBb5nxTuJai7RUSiwRXyY866fYvZZyRtdmQFn9KJCqVHFAiYEsJpDb6dhTmHNDwipJm4rDiyhEmGa",
+  "transactions": [],
+  "id": "02e1c7888a92206573ae38d00e09366c7ba7bc54cd8b7996506f7d2a619c43ba",
+  "block_num": 48351112,
+  "ref_block_prefix": 3493375603
+}
+
+* Query the testnet to retrieve full block information about block ID `02e1c7888a92206573ae38d00e09366c7ba7bc54cd8b7996506f7d2a619c43ba`:
+
+**Example Output**

 ```sh
-cleos get block BLOCK_NUMBER
-```
\ No newline at end of file
+cleos -u https://api.testnet.eos.io get block 02e1c7888a92206573ae38d00e09366c7ba7bc54cd8b7996506f7d2a619c43ba
+```
+```json
+{
+  "timestamp": "2021-01-28T17:58:59.500",
+  "producer": "inith",
+  "confirmed": 0,
+  "previous": "02e1c78787ff4d4ce6124831b936bb4ef6015e470868a535f1c6e04f3afed8a1",
+  "transaction_mroot": "0000000000000000000000000000000000000000000000000000000000000000",
+  "action_mroot": "1bf9d17b5a951cbb6d0a8324e4039744db4137df498abd53046ea26fa74d73c9",
+  "schedule_version": 1,
+  "new_producers": null,
+  "producer_signature": "SIG_K1_JxFfxGA1wZx9LCVjbrBb5nxTuJai7RUSiwRXyY866fYvZZyRtdmQFn9KJCqVHFAiYEsJpDb6dhTmHNDwipJm4rDiyhEmGa",
+  "transactions": [],
+  "id": "02e1c7888a92206573ae38d00e09366c7ba7bc54cd8b7996506f7d2a619c43ba",
+  "block_num": 48351112,
+  "ref_block_prefix": 3493375603
+}
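One practical gap the guide above leaves open is where a block number worth querying comes from. A hedged sketch, not part of the original how-to: `cleos get info` reports the chain's current head, which can seed `cleos get block`.

```sh
# Assumed companion step (not from the original doc). `cleos get info`
# returns chain state as JSON; head_block_num is a recent block to inspect.
HEAD=$(cleos -u https://api.testnet.eos.io get info | jq -r .head_block_num)
cleos -u https://api.testnet.eos.io get block "$HEAD"
```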
"SIG_K1_JxFfxGA1wZx9LCVjbrBb5nxTuJai7RUSiwRXyY866fYvZZyRtdmQFn9KJCqVHFAiYEsJpDb6dhTmHNDwipJm4rDiyhEmGa", + "transactions": [], + "id": "02e1c7888a92206573ae38d00e09366c7ba7bc54cd8b7996506f7d2a619c43ba", + "block_num": 48351112, + "ref_block_prefix": 3493375603 +} +``` From b01013e8c0e07fd64c9419f9b045106ee0f45855 Mon Sep 17 00:00:00 2001 From: Luis Paris Date: Thu, 4 Feb 2021 14:47:12 -0500 Subject: [PATCH 32/52] update how-to create a wallet :doc --- .../how-to-create-a-wallet.md | 49 +++++++++++++------ 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/docs/02_cleos/02_how-to-guides/how-to-create-a-wallet.md b/docs/02_cleos/02_how-to-guides/how-to-create-a-wallet.md index c69fcf8c5a3..4973217f161 100644 --- a/docs/02_cleos/02_how-to-guides/how-to-create-a-wallet.md +++ b/docs/02_cleos/02_how-to-guides/how-to-create-a-wallet.md @@ -1,43 +1,60 @@ ## Goal -Create a `keosd` wallet +Create a wallet using `keosd`. ## Before you begin -* Install the currently supported version of `cleos` +Make sure you meet the following requirements: -* Understand the following: - * What is an account - * What is a public and private key pair +* Familiarize with the [`cleos wallet create`](../03_command-reference/wallet/create.md) command and its parameters. +* Familiarize with the other [`cleos wallet`](../03_command-reference/wallet/index.md) commands. +* Install the currently supported version of `cleos`. + +[[info | Note]] +| `cleos` is bundled with the EOSIO software. [Installing EOSIO](../../00_install/index.md) will also install `cleos`. + +* Understand what an [account](https://developers.eos.io/welcome/latest/glossary/index/#account) is and its role in the blockchain. +* Understand [Accounts and Permissions](https://developers.eos.io/welcome/latest/protocol-guides/accounts_and_permissions) in the protocol documents. +* Understand what a [public](https://developers.eos.io/welcome/latest/glossary/index/#public-key) and [private](https://developers.eos.io/welcome/latest/glossary/index/#private-key) key pair is. ## Steps -Create a wallet and save the password to a file: +Perform the step below: + +Create a default or named wallet and save the wallet password to a file: ```sh -cleos wallet create --file password.pwd +cleos wallet create [-n named_wallet] -f ``` -This should produce similar output as below. Note the wallet is named `default` if no name is provided. +Where `file_to_save_pwd` is the name of the file to write the wallet password to and `named_wallet` is an optional parameter to assign a name to the wallet. + +Some examples are provided below: + +* Create a default wallet and save the password to the file `default_wallet.pwd`: + +**Example Output** +```sh +cleos wallet create -f default_wallet.pwd +``` ```console Creating wallet: default Save password to use in the future to unlock this wallet. Without password imported keys will not be retrievable. -saving password to password.pwd +saving password to default_wallet.pwd ``` -Alternatively, you can name a wallet with `-n` option: +* Create a named wallet `my_wallet` and save the password to the file `my_wallet.pwd`: + +**Example Output** ```sh -cleos wallet create -n named_wallet -f passwd +cleos wallet create -n my_wallet -f my_wallet.pwd ``` - -You should see something like the output below: - ```console -Creating wallet: named_wallet +Creating wallet: my_wallet Save password to use in the future to unlock this wallet. Without password imported keys will not be retrievable. 
From 7bafd42a73c72f20241711e8d2d32257db3b4a51 Mon Sep 17 00:00:00 2001
From: Luis Paris
Date: Thu, 4 Feb 2021 16:35:32 -0500
Subject: [PATCH 33/52] update how-to delegate cpu resource :doc

---
 .../how-to-delegate-CPU-resource.md | 41 +++++++++++++------
 1 file changed, 28 insertions(+), 13 deletions(-)

diff --git a/docs/02_cleos/02_how-to-guides/how-to-delegate-CPU-resource.md b/docs/02_cleos/02_how-to-guides/how-to-delegate-CPU-resource.md
index d559d25cbb6..5345bd83f63 100644
--- a/docs/02_cleos/02_how-to-guides/how-to-delegate-CPU-resource.md
+++ b/docs/02_cleos/02_how-to-guides/how-to-delegate-CPU-resource.md
@@ -1,32 +1,47 @@
 ## Goal

-Delegate resource for an account or application
+Delegate CPU bandwidth for an account or application.

 ## Before you begin

-* Install the currently supported version of `cleos`
+Make sure you meet the following requirements:

-* Ensure the reference system contracts from `eosio.contracts` repository is deployed and used to manage system resources
+* Familiarize yourself with the [`cleos system delegatebw`](../03_command-reference/system/system-delegatebw.md) command and its parameters.
+* Install the currently supported version of `cleos`.

-* Understand the following:
-  * What is an account
-  * What is network bandwidth
-  * What is CPU bandwidth
+[[info | Note]]
+| `cleos` is bundled with the EOSIO software. [Installing EOSIO](../../00_install/index.md) will also install `cleos`.
+
+* Ensure the reference system contracts from the [`eosio.contracts`](https://github.com/EOSIO/eosio.contracts) repository are deployed and used to manage system resources.
+* Understand what an [account](https://developers.eos.io/welcome/latest/glossary/index/#account) is and its role in the blockchain.
+* Understand [CPU bandwidth](https://developers.eos.io/welcome/latest/glossary/index/#cpu) in an EOSIO blockchain.
+* Understand [NET bandwidth](https://developers.eos.io/welcome/latest/glossary/index/#net) in an EOSIO blockchain.

 ## Steps

-Delegate 0.01 SYS CPU bandwidth from `bob` to `alice`
+Perform the step below:
+
+Delegate CPU bandwidth from a source account to a receiver account:

 ```sh
-cleos system delegatebw bob alice "0 SYS" "0.01 SYS"
+cleos system delegatebw <from> <receiver> <stake_net_quantity> <stake_cpu_quantity>
 ```

-You should see something below:
+Where `from` is the account to delegate bandwidth from, `receiver` is the account to receive the delegated bandwidth, and `stake_net_quantity` and/or `stake_cpu_quantity` is the amount of tokens to stake for network (NET) bandwidth and/or CPU bandwidth, respectively.
+
+Some examples are provided below:
+
+* Delegate 0.01 SYS CPU bandwidth from `bob` to `alice`:

-```console
+**Example Output**
+
+```sh
+cleos system delegatebw bob alice "0 SYS" "0.01 SYS"
+```
+```json
 executed transaction: 5487afafd67bf459a20fcc2dbc5d0c2f0d1f10e33123eaaa07088046fd18e3ae  192 bytes  503 us
-# eosio <= eosio::delegatebw {"from":"bob","receiver":"alice","stake_net_quantity":"0.0000 SYS","stake_cpu_quanti...
-# eosio.token <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 EOS","memo":"stake bandwidth"}
+# eosio <= eosio::delegatebw {"from":"bob","receiver":"alice","stake_net_quantity":"0.0000 SYS","stake_cpu_quantity":"0.0100 SYS"...
+# eosio.token <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 SYS","memo":"stake bandwidth"}
 # bob <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 SYS","memo":"stake bandwidth"}
 # eosio.stake <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 SYS","memo":"stake bandwidth"}
 ```
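For symmetry, an assumed sketch rather than a step from the original guide: the delegation above is reversed with `cleos system undelegatebw`. On the EOSIO reference system contracts the unstaked tokens return to the `from` account after a three-day refund delay.

```sh
# Assumed sketch: undo the CPU delegation from the example above.
# Arguments mirror delegatebw: from, receiver, unstake_net, unstake_cpu.
cleos system undelegatebw bob alice "0 SYS" "0.01 SYS"
```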
From f819d573d43843982ce50b5d5aa8472e1a010b2f Mon Sep 17 00:00:00 2001
From: Luis Paris
Date: Thu, 4 Feb 2021 16:35:55 -0500
Subject: [PATCH 34/52] update how-to delegate net resource :doc

---
 .../how-to-delegate-net-resource.md | 39 ++++++++++++-------
 1 file changed, 26 insertions(+), 13 deletions(-)

diff --git a/docs/02_cleos/02_how-to-guides/how-to-delegate-net-resource.md b/docs/02_cleos/02_how-to-guides/how-to-delegate-net-resource.md
index 3bd1260c4f8..118c36ba13a 100644
--- a/docs/02_cleos/02_how-to-guides/how-to-delegate-net-resource.md
+++ b/docs/02_cleos/02_how-to-guides/how-to-delegate-net-resource.md
@@ -1,33 +1,46 @@
 ## Goal

-Delegate resource for an account or application
+Delegate network bandwidth for an account or application.

 ## Before you begin

-* Install the currently supported version of `cleos`
+Make sure you meet the following requirements:

-* Ensure the reference system contracts from `eosio.contracts` repository is deployed and used to manage system resources
-
-* Understand the following:
-  * What is an account
-  * What is network bandwidth
-  * What is CPU bandwidth
+* Familiarize yourself with the [`cleos system delegatebw`](../03_command-reference/system/system-delegatebw.md) command and its parameters.
+* Install the currently supported version of `cleos`.
+[[info | Note]]
+| `cleos` is bundled with the EOSIO software. [Installing EOSIO](../../00_install/index.md) will also install `cleos`.
+* Ensure the reference system contracts from the [`eosio.contracts`](https://github.com/EOSIO/eosio.contracts) repository are deployed and used to manage system resources.
+* Understand what an [account](https://developers.eos.io/welcome/latest/glossary/index/#account) is and its role in the blockchain.
+* Understand [NET bandwidth](https://developers.eos.io/welcome/latest/glossary/index/#net) in an EOSIO blockchain.
+* Understand [CPU bandwidth](https://developers.eos.io/welcome/latest/glossary/index/#cpu) in an EOSIO blockchain.

 ## Steps

-Delegate 0.01 SYS network bandwidth from `bob` to `alice`
+Perform the step below:
+
+Delegate network bandwidth from a source account to a receiver account:

 ```sh
-cleos system delegatebw bob alice "0.01 SYS" "0 SYS"
+cleos system delegatebw <from> <receiver> <stake_net_quantity> <stake_cpu_quantity>
 ```

-You should see something below:
+Where `from` is the account to delegate bandwidth from, `receiver` is the account to receive the delegated bandwidth, and `stake_net_quantity` and/or `stake_cpu_quantity` is the amount of tokens to stake for network (NET) bandwidth and/or CPU bandwidth, respectively.
+
+Some examples are provided below:
+
+* Delegate 0.01 SYS network bandwidth from `bob` to `alice`:

-```console
+**Example Output**
+
+```sh
+cleos system delegatebw bob alice "0.01 SYS" "0 SYS"
+```
+```json
 executed transaction: 5487afafd67bf459a20fcc2dbc5d0c2f0d1f10e33123eaaa07088046fd18e3ae  192 bytes  503 us
-# eosio <= eosio::delegatebw {"from":"bob","receiver":"alice","stake_net_quantity":"0.0100 SYS","stake_cpu_quanti...
+# eosio <= eosio::delegatebw {"from":"bob","receiver":"alice","stake_net_quantity":"0.0100 SYS","stake_cpu_quantity":"0.0000 SYS"...
# eosio.token <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 SYS","memo":"stake bandwidth"} # bob <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 SYS","memo":"stake bandwidth"} # eosio.stake <= eosio.token::transfer {"from":"bob","to":"eosio.stake","quantity":"0.0010 SYS","memo":"stake bandwidth"} From 4271eefd88b2dce6e311262baf3d42f5322ea051 Mon Sep 17 00:00:00 2001 From: Luis Paris Date: Thu, 4 Feb 2021 17:30:35 -0500 Subject: [PATCH 35/52] fix link to getting started guide in README :doc --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3a34934f191..bd8088a046f 100644 --- a/README.md +++ b/README.md @@ -126,7 +126,7 @@ To uninstall the EOSIO built/installed binaries and dependencies, run: ## Getting Started -Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in the [Getting Started](https://developers.eos.io/welcome/latest/getting-started) walkthrough. +Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in the [Getting Started](https://developers.eos.io/welcome/latest/getting-started-guide) walkthrough. ## Contributing From fe115aec121657bb6d48660e05472b224a06e53f Mon Sep 17 00:00:00 2001 From: Scott Arnette Date: Fri, 5 Feb 2021 17:09:56 -0500 Subject: [PATCH 36/52] Fix skip vars being set to null. --- .cicd/generate-pipeline.sh | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 06762f8358e..67b549e2b22 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -116,7 +116,7 @@ echo $PLATFORMS_JSON_ARRAY | jq -cr '.[]' | while read -r PLATFORM_JSON; do agents: queue: "$BUILDKITE_BUILD_AGENT_QUEUE" timeout: ${TIMEOUT:-180} - skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_BUILD} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.] // empty')${SKIP_BUILD} EOF else @@ -155,7 +155,7 @@ EOF PROJECT_TAG: $(echo "$PLATFORM_JSON" | jq -r .HASHED_IMAGE_TAG) timeout: ${TIMEOUT:-180} agents: "queue=mac-anka-large-node-fleet" - skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_BUILD} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.] // empty')${SKIP_BUILD} EOF fi @@ -199,7 +199,7 @@ if [[ "$DCMAKE_BUILD_TYPE" != 'Debug' ]]; then manual: permit_on_passed: true timeout: ${TIMEOUT:-30} - skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_UNIT_TESTS} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.] // empty')${SKIP_UNIT_TESTS} EOF else @@ -228,7 +228,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-60} - skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_UNIT_TESTS} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.] // empty')${SKIP_UNIT_TESTS} EOF fi @@ -252,7 +252,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-30} - skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_WASM_SPEC_TESTS} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.] 
// empty')${SKIP_WASM_SPEC_TESTS} EOF else @@ -281,7 +281,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-60} - skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_WASM_SPEC_TESTS} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.] // empty')${SKIP_WASM_SPEC_TESTS} EOF fi @@ -308,7 +308,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-20} - skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_SERIAL_TESTS} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.] // empty')${SKIP_SERIAL_TESTS} EOF else @@ -337,7 +337,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-60} - skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_SERIAL_TESTS} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.] // empty')${SKIP_SERIAL_TESTS} EOF fi @@ -366,7 +366,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-180} - skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_LONG_RUNNING_TESTS:-true} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.] // empty')${SKIP_LONG_RUNNING_TESTS:-true} EOF else @@ -395,7 +395,7 @@ EOF manual: permit_on_passed: true timeout: ${TIMEOUT:-180} - skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.]')${SKIP_LONG_RUNNING_TESTS:-true} + skip: $(echo "$PLATFORM_JSON" | jq -r '.PLATFORM_SKIP_VAR | env[.] // empty')${SKIP_LONG_RUNNING_TESTS:-true} EOF fi From da07e4db2f91335fd45b3e15d246398b059fb26c Mon Sep 17 00:00:00 2001 From: Rusty Fleming Date: Wed, 10 Feb 2021 09:49:00 -0600 Subject: [PATCH 37/52] EPE-165: Improve logic for unlinkable blocks while sync'ing The current logic for handling unlinkable blocks while sync'ing closes the node prematurely when the node is flooded with unlinkable blocks in a short time window. This fix groups unlinkable blocks into time windows in order to manage the flooding condition. --- plugins/net_plugin/net_plugin.cpp | 75 ++++++++++++++++++++++++++++--- 1 file changed, 70 insertions(+), 5 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 7d8177f70e5..17e9b19c224 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -380,7 +380,6 @@ namespace eosio { constexpr auto def_send_buffer_size = 1024*1024*def_send_buffer_size_mb; constexpr auto def_max_write_queue_size = def_send_buffer_size*10; constexpr auto def_max_trx_in_progress_size = 100*1024*1024; // 100 MB - constexpr auto def_max_consecutive_rejected_blocks = 13; // num of rejected blocks before disconnect constexpr auto def_max_consecutive_immediate_connection_close = 9; // back off if client keeps closing constexpr auto def_max_clients = 25; // 0 for unlimited clients constexpr auto def_max_nodes_per_host = 1; @@ -526,6 +525,45 @@ namespace eosio { }; // queued_buffer + /// monitors the status of blocks as to whether a block is accepted (sync'd) or + /// rejected. It groups consecutive rejected blocks in a (configurable) time + /// window (rbw) and maintains a metric of the number of consecutive rejected block + /// time windows (rbws). 
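Before the class itself (which follows immediately in the patch), a self-contained sketch of the window arithmetic that comment describes, with std::chrono standing in for fc::time_point and all names assumed. The property it demonstrates: any number of rejections landing inside a single 2 ms window counts as at most one event, so flooding alone can no longer race a peer to the disconnect threshold.

```cpp
// Standalone sketch (assumed): mirrors the rejected-block-window logic below.
#include <chrono>
#include <cstdint>
#include <iostream>

struct rbw_sketch {
   using clock = std::chrono::steady_clock;
   bool in_accepted_state = true;
   clock::time_point window_start;
   uint32_t events = 0;
   static constexpr std::chrono::microseconds window_size{2000}; // 2ms, as in the patch
   static constexpr uint32_t max_windows = 13;

   void accepted() { in_accepted_state = true; events = 0; }
   void rejected() {
      const auto now = clock::now();
      if( !in_accepted_state ) {
         if( now - window_start < window_size ) return; // same window: not a new event
         ++events;                                      // a full window of rejects elapsed
         window_start = now;
         return;
      }
      in_accepted_state = false;                        // first reject after an accept
      window_start = now;
      events = 0;
   }
   bool max_events_violated() const { return events >= max_windows; }
};

int main() {
   rbw_sketch m;
   for( int i = 0; i < 1000; ++i ) m.rejected();        // burst within roughly one window
   // Under the old per-block counter this burst alone would blow past the
   // threshold of 13; here it registers at most a couple of window events.
   std::cout << m.events << ' ' << m.max_events_violated() << '\n';
}
```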
+   class block_status_monitor {
+   private:
+      bool in_accepted_state_ {true};              ///< indicates accepted (true) or rejected (false) state
+      fc::microseconds window_size_{2*1000};       ///< rbw time interval (2ms)
+      fc::time_point window_start_;                ///< The start of the recent rbw (0 implies not started)
+      uint32_t events_{0};                         ///< The number of consecutive rbws
+      const uint32_t max_consecutive_rejected_windows_{13};
+
+   public:
+      /// ctor
+      ///
+      /// @param[in] window_size          The time, in microseconds, of the rejected block window
+      /// @param[in] max_rejected_windows The max consecutive number of rejected block windows
+      /// @note Copy ctor is not allowed
+      explicit block_status_monitor(fc::microseconds window_size = fc::microseconds(2*1000),
+            uint32_t max_rejected_windows = 13) :
+         window_size_(window_size), max_consecutive_rejected_windows_(max_rejected_windows) {}
+      block_status_monitor( const block_status_monitor& ) = delete;
+      block_status_monitor( block_status_monitor&& ) = delete;
+      ~block_status_monitor() = default;
+      /// reset to initial state
+      void reset();
+      /// called when a block is accepted (sync_recv_block)
+      void accepted() { reset(); }
+      /// called when a block is rejected
+      void rejected();
+      /// returns number of consecutive rbws
+      auto events() const { return events_; }
+      /// indicates if the max number of consecutive rbws has been reached or exceeded
+      bool max_events_violated() const { return events_ >= max_consecutive_rejected_windows_; }
+      /// assignment not allowed
+      block_status_monitor& operator=( const block_status_monitor& ) = delete;
+      block_status_monitor& operator=( block_status_monitor&& ) = delete;
+   };
+
   class connection : public std::enable_shared_from_this<connection> {
   public:
      explicit connection( string endpoint );
@@ -576,6 +614,7 @@
      std::atomic<bool> syncing{false};
      uint16_t protocol_version = 0;
      uint16_t consecutive_rejected_blocks = 0;
+      block_status_monitor block_status_monitor_;
      std::atomic<uint16_t> consecutive_immediate_connection_close = 0;

      std::mutex response_expected_timer_mtx;
@@ -923,7 +962,7 @@
      self->flush_queues();
      self->connecting = false;
      self->syncing = false;
-      self->consecutive_rejected_blocks = 0;
+      self->block_status_monitor_.reset();
      ++self->consecutive_immediate_connection_close;
      bool has_last_req = false;
      {
@@ -1351,6 +1390,31 @@
      sync_wait();
   }

+   //-----------------------------------------------------------
+
+   void block_status_monitor::reset() {
+      in_accepted_state_ = true;
+      events_ = 0;
+   }
+
+   void block_status_monitor::rejected() {
+      const auto now = fc::time_point::now();
+
+      // in rejected state
+      if(!in_accepted_state_) {
+         const auto elapsed = now - window_start_;
+         if( elapsed < window_size_ ) {
+            return;
+         }
+         ++events_;
+         window_start_ = now;
+         return;
+      }
+
+      // switching to rejected state
+      in_accepted_state_ = false;
+      window_start_ = now;
+      events_ = 0;
+   }
   //-----------------------------------------------------------

   sync_manager::sync_manager( uint32_t req_span )
@@ -1743,9 +1807,10 @@

   // called from connection strand
   void sync_manager::rejected_block( const connection_ptr& c, uint32_t blk_num ) {
-      std::unique_lock g( sync_mtx );
-      if( ++c->consecutive_rejected_blocks > def_max_consecutive_rejected_blocks ) {
+      c->block_status_monitor_.rejected();
+      if( c->block_status_monitor_.max_events_violated()) {
         fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn", blk_num)("p", c->peer_name()) );
+         std::unique_lock g( sync_mtx );
         sync_last_requested_num = 0;
         sync_source.reset();
         g.unlock();
@@ -1779,7 +1844,7 @@
namespace eosio { c->close( false, true ); return; } - c->consecutive_rejected_blocks = 0; + c->block_status_monitor_.accepted(); sync_update_expected( c, blk_id, blk_num, blk_applied ); std::unique_lock g_sync( sync_mtx ); stages state = sync_state; From 09d1baa9ff86d6948f231b6e9e9d565cfb3be383 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 12 Feb 2021 16:34:33 -0500 Subject: [PATCH 38/52] Build: Don't check manifests at Docker Hub unless absolutely necessary --- .cicd/generate-base-images.sh | 75 ++++++++++++++++++----------------- 1 file changed, 39 insertions(+), 36 deletions(-) diff --git a/.cicd/generate-base-images.sh b/.cicd/generate-base-images.sh index e2c1f0337d8..2041c0d744c 100755 --- a/.cicd/generate-base-images.sh +++ b/.cicd/generate-base-images.sh @@ -5,55 +5,58 @@ set -eo pipefail # search for base image in docker registries echo '--- :docker: Build or Pull Base Image :minidisc:' echo "Looking for '$HASHED_IMAGE_TAG' container in our registries." -EXISTS_ALL='true' -EXISTS_DOCKER_HUB='false' -EXISTS_ECR='false' -for REGISTRY in ${CI_REGISTRIES[*]}; do - if [[ ! -z "$REGISTRY" ]]; then - MANIFEST_COMMAND="docker manifest inspect '$REGISTRY:$HASHED_IMAGE_TAG'" - echo "$ $MANIFEST_COMMAND" - set +e - eval $MANIFEST_COMMAND - MANIFEST_INSPECT_EXIT_STATUS="$?" - set -eo pipefail - if [[ "$MANIFEST_INSPECT_EXIT_STATUS" == '0' ]]; then - if [[ "$(echo "$REGISTRY" | grep -icP '[.]amazonaws[.]com/')" != '0' ]]; then - EXISTS_ECR='true' - elif [[ "$(echo "$REGISTRY" | grep -icP 'docker[.]io/')" != '0' ]]; then - EXISTS_DOCKER_HUB='true' - fi - else - EXISTS_ALL='false' - fi +export EXISTS_DOCKER_HUB='false' +export EXISTS_ECR='false' +MANIFEST_QUERY_REGISTRY="${MIRROR_REGISTRY:-$DOCKERHUB_CI_REGISTRY}" +MANIFEST_COMMAND="docker manifest inspect '$MANIFEST_QUERY_REGISTRY:$HASHED_IMAGE_TAG'" +echo "$ $MANIFEST_COMMAND" +set +e +eval $MANIFEST_COMMAND +MANIFEST_INSPECT_EXIT_STATUS="$?" +set -eo pipefail +if [[ "$MANIFEST_INSPECT_EXIT_STATUS" == '0' ]]; then + if [[ "$(echo "$REGISTRY" | grep -icP 'docker[.]io/')" != '0' ]]; then + export EXISTS_DOCKER_HUB='true' + else + export EXISTS_ECR='true' + fi +fi +# pull and copy as-necessary +if [[ "$EXISTS_ECR" == 'true' ]]; then + DOCKER_PULL_COMMAND="docker pull '$MANIFEST_QUERY_REGISTRY:$HASHED_IMAGE_TAG'" + echo "$ $DOCKER_PULL_COMMAND" + eval $DOCKER_PULL_COMMAND + # copy, if necessary + if [[ "$EXISTS_DOCKER_HUB" == 'false' ]]; then + # tag + DOCKER_TAG_COMMAND="docker tag '$MANIFEST_QUERY_REGISTRY:$HASHED_IMAGE_TAG' '$DOCKERHUB_CI_REGISTRY:$HASHED_IMAGE_TAG'" + echo "$ $DOCKER_TAG_COMMAND" + eval $DOCKER_TAG_COMMAND + # push + DOCKER_PUSH_COMMAND="docker push '$DOCKERHUB_CI_REGISTRY:$HASHED_IMAGE_TAG'" + echo "$ $DOCKER_PUSH_COMMAND" + eval $DOCKER_PUSH_COMMAND + export EXISTS_DOCKER_HUB='true' fi -done -# copy, if possible, since it is so much faster -if [[ "$EXISTS_ECR" == 'false' && "$EXISTS_DOCKER_HUB" == 'true' && "$OVERWRITE_BASE_IMAGE" != 'true' && ! -z "$MIRROR_REGISTRY" ]]; then - echo 'Attempting copy from Docker Hub to the mirror instead of a new base image build.' +elif [[ "$EXISTS_DOCKER_HUB" == 'true' && ! -z "$MIRROR_REGISTRY" ]]; then DOCKER_PULL_COMMAND="docker pull '$DOCKERHUB_CI_REGISTRY:$HASHED_IMAGE_TAG'" echo "$ $DOCKER_PULL_COMMAND" - set +e eval $DOCKER_PULL_COMMAND - DOCKER_PULL_EXIT_STATUS="$?" - set -eo pipefail - if [[ "$DOCKER_PULL_EXIT_STATUS" == '0' ]]; then - echo 'Pull from Docker Hub worked! Pushing to mirror.' 
+ # copy, if necessary + if [[ "$EXISTS_DOCKER_HUB" == 'false' ]]; then # tag - DOCKER_TAG_COMMAND="docker tag '$DOCKERHUB_CI_REGISTRY:$HASHED_IMAGE_TAG' '$MIRROR_REGISTRY:$HASHED_IMAGE_TAG'" + DOCKER_TAG_COMMAND="docker tag '$DOCKERHUB_CI_REGISTRY:$HASHED_IMAGE_TAG' '$MANIFEST_QUERY_REGISTRY:$HASHED_IMAGE_TAG'" echo "$ $DOCKER_TAG_COMMAND" eval $DOCKER_TAG_COMMAND # push DOCKER_PUSH_COMMAND="docker push '$MIRROR_REGISTRY:$HASHED_IMAGE_TAG'" echo "$ $DOCKER_PUSH_COMMAND" eval $DOCKER_PUSH_COMMAND - EXISTS_ALL='true' - EXISTS_ECR='true' - else - echo 'Pull from Docker Hub failed, rebuilding base image from scratch.' + export EXISTS_ECR='true' fi fi # esplain yerself -if [[ "$EXISTS_ALL" == 'false' ]]; then +if [[ "$EXISTS_DOCKER_HUB" == 'false' && "$EXISTS_ECR" == 'false' ]]; then echo 'Building base image from scratch.' elif [[ "$OVERWRITE_BASE_IMAGE" == 'true' ]]; then echo "OVERWRITE_BASE_IMAGE is set to 'true', building from scratch and pushing to docker registries." @@ -61,7 +64,7 @@ elif [[ "$FORCE_BASE_IMAGE" == 'true' ]]; then echo "FORCE_BASE_IMAGE is set to 'true', building from scratch and NOT pushing to docker registries." fi # build, if neccessary -if [[ "$EXISTS_ALL" == 'false' || "$FORCE_BASE_IMAGE" == 'true' || "$OVERWRITE_BASE_IMAGE" == 'true' ]]; then # if we cannot pull the image, we build and push it first +if [[ ("$EXISTS_DOCKER_HUB" == 'false' && "$EXISTS_ECR" == 'false') || "$FORCE_BASE_IMAGE" == 'true' || "$OVERWRITE_BASE_IMAGE" == 'true' ]]; then # if we cannot pull the image, we build and push it first export DOCKER_BUILD_COMMAND="docker build --no-cache -t 'ci:$HASHED_IMAGE_TAG' -f '$CICD_DIR/platforms/$PLATFORM_TYPE/$IMAGE_TAG.dockerfile' ." echo "$ $DOCKER_BUILD_COMMAND" eval $DOCKER_BUILD_COMMAND From 47733e5c781cde4e6a6d6fa1954be34060554b65 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 12 Feb 2021 16:50:16 -0500 Subject: [PATCH 39/52] Simplify variables --- .cicd/generate-base-images.sh | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.cicd/generate-base-images.sh b/.cicd/generate-base-images.sh index 2041c0d744c..6aa219eb349 100755 --- a/.cicd/generate-base-images.sh +++ b/.cicd/generate-base-images.sh @@ -7,8 +7,7 @@ echo '--- :docker: Build or Pull Base Image :minidisc:' echo "Looking for '$HASHED_IMAGE_TAG' container in our registries." export EXISTS_DOCKER_HUB='false' export EXISTS_ECR='false' -MANIFEST_QUERY_REGISTRY="${MIRROR_REGISTRY:-$DOCKERHUB_CI_REGISTRY}" -MANIFEST_COMMAND="docker manifest inspect '$MANIFEST_QUERY_REGISTRY:$HASHED_IMAGE_TAG'" +MANIFEST_COMMAND="docker manifest inspect '${MIRROR_REGISTRY:-$DOCKERHUB_CI_REGISTRY}:$HASHED_IMAGE_TAG'" echo "$ $MANIFEST_COMMAND" set +e eval $MANIFEST_COMMAND @@ -23,13 +22,13 @@ if [[ "$MANIFEST_INSPECT_EXIT_STATUS" == '0' ]]; then fi # pull and copy as-necessary if [[ "$EXISTS_ECR" == 'true' ]]; then - DOCKER_PULL_COMMAND="docker pull '$MANIFEST_QUERY_REGISTRY:$HASHED_IMAGE_TAG'" + DOCKER_PULL_COMMAND="docker pull '$MIRROR_REGISTRY:$HASHED_IMAGE_TAG'" echo "$ $DOCKER_PULL_COMMAND" eval $DOCKER_PULL_COMMAND # copy, if necessary if [[ "$EXISTS_DOCKER_HUB" == 'false' ]]; then # tag - DOCKER_TAG_COMMAND="docker tag '$MANIFEST_QUERY_REGISTRY:$HASHED_IMAGE_TAG' '$DOCKERHUB_CI_REGISTRY:$HASHED_IMAGE_TAG'" + DOCKER_TAG_COMMAND="docker tag '$MIRROR_REGISTRY:$HASHED_IMAGE_TAG' '$DOCKERHUB_CI_REGISTRY:$HASHED_IMAGE_TAG'" echo "$ $DOCKER_TAG_COMMAND" eval $DOCKER_TAG_COMMAND # push @@ -45,7 +44,7 @@ elif [[ "$EXISTS_DOCKER_HUB" == 'true' && ! 
-z "$MIRROR_REGISTRY" ]]; then # copy, if necessary if [[ "$EXISTS_DOCKER_HUB" == 'false' ]]; then # tag - DOCKER_TAG_COMMAND="docker tag '$DOCKERHUB_CI_REGISTRY:$HASHED_IMAGE_TAG' '$MANIFEST_QUERY_REGISTRY:$HASHED_IMAGE_TAG'" + DOCKER_TAG_COMMAND="docker tag '$DOCKERHUB_CI_REGISTRY:$HASHED_IMAGE_TAG' '$MIRROR_REGISTRY:$HASHED_IMAGE_TAG'" echo "$ $DOCKER_TAG_COMMAND" eval $DOCKER_TAG_COMMAND # push From 81eba82ba8c700fb543b974ff6f0a96bb31f8c35 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 12 Feb 2021 16:53:41 -0500 Subject: [PATCH 40/52] Bug fixes --- .cicd/generate-base-images.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.cicd/generate-base-images.sh b/.cicd/generate-base-images.sh index 6aa219eb349..747f2045340 100755 --- a/.cicd/generate-base-images.sh +++ b/.cicd/generate-base-images.sh @@ -21,7 +21,7 @@ if [[ "$MANIFEST_INSPECT_EXIT_STATUS" == '0' ]]; then fi fi # pull and copy as-necessary -if [[ "$EXISTS_ECR" == 'true' ]]; then +if [[ "$EXISTS_ECR" == 'true' && ! -z "$MIRROR_REGISTRY" ]]; then DOCKER_PULL_COMMAND="docker pull '$MIRROR_REGISTRY:$HASHED_IMAGE_TAG'" echo "$ $DOCKER_PULL_COMMAND" eval $DOCKER_PULL_COMMAND @@ -37,12 +37,12 @@ if [[ "$EXISTS_ECR" == 'true' ]]; then eval $DOCKER_PUSH_COMMAND export EXISTS_DOCKER_HUB='true' fi -elif [[ "$EXISTS_DOCKER_HUB" == 'true' && ! -z "$MIRROR_REGISTRY" ]]; then +elif [[ "$EXISTS_DOCKER_HUB" == 'true' ]]; then DOCKER_PULL_COMMAND="docker pull '$DOCKERHUB_CI_REGISTRY:$HASHED_IMAGE_TAG'" echo "$ $DOCKER_PULL_COMMAND" eval $DOCKER_PULL_COMMAND # copy, if necessary - if [[ "$EXISTS_DOCKER_HUB" == 'false' ]]; then + if [[ "$EXISTS_ECR" == 'false' && ! -z "$MIRROR_REGISTRY" ]]; then # tag DOCKER_TAG_COMMAND="docker tag '$DOCKERHUB_CI_REGISTRY:$HASHED_IMAGE_TAG' '$MIRROR_REGISTRY:$HASHED_IMAGE_TAG'" echo "$ $DOCKER_TAG_COMMAND" From a52fc1a40072b0bab324d195453ab317d32f82d2 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 12 Feb 2021 16:54:24 -0500 Subject: [PATCH 41/52] Generic names --- .cicd/generate-base-images.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.cicd/generate-base-images.sh b/.cicd/generate-base-images.sh index 747f2045340..b6731a51f6f 100755 --- a/.cicd/generate-base-images.sh +++ b/.cicd/generate-base-images.sh @@ -6,7 +6,7 @@ set -eo pipefail echo '--- :docker: Build or Pull Base Image :minidisc:' echo "Looking for '$HASHED_IMAGE_TAG' container in our registries." export EXISTS_DOCKER_HUB='false' -export EXISTS_ECR='false' +export EXISTS_MIRROR='false' MANIFEST_COMMAND="docker manifest inspect '${MIRROR_REGISTRY:-$DOCKERHUB_CI_REGISTRY}:$HASHED_IMAGE_TAG'" echo "$ $MANIFEST_COMMAND" set +e @@ -17,11 +17,11 @@ if [[ "$MANIFEST_INSPECT_EXIT_STATUS" == '0' ]]; then if [[ "$(echo "$REGISTRY" | grep -icP 'docker[.]io/')" != '0' ]]; then export EXISTS_DOCKER_HUB='true' else - export EXISTS_ECR='true' + export EXISTS_MIRROR='true' fi fi # pull and copy as-necessary -if [[ "$EXISTS_ECR" == 'true' && ! -z "$MIRROR_REGISTRY" ]]; then +if [[ "$EXISTS_MIRROR" == 'true' && ! -z "$MIRROR_REGISTRY" ]]; then DOCKER_PULL_COMMAND="docker pull '$MIRROR_REGISTRY:$HASHED_IMAGE_TAG'" echo "$ $DOCKER_PULL_COMMAND" eval $DOCKER_PULL_COMMAND @@ -42,7 +42,7 @@ elif [[ "$EXISTS_DOCKER_HUB" == 'true' ]]; then echo "$ $DOCKER_PULL_COMMAND" eval $DOCKER_PULL_COMMAND # copy, if necessary - if [[ "$EXISTS_ECR" == 'false' && ! -z "$MIRROR_REGISTRY" ]]; then + if [[ "$EXISTS_MIRROR" == 'false' && ! 
-z "$MIRROR_REGISTRY" ]]; then # tag DOCKER_TAG_COMMAND="docker tag '$DOCKERHUB_CI_REGISTRY:$HASHED_IMAGE_TAG' '$MIRROR_REGISTRY:$HASHED_IMAGE_TAG'" echo "$ $DOCKER_TAG_COMMAND" @@ -51,11 +51,11 @@ elif [[ "$EXISTS_DOCKER_HUB" == 'true' ]]; then DOCKER_PUSH_COMMAND="docker push '$MIRROR_REGISTRY:$HASHED_IMAGE_TAG'" echo "$ $DOCKER_PUSH_COMMAND" eval $DOCKER_PUSH_COMMAND - export EXISTS_ECR='true' + export EXISTS_MIRROR='true' fi fi # esplain yerself -if [[ "$EXISTS_DOCKER_HUB" == 'false' && "$EXISTS_ECR" == 'false' ]]; then +if [[ "$EXISTS_DOCKER_HUB" == 'false' && "$EXISTS_MIRROR" == 'false' ]]; then echo 'Building base image from scratch.' elif [[ "$OVERWRITE_BASE_IMAGE" == 'true' ]]; then echo "OVERWRITE_BASE_IMAGE is set to 'true', building from scratch and pushing to docker registries." @@ -63,7 +63,7 @@ elif [[ "$FORCE_BASE_IMAGE" == 'true' ]]; then echo "FORCE_BASE_IMAGE is set to 'true', building from scratch and NOT pushing to docker registries." fi # build, if neccessary -if [[ ("$EXISTS_DOCKER_HUB" == 'false' && "$EXISTS_ECR" == 'false') || "$FORCE_BASE_IMAGE" == 'true' || "$OVERWRITE_BASE_IMAGE" == 'true' ]]; then # if we cannot pull the image, we build and push it first +if [[ ("$EXISTS_DOCKER_HUB" == 'false' && "$EXISTS_MIRROR" == 'false') || "$FORCE_BASE_IMAGE" == 'true' || "$OVERWRITE_BASE_IMAGE" == 'true' ]]; then # if we cannot pull the image, we build and push it first export DOCKER_BUILD_COMMAND="docker build --no-cache -t 'ci:$HASHED_IMAGE_TAG' -f '$CICD_DIR/platforms/$PLATFORM_TYPE/$IMAGE_TAG.dockerfile' ." echo "$ $DOCKER_BUILD_COMMAND" eval $DOCKER_BUILD_COMMAND From f5af2df2a4c4c3a36588e4ab8c0775472ee65f0e Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 12 Feb 2021 17:12:53 -0500 Subject: [PATCH 42/52] Avoid hitting Docker Hub in docker tag/label step, where possible --- .cicd/docker-tag.sh | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/.cicd/docker-tag.sh b/.cicd/docker-tag.sh index 2e922d1a9a5..ea71d20bc20 100755 --- a/.cicd/docker-tag.sh +++ b/.cicd/docker-tag.sh @@ -9,23 +9,17 @@ echo '$ echo ${#CONTRACT_REGISTRIES[*]} # array length' echo ${#CONTRACT_REGISTRIES[*]} echo '$ echo ${CONTRACT_REGISTRIES[*]} # array' echo ${CONTRACT_REGISTRIES[*]} +export IMAGE="${MIRROR_REGISTRY:-$DOCKERHUB_CI_REGISTRY}:$PREFIX-$BUILDKITE_COMMIT-$PLATFORM_TYPE" # pull echo '+++ :arrow_down: Pulling Container(s)' -for REGISTRY in ${CONTRACT_REGISTRIES[*]}; do - if [[ ! -z "$REGISTRY" ]]; then - echo "Pulling from '$REGISTRY'." - IMAGE="$REGISTRY:$PREFIX-$BUILDKITE_COMMIT-$PLATFORM_TYPE" - DOCKER_PULL_COMMAND="docker pull '$IMAGE'" - echo "$ $DOCKER_PULL_COMMAND" - eval $DOCKER_PULL_COMMAND - fi -done +DOCKER_PULL_COMMAND="docker pull '$IMAGE'" +echo "$ $DOCKER_PULL_COMMAND" +eval $DOCKER_PULL_COMMAND # tag echo '+++ :label: Tagging Container(s)' for REGISTRY in ${CONTRACT_REGISTRIES[*]}; do if [[ ! -z "$REGISTRY" ]]; then echo "Tagging for registry $REGISTRY." 
- IMAGE="$REGISTRY:$PREFIX-$BUILDKITE_COMMIT-$PLATFORM_TYPE" DOCKER_TAG_COMMAND="docker tag '$IMAGE' '$REGISTRY:$PREFIX-$SANITIZED_BRANCH'" echo "$ $DOCKER_TAG_COMMAND" eval $DOCKER_TAG_COMMAND From e287137eaf744c11f31143986f49aa452bed8459 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 12 Feb 2021 18:13:25 -0500 Subject: [PATCH 43/52] Don't ever fail while cleaning up images --- .cicd/docker-tag.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.cicd/docker-tag.sh b/.cicd/docker-tag.sh index ea71d20bc20..87d09bf0f08 100755 --- a/.cicd/docker-tag.sh +++ b/.cicd/docker-tag.sh @@ -50,15 +50,15 @@ echo '--- :put_litter_in_its_place: Cleaning Up' for REGISTRY in ${CONTRACT_REGISTRIES[*]}; do if [[ ! -z "$REGISTRY" ]]; then echo "Cleaning up from $REGISTRY." - DOCKER_RMI_COMMAND="docker rmi '$REGISTRY:$PREFIX-$SANITIZED_BRANCH'" + DOCKER_RMI_COMMAND="docker rmi '$REGISTRY:$PREFIX-$SANITIZED_BRANCH' || :" echo "$ $DOCKER_RMI_COMMAND" eval $DOCKER_RMI_COMMAND if [[ ! -z "$BUILDKITE_TAG" && "$SANITIZED_BRANCH" != "$SANITIZED_TAG" ]]; then - DOCKER_RMI_COMMAND="docker rmi '$REGISTRY:$PREFIX-$SANITIZED_TAG'" + DOCKER_RMI_COMMAND="docker rmi '$REGISTRY:$PREFIX-$SANITIZED_TAG' || :" echo "$ $DOCKER_RMI_COMMAND" eval $DOCKER_RMI_COMMAND fi - DOCKER_RMI_COMMAND="docker rmi '$REGISTRY:$PREFIX-$BUILDKITE_COMMIT-$PLATFORM_TYPE'" + DOCKER_RMI_COMMAND="docker rmi '$REGISTRY:$PREFIX-$BUILDKITE_COMMIT-$PLATFORM_TYPE' || :" echo "$ $DOCKER_RMI_COMMAND" eval $DOCKER_RMI_COMMAND fi From a33e3ae173ad9dd7d390c651910996e305811baa Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 12 Feb 2021 18:14:14 -0500 Subject: [PATCH 44/52] Clean up container by commit without platform type, should it be present --- .cicd/docker-tag.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.cicd/docker-tag.sh b/.cicd/docker-tag.sh index 87d09bf0f08..85142e60b52 100755 --- a/.cicd/docker-tag.sh +++ b/.cicd/docker-tag.sh @@ -53,6 +53,9 @@ for REGISTRY in ${CONTRACT_REGISTRIES[*]}; do DOCKER_RMI_COMMAND="docker rmi '$REGISTRY:$PREFIX-$SANITIZED_BRANCH' || :" echo "$ $DOCKER_RMI_COMMAND" eval $DOCKER_RMI_COMMAND + DOCKER_RMI_COMMAND="docker rmi '$REGISTRY:$PREFIX-$BUILDKITE_COMMIT' || :" + echo "$ $DOCKER_RMI_COMMAND" + eval $DOCKER_RMI_COMMAND if [[ ! -z "$BUILDKITE_TAG" && "$SANITIZED_BRANCH" != "$SANITIZED_TAG" ]]; then DOCKER_RMI_COMMAND="docker rmi '$REGISTRY:$PREFIX-$SANITIZED_TAG' || :" echo "$ $DOCKER_RMI_COMMAND" From 63d31325e17e32521251be8f588cd2a24a6f22d3 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 12 Feb 2021 20:32:40 -0500 Subject: [PATCH 45/52] Support skipping public docker build step --- .cicd/generate-pipeline.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/generate-pipeline.sh b/.cicd/generate-pipeline.sh index 67b549e2b22..ba9ed3e21b2 100755 --- a/.cicd/generate-pipeline.sh +++ b/.cicd/generate-pipeline.sh @@ -606,7 +606,7 @@ cat < Date: Wed, 24 Feb 2021 17:12:38 -0500 Subject: [PATCH 46/52] Pin cmake and boost versions for unpinned builds at this time. 
--- .cicd/platforms/unpinned/macos-10.14-unpinned.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.cicd/platforms/unpinned/macos-10.14-unpinned.sh b/.cicd/platforms/unpinned/macos-10.14-unpinned.sh index 44f32c7a65a..49487b34c0f 100755 --- a/.cicd/platforms/unpinned/macos-10.14-unpinned.sh +++ b/.cicd/platforms/unpinned/macos-10.14-unpinned.sh @@ -1,8 +1,12 @@ #!/bin/bash set -eo pipefail -VERSION=2 +VERSION=1 brew update -brew install git cmake python libtool libusb graphviz automake wget gmp llvm@7 pkgconfig doxygen openssl@1.1 jq boost || : +brew install git python libtool libusb graphviz automake wget gmp llvm@7 pkgconfig doxygen openssl@1.1 jq || : +curl -LO https://raw.githubusercontent.com/Homebrew/homebrew-core/0b9ee3127763e8ebce62599bae85f37ab1687622/Formula/boost.rb +brew install -f boost.rb +curl -LO https://raw.githubusercontent.com/Homebrew/homebrew-core/eddff4dd64c441c60df3badbe081e82dfb223cf9/Formula/cmake.rb +brew install -f cmake.rb # install mongoDB cd ~ curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-3.6.3.tgz From 61683f82e5e985d072982bc51d0d06126b47dcbe Mon Sep 17 00:00:00 2001 From: Scott Arnette Date: Wed, 24 Feb 2021 17:18:42 -0500 Subject: [PATCH 47/52] Boost appears to be fine locally. Try using known cmake version. --- .cicd/platforms/unpinned/macos-10.14-unpinned.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.cicd/platforms/unpinned/macos-10.14-unpinned.sh b/.cicd/platforms/unpinned/macos-10.14-unpinned.sh index 49487b34c0f..6ee448bb96a 100755 --- a/.cicd/platforms/unpinned/macos-10.14-unpinned.sh +++ b/.cicd/platforms/unpinned/macos-10.14-unpinned.sh @@ -2,9 +2,7 @@ set -eo pipefail VERSION=1 brew update -brew install git python libtool libusb graphviz automake wget gmp llvm@7 pkgconfig doxygen openssl@1.1 jq || : -curl -LO https://raw.githubusercontent.com/Homebrew/homebrew-core/0b9ee3127763e8ebce62599bae85f37ab1687622/Formula/boost.rb -brew install -f boost.rb +brew install git python libtool libusb graphviz automake wget gmp llvm@7 pkgconfig doxygen openssl@1.1 boost jq || : curl -LO https://raw.githubusercontent.com/Homebrew/homebrew-core/eddff4dd64c441c60df3badbe081e82dfb223cf9/Formula/cmake.rb brew install -f cmake.rb # install mongoDB From 1b12bd956d42a51ea0af8682007d2f75aec19f0c Mon Sep 17 00:00:00 2001 From: Scott Arnette Date: Thu, 25 Feb 2021 09:21:16 -0500 Subject: [PATCH 48/52] Maybe boost isn't fine. Try pinning boost version. 
--- .cicd/platforms/unpinned/macos-10.14-unpinned.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.cicd/platforms/unpinned/macos-10.14-unpinned.sh b/.cicd/platforms/unpinned/macos-10.14-unpinned.sh index 6ee448bb96a..49487b34c0f 100755 --- a/.cicd/platforms/unpinned/macos-10.14-unpinned.sh +++ b/.cicd/platforms/unpinned/macos-10.14-unpinned.sh @@ -2,7 +2,9 @@ set -eo pipefail VERSION=1 brew update -brew install git python libtool libusb graphviz automake wget gmp llvm@7 pkgconfig doxygen openssl@1.1 boost jq || : +brew install git python libtool libusb graphviz automake wget gmp llvm@7 pkgconfig doxygen openssl@1.1 jq || : +curl -LO https://raw.githubusercontent.com/Homebrew/homebrew-core/0b9ee3127763e8ebce62599bae85f37ab1687622/Formula/boost.rb +brew install -f boost.rb curl -LO https://raw.githubusercontent.com/Homebrew/homebrew-core/eddff4dd64c441c60df3badbe081e82dfb223cf9/Formula/cmake.rb brew install -f cmake.rb # install mongoDB From 29901dd651ef3ebe3ada54e5038cdda8d4d36cb8 Mon Sep 17 00:00:00 2001 From: Scott Arnette Date: Thu, 25 Feb 2021 10:15:59 -0500 Subject: [PATCH 49/52] Confirmed boost is the issue. --- .cicd/platforms/unpinned/macos-10.14-unpinned.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.cicd/platforms/unpinned/macos-10.14-unpinned.sh b/.cicd/platforms/unpinned/macos-10.14-unpinned.sh index 49487b34c0f..c2e5f65d79b 100755 --- a/.cicd/platforms/unpinned/macos-10.14-unpinned.sh +++ b/.cicd/platforms/unpinned/macos-10.14-unpinned.sh @@ -2,11 +2,9 @@ set -eo pipefail VERSION=1 brew update -brew install git python libtool libusb graphviz automake wget gmp llvm@7 pkgconfig doxygen openssl@1.1 jq || : +brew install git python libtool libusb graphviz automake wget gmp llvm@7 pkgconfig doxygen openssl@1.1 cmake jq || : curl -LO https://raw.githubusercontent.com/Homebrew/homebrew-core/0b9ee3127763e8ebce62599bae85f37ab1687622/Formula/boost.rb brew install -f boost.rb -curl -LO https://raw.githubusercontent.com/Homebrew/homebrew-core/eddff4dd64c441c60df3badbe081e82dfb223cf9/Formula/cmake.rb -brew install -f cmake.rb # install mongoDB cd ~ curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-3.6.3.tgz From bfa374219dc22257af75224c028efba0c684c6c4 Mon Sep 17 00:00:00 2001 From: Scott Arnette Date: Thu, 25 Feb 2021 10:18:11 -0500 Subject: [PATCH 50/52] Ensure the boost dependency is listed. 
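The technique patches 46 through 49 keep circling is worth stating once in isolation: when the floating `brew install boost` breaks, fetch the formula file as of a known homebrew-core commit and install it directly. A sketch of the pattern (the hash is the boost pin those patches use; `-f` forces installation from the local formula file, a form that worked on the Homebrew of that era):

```sh
# Pin a Homebrew formula to a fixed homebrew-core commit.
curl -LO https://raw.githubusercontent.com/Homebrew/homebrew-core/0b9ee3127763e8ebce62599bae85f37ab1687622/Formula/boost.rb
brew install -f boost.rb
```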
--- .cicd/platforms/unpinned/macos-10.14-unpinned.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cicd/platforms/unpinned/macos-10.14-unpinned.sh b/.cicd/platforms/unpinned/macos-10.14-unpinned.sh index c2e5f65d79b..79992322746 100755 --- a/.cicd/platforms/unpinned/macos-10.14-unpinned.sh +++ b/.cicd/platforms/unpinned/macos-10.14-unpinned.sh @@ -2,7 +2,7 @@ set -eo pipefail VERSION=1 brew update -brew install git python libtool libusb graphviz automake wget gmp llvm@7 pkgconfig doxygen openssl@1.1 cmake jq || : +brew install git icu4c python libtool libusb graphviz automake wget gmp llvm@7 pkgconfig doxygen openssl@1.1 cmake jq || : curl -LO https://raw.githubusercontent.com/Homebrew/homebrew-core/0b9ee3127763e8ebce62599bae85f37ab1687622/Formula/boost.rb brew install -f boost.rb # install mongoDB From 941bf030654c99fbb2a360ede38b9576f1a3830c Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Thu, 25 Feb 2021 13:44:20 -0500 Subject: [PATCH 51/52] Bump version from v2.0.9 to v2.0.10 --- CMakeLists.txt | 2 +- README.md | 12 ++++++------ docs/00_install/00_install-prebuilt-binaries.md | 12 ++++++------ 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a268bcc4e10..d01fa392dd3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -25,7 +25,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 2) set(VERSION_MINOR 0) -set(VERSION_PATCH 9) +set(VERSION_PATCH 10) #set(VERSION_SUFFIX rc3) if(VERSION_SUFFIX) diff --git a/README.md b/README.md index bd8088a046f..77477dd6c12 100644 --- a/README.md +++ b/README.md @@ -74,13 +74,13 @@ brew remove eosio #### Ubuntu 18.04 Package Install ```sh -wget https://github.com/eosio/eos/releases/download/v2.0.9/eosio_2.0.9-1-ubuntu-18.04_amd64.deb -sudo apt install ./eosio_2.0.9-1-ubuntu-18.04_amd64.deb +wget https://github.com/eosio/eos/releases/download/v2.0.10/eosio_2.0.10-1-ubuntu-18.04_amd64.deb +sudo apt install ./eosio_2.0.10-1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Package Install ```sh -wget https://github.com/eosio/eos/releases/download/v2.0.9/eosio_2.0.9-1-ubuntu-16.04_amd64.deb -sudo apt install ./eosio_2.0.9-1-ubuntu-16.04_amd64.deb +wget https://github.com/eosio/eos/releases/download/v2.0.10/eosio_2.0.10-1-ubuntu-16.04_amd64.deb +sudo apt install ./eosio_2.0.10-1-ubuntu-16.04_amd64.deb ``` #### Ubuntu Package Uninstall ```sh @@ -91,8 +91,8 @@ sudo apt remove eosio #### RPM Package Install ```sh -wget https://github.com/eosio/eos/releases/download/v2.0.9/eosio-2.0.9-1.el7.x86_64.rpm -sudo yum install ./eosio-2.0.9-1.el7.x86_64.rpm +wget https://github.com/eosio/eos/releases/download/v2.0.10/eosio-2.0.10-1.el7.x86_64.rpm +sudo yum install ./eosio-2.0.10-1.el7.x86_64.rpm ``` #### RPM Package Uninstall ```sh diff --git a/docs/00_install/00_install-prebuilt-binaries.md b/docs/00_install/00_install-prebuilt-binaries.md index f014b88f1ee..d3d366961c7 100644 --- a/docs/00_install/00_install-prebuilt-binaries.md +++ b/docs/00_install/00_install-prebuilt-binaries.md @@ -25,13 +25,13 @@ brew remove eosio #### Ubuntu 18.04 Package Install ```sh -wget https://github.com/eosio/eos/releases/download/v2.0.9/eosio_2.0.9-1-ubuntu-18.04_amd64.deb -sudo apt install ./eosio_2.0.9-1-ubuntu-18.04_amd64.deb +wget https://github.com/eosio/eos/releases/download/v2.0.10/eosio_2.0.10-1-ubuntu-18.04_amd64.deb +sudo apt install ./eosio_2.0.10-1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Package Install ```sh -wget 
https://github.com/eosio/eos/releases/download/v2.0.9/eosio_2.0.9-1-ubuntu-16.04_amd64.deb -sudo apt install ./eosio_2.0.9-1-ubuntu-16.04_amd64.deb +wget https://github.com/eosio/eos/releases/download/v2.0.10/eosio_2.0.10-1-ubuntu-16.04_amd64.deb +sudo apt install ./eosio_2.0.10-1-ubuntu-16.04_amd64.deb ``` #### Ubuntu Package Uninstall ```sh @@ -42,8 +42,8 @@ sudo apt remove eosio #### RPM Package Install ```sh -wget https://github.com/eosio/eos/releases/download/v2.0.9/eosio-2.0.9-1.el7.x86_64.rpm -sudo yum install ./eosio-2.0.9-1.el7.x86_64.rpm +wget https://github.com/eosio/eos/releases/download/v2.0.10/eosio-2.0.10-1.el7.x86_64.rpm +sudo yum install ./eosio-2.0.10-1.el7.x86_64.rpm ``` #### RPM Package Uninstall ```sh From 9c254a9c3566768d0035ac58771a4c7bf7e86e56 Mon Sep 17 00:00:00 2001 From: johndebord Date: Thu, 25 Feb 2021 13:29:20 -0500 Subject: [PATCH 52/52] Consolidated Security Fixes for 2.0.10 - Fix issue with account query db that could result in incorrect data or hung processes - Implement a Subjective CPU billing system that helps P2P and API nodes better respond to extreme network congestion Co-Authored-By: Bart Wyatt bart.wyatt@block.one Co-Authored-By: Kevin Heifner heifnerk@objectcomputing.com --- libraries/chain/controller.cpp | 14 +- .../chain/include/eosio/chain/controller.hpp | 3 +- .../include/eosio/chain/resource_limits.hpp | 2 + .../eosio/chain/resource_limits_private.hpp | 65 ++++++ .../eosio/chain/transaction_context.hpp | 1 + libraries/chain/resource_limits.cpp | 7 + libraries/chain/transaction_context.cpp | 4 +- libraries/testing/tester.cpp | 6 +- plugins/chain_plugin/account_query_db.cpp | 81 +++++-- .../test/test_account_query_db.cpp | 111 +++++++--- plugins/producer_plugin/CMakeLists.txt | 3 + .../producer_plugin/subjective_billing.hpp | 205 ++++++++++++++++++ plugins/producer_plugin/producer_plugin.cpp | 115 +++++----- plugins/producer_plugin/test/CMakeLists.txt | 5 + .../test/test_subjective_billing.cpp | 165 ++++++++++++++ unittests/wasm_tests.cpp | 27 ++- 16 files changed, 682 insertions(+), 132 deletions(-) create mode 100644 plugins/producer_plugin/include/eosio/producer_plugin/subjective_billing.hpp create mode 100644 plugins/producer_plugin/test/CMakeLists.txt create mode 100644 plugins/producer_plugin/test/test_subjective_billing.cpp diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 0b742fd2369..807260e6d86 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1403,7 +1403,8 @@ struct controller_impl { transaction_trace_ptr push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, uint32_t billed_cpu_time_us, - bool explicit_billed_cpu_time ) + bool explicit_billed_cpu_time, + uint32_t subjective_cpu_bill_us ) { EOS_ASSERT(deadline != fc::time_point(), transaction_exception, "deadline cannot be uninitialized"); @@ -1432,6 +1433,7 @@ struct controller_impl { trx_context.deadline = deadline; trx_context.explicit_billed_cpu_time = explicit_billed_cpu_time; trx_context.billed_cpu_time_us = billed_cpu_time_us; + trx_context.subjective_cpu_bill_us = subjective_cpu_bill_us; trace = trx_context.trace; try { if( trx->implicit ) { @@ -1504,6 +1506,7 @@ struct controller_impl { trace->error_code = controller::convert_exception_to_error_code( e ); trace->except = e; trace->except_ptr = std::current_exception(); + trace->elapsed = fc::time_point::now() - trx_context.start; } emit( self.accepted_transaction, trx ); @@ -1645,7 +1648,7 @@ struct controller_impl { 
in_trx_requiring_checks = old_value; }); in_trx_requiring_checks = true; - push_transaction( onbtrx, fc::time_point::maximum(), self.get_global_properties().configuration.min_transaction_cpu_usage, true ); + push_transaction( onbtrx, fc::time_point::maximum(), self.get_global_properties().configuration.min_transaction_cpu_usage, true, 0 ); } catch( const std::bad_alloc& e ) { elog( "on block transaction failed due to a std::bad_alloc" ); throw; @@ -1903,7 +1906,7 @@ struct controller_impl { : ( !!std::get<0>( trx_metas.at( packed_idx ) ) ? std::get<0>( trx_metas.at( packed_idx ) ) : std::get<1>( trx_metas.at( packed_idx ) ).get() ) ); - trace = push_transaction( trx_meta, fc::time_point::maximum(), receipt.cpu_usage_us, true ); + trace = push_transaction( trx_meta, fc::time_point::maximum(), receipt.cpu_usage_us, true, 0 ); ++packed_idx; } else if( receipt.trx.contains() ) { trace = push_scheduled_transaction( receipt.trx.get(), fc::time_point::maximum(), receipt.cpu_usage_us, true ); @@ -2688,11 +2691,12 @@ void controller::push_block( std::future& block_state_future, } transaction_trace_ptr controller::push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, - uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time ) { + uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time, + uint32_t subjective_cpu_bill_us ) { validate_db_available_size(); EOS_ASSERT( get_read_mode() != db_read_mode::IRREVERSIBLE, transaction_type_exception, "push transaction not allowed in irreversible mode" ); EOS_ASSERT( trx && !trx->implicit && !trx->scheduled, transaction_type_exception, "Implicit/Scheduled transaction not allowed" ); - return my->push_transaction(trx, deadline, billed_cpu_time_us, explicit_billed_cpu_time ); + return my->push_transaction(trx, deadline, billed_cpu_time_us, explicit_billed_cpu_time, subjective_cpu_bill_us ); } transaction_trace_ptr controller::push_scheduled_transaction( const transaction_id_type& trxid, fc::time_point deadline, diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index e6bb848bd72..2d08e4239a4 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -144,7 +144,8 @@ namespace eosio { namespace chain { * */ transaction_trace_ptr push_transaction( const transaction_metadata_ptr& trx, fc::time_point deadline, - uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time ); + uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time, + uint32_t subjective_cpu_bill_us ); /** * Attempt to execute a specific transaction in our deferred trx database diff --git a/libraries/chain/include/eosio/chain/resource_limits.hpp b/libraries/chain/include/eosio/chain/resource_limits.hpp index 5e1f3ca8423..9ea8521147a 100644 --- a/libraries/chain/include/eosio/chain/resource_limits.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits.hpp @@ -78,6 +78,8 @@ namespace eosio { namespace chain { namespace resource_limits { bool set_account_limits( const account_name& account, int64_t ram_bytes, int64_t net_weight, int64_t cpu_weight); void get_account_limits( const account_name& account, int64_t& ram_bytes, int64_t& net_weight, int64_t& cpu_weight) const; + bool is_unlimited_cpu( const account_name& account ) const; + void process_account_limit_updates(); void process_block_usage( uint32_t block_num ); diff --git a/libraries/chain/include/eosio/chain/resource_limits_private.hpp 
b/libraries/chain/include/eosio/chain/resource_limits_private.hpp index ccaed484ce5..e65de0fcdaf 100644 --- a/libraries/chain/include/eosio/chain/resource_limits_private.hpp +++ b/libraries/chain/include/eosio/chain/resource_limits_private.hpp @@ -120,6 +120,71 @@ namespace eosio { namespace chain { namespace resource_limits { } }; + /** + * This class accumulates a value that decays over quantums based on inputs + * The decay is linear between updates and exponential if the set of inputs has no gaps + * + * The value stored is Precision times the sum of the inputs. + */ + template + struct exponential_decay_accumulator + { + static_assert( Precision > 0, "Precision must be positive" ); + static constexpr uint64_t max_raw_value = std::numeric_limits::max() / Precision; + + exponential_decay_accumulator() + : last_ordinal(0) + , value_ex(0) + { + } + + uint32_t last_ordinal; ///< The ordinal of the last period which has contributed to the accumulator + uint64_t value_ex; ///< The current accumulated value pre-multiplied by Precision + + /** + * return the extended value at a current or future ordinal + */ + uint64_t value_ex_at( uint32_t ordinal, uint32_t window_size ) const { + if( last_ordinal < ordinal ) { + if( (uint64_t)last_ordinal + window_size > (uint64_t)ordinal ) { + const auto delta = ordinal - last_ordinal; // clearly 0 < delta < window_size + const auto decay = make_ratio( + (uint128_t)window_size - delta, + (uint128_t)window_size + ); + + return downgrade_cast((uint128_t)value_ex * decay); + } else { + return 0; + } + } else { + return value_ex; + } + } + + /** + * return the value at a current or future ordinal + */ + uint64_t value_at( uint32_t ordinal, uint32_t window_size ) const { + return integer_divide_ceil(value_ex_at(ordinal, window_size), Precision); + } + + void add( uint64_t units, uint32_t ordinal, uint32_t window_size /* must be positive */ ) + { + // check for some numerical limits before doing any state mutations + EOS_ASSERT(units <= max_raw_value, rate_limiting_state_inconsistent, "Usage exceeds maximum value representable after extending for precision"); + + uint128_t units_ex = (uint128_t)units * Precision; + if (last_ordinal < ordinal) { + value_ex = value_ex_at(ordinal, window_size); + last_ordinal = ordinal; + } + + // saturate the value + uint128_t new_value_ex = std::min(units_ex + (uint128_t)value_ex, std::numeric_limits::max()); + value_ex = downgrade_cast(new_value_ex); + } + }; } using usage_accumulator = impl::exponential_moving_average_accumulator<>; diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index a10c7ba9e9a..d9cdff7b5e4 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -129,6 +129,7 @@ namespace eosio { namespace chain { fc::time_point deadline = fc::time_point::maximum(); fc::microseconds leeway = fc::microseconds( config::default_subjective_cpu_leeway_us ); int64_t billed_cpu_time_us = 0; + uint32_t subjective_cpu_bill_us = 0; bool explicit_billed_cpu_time = false; transaction_checktime_timer transaction_timer; diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index e9ec714fabf..d834ac42420 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -285,6 +285,13 @@ void resource_limits_manager::get_account_limits( const account_name& account, i } } +bool 
resource_limits_manager::is_unlimited_cpu( const account_name& account ) const { + const auto* buo = _db.find( boost::make_tuple(false, account) ); + if (buo) { + return buo->cpu_weight == -1; + } + return false; +} void resource_limits_manager::process_account_limit_updates() { auto& multi_index = _db.get_mutable_index(); diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 99f5c57c17e..17d09d4a4dc 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -174,7 +174,9 @@ namespace eosio { namespace chain { if( !explicit_billed_cpu_time ) { // Fail early if amount of the previous speculative execution is within 10% of remaining account cpu available - int64_t validate_account_cpu_limit = account_cpu_limit - EOS_PERCENT( account_cpu_limit, 10 * config::percent_1 ); + int64_t validate_account_cpu_limit = account_cpu_limit - subjective_cpu_bill_us; + if( validate_account_cpu_limit > 0 ) + validate_account_cpu_limit -= EOS_PERCENT( validate_account_cpu_limit, 10 * config::percent_1 ); if( validate_account_cpu_limit < 0 ) validate_account_cpu_limit = 0; validate_account_cpu_usage( billed_cpu_time_us, validate_account_cpu_limit, true ); } diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index ca67bdb0580..ae88d56b737 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -329,7 +329,7 @@ namespace eosio { namespace testing { if( !skip_pending_trxs ) { for( auto itr = unapplied_transactions.begin(); itr != unapplied_transactions.end(); ) { - auto trace = control->push_transaction( itr->trx_meta, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US, true ); + auto trace = control->push_transaction( itr->trx_meta, fc::time_point::maximum(), DEFAULT_BILLED_CPU_TIME_US, true, 0 ); traces.emplace_back( trace ); if(!no_throw && trace->except) { // this always throws an fc::exception, since the original exception is copied into an fc::exception @@ -549,7 +549,7 @@ namespace eosio { namespace testing { fc::microseconds::maximum() : fc::microseconds( deadline - fc::time_point::now() ); auto fut = transaction_metadata::start_recover_keys( ptrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); - auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us, billed_cpu_time_us > 0 ); + auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us, billed_cpu_time_us > 0, 0 ); if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except ) throw *r->except; return r; @@ -574,7 +574,7 @@ namespace eosio { namespace testing { fc::microseconds( deadline - fc::time_point::now() ); auto ptrx = std::make_shared( trx, c ); auto fut = transaction_metadata::start_recover_keys( ptrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); - auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us, billed_cpu_time_us > 0 ); + auto r = control->push_transaction( fut.get(), deadline, billed_cpu_time_us, billed_cpu_time_us > 0, 0 ); if (no_throw) return r; if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except) throw *r->except; diff --git a/plugins/chain_plugin/account_query_db.cpp b/plugins/chain_plugin/account_query_db.cpp index 28d7b546902..eaea2214e77 100644 --- a/plugins/chain_plugin/account_query_db.cpp +++ b/plugins/chain_plugin/account_query_db.cpp @@ -22,13 +22,13 @@ using namespace boost::bimaps; namespace { /** * Structure to hold indirect reference to a 
`property_object` via {owner,name} as well as a non-standard - * index over `last_updated` for roll-back support + * index over `last_updated_height` (which is truncated at the LIB during initialization) for roll-back support */ struct permission_info { // indexed data chain::name owner; chain::name name; - fc::time_point last_updated; + uint32_t last_updated_height; // un-indexed data uint32_t threshold;
@@ -37,10 +37,10 @@ namespace { }; struct by_owner_name; - struct by_last_updated; + struct by_last_updated_height; /** - * Multi-index providing fast lookup for {owner,name} as well as {last_updated} + * Multi-index providing fast lookup for {owner,name} as well as {last_updated_height} */ using permission_info_index_t = multi_index_container< permission_info,
@@ -53,8 +53,8 @@ namespace { > >, ordered_non_unique< - tag<by_last_updated>, - member<permission_info, fc::time_point, &permission_info::last_updated> + tag<by_last_updated_height>, + member<permission_info, uint32_t, &permission_info::last_updated_height> > > >;
@@ -143,8 +143,19 @@ namespace eosio::chain_apis { auto start = fc::time_point::now(); const auto& index = controller.db().get_index().indices().get(); + // build an initial time-to-block-number map + const auto lib_num = controller.last_irreversible_block_num(); + const auto head_num = controller.head_block_num(); + + for (uint32_t block_num = lib_num + 1; block_num <= head_num; block_num++) { + const auto block_p = controller.fetch_block_by_number(block_num); + EOS_ASSERT(block_p, chain::plugin_exception, "cannot fetch reversible block ${block_num}, required for account_db initialization", ("block_num", block_num)); + time_to_block_num.emplace(block_p->timestamp.to_time_point(), block_num); + } + for (const auto& po : index ) { - const auto& pi = permission_info_index.emplace( permission_info{ po.owner, po.name, po.last_updated, po.auth.threshold } ).first; + uint32_t last_updated_height = last_updated_time_to_height(po.last_updated); + const auto& pi = permission_info_index.emplace( permission_info{ po.owner, po.name, last_updated_height, po.auth.threshold } ).first; add_to_bimaps(*pi, po); } auto duration = fc::time_point::now() - start;
@@ -185,14 +196,14 @@ namespace eosio::chain_apis { bool is_rollback_required( const chain::block_state_ptr& bsp ) const { std::shared_lock read_lock(rw_mutex); - const auto t = bsp->block->timestamp.to_time_point(); - const auto& index = permission_info_index.get<by_last_updated>(); + const auto bnum = bsp->block->block_num(); + const auto& index = permission_info_index.get<by_last_updated_height>(); if (index.empty()) { return false; } else { const auto& pi = (*index.rbegin()); - if (pi.last_updated < t) { + if (pi.last_updated_height < bnum) { return false; } }
@@ -200,22 +211,42 @@ namespace eosio::chain_apis { return true; } + uint32_t last_updated_time_to_height( const fc::time_point& last_updated) { + const auto lib_num = controller.last_irreversible_block_num(); + const auto lib_time = controller.last_irreversible_block_time(); + + uint32_t last_updated_height = lib_num; + if (last_updated > lib_time) { + const auto iter = time_to_block_num.find(last_updated); + EOS_ASSERT(iter != time_to_block_num.end(), chain::plugin_exception, "invalid block time encountered in on-chain accounts ${time}", ("time", last_updated)); + last_updated_height = iter->second; + } + + return last_updated_height; + } + /** - * Given a time_point, remove all permissions that were last updated at or after that time_point - * this will effectively remove any updates that happened at or after that time point + * Given a block number, remove all permissions that were last updated at or after that block number + * this will effectively roll back the database to just
before the incoming block * * For each removed entry, this will create a new entry if there exists an equivalent {owner, name} permission * at the HEAD state of the chain. * @param bsp - the block to roll back before */ void rollback_to_before( const chain::block_state_ptr& bsp ) { - const auto t = bsp->block->timestamp.to_time_point(); - auto& index = permission_info_index.get<by_last_updated>(); + const auto bnum = bsp->block->block_num(); + auto& index = permission_info_index.get<by_last_updated_height>(); const auto& permission_by_owner = controller.db().get_index().indices().get(); + // roll back the time map + auto time_iter = time_to_block_num.rbegin(); + while (time_iter != time_to_block_num.rend() && time_iter->second >= bnum) { + time_iter = decltype(time_iter){time_to_block_num.erase( std::next(time_iter).base() )}; + } + while (!index.empty()) { const auto& pi = (*index.rbegin()); - if (pi.last_updated < t) { + if (pi.last_updated_height < bnum) { break; }
@@ -228,8 +259,11 @@ index.erase(index.iterator_to(pi)); } else { const auto& po = *itr; + + uint32_t last_updated_height = po.last_updated == bsp->header.timestamp ? bsp->block_num : last_updated_time_to_height(po.last_updated); + + index.modify(index.iterator_to(pi), [&po, last_updated_height](auto& mutable_pi) { - mutable_pi.last_updated = po.last_updated; + mutable_pi.last_updated_height = last_updated_height; mutable_pi.threshold = po.auth.threshold; }); add_to_bimaps(pi, po);
@@ -331,6 +365,11 @@ std::unique_lock write_lock(rw_mutex); rollback_to_before(bsp); + + // insert this block's time into the time map + time_to_block_num.emplace(bsp->header.timestamp, bsp->block_num); + + const auto bnum = bsp->block_num; auto& index = permission_info_index.get<by_owner_name>(); const auto& permission_by_owner = controller.db().get_index().indices().get();
@@ -342,11 +381,11 @@ auto itr = index.find(key); if (itr == index.end()) { const auto& po = *source_itr; - itr = index.emplace(permission_info{ po.owner, po.name, po.last_updated, po.auth.threshold }).first; + itr = index.emplace(permission_info{ po.owner, po.name, bnum, po.auth.threshold }).first; } else { remove_from_bimaps(*itr); index.modify(itr, [&](auto& mutable_pi){ - mutable_pi.last_updated = source_itr->last_updated; + mutable_pi.last_updated_height = bnum; mutable_pi.threshold = source_itr->auth.threshold; }); }
@@ -440,6 +479,10 @@ cached_trace_map_t cached_trace_map; ///< temporary cache of uncommitted traces onblock_trace_t onblock_trace; ///< temporary cache of on_block trace + using time_map_t = std::map<fc::time_point, uint32_t>; + time_map_t time_to_block_num; + + using name_bimap_t = bimap>, multiset_of>; using key_bimap_t = bimap>, multiset_of>;
diff --git a/plugins/chain_plugin/test/test_account_query_db.cpp b/plugins/chain_plugin/test/test_account_query_db.cpp index 36cceb67647..e0ef730f839 100644 --- a/plugins/chain_plugin/test/test_account_query_db.cpp +++ b/plugins/chain_plugin/test/test_account_query_db.cpp
@@ -40,26 +40,26 @@ BOOST_AUTO_TEST_SUITE(account_query_db_tests) BOOST_FIXTURE_TEST_CASE(newaccount_test, TESTER) { try { - // instantiate an account_query_db - auto aq_db = account_query_db(*control); + // instantiate an account_query_db + auto aq_db = account_query_db(*control); //link aq_db to the `accepted_block` signal on the controller - auto c2 =
control->accepted_block.connect([&](const block_state_ptr& blk) { aq_db.commit_block( blk); - }); + }); - produce_blocks(10); + produce_blocks(10); - account_name tester_account = N(tester); - const auto trace_ptr = create_account(tester_account); - aq_db.cache_transaction_trace(trace_ptr); - produce_block(); + account_name tester_account = N(tester); + const auto trace_ptr = create_account(tester_account); + aq_db.cache_transaction_trace(trace_ptr); + produce_block(); - params pars; - pars.keys.emplace_back(get_public_key(tester_account, "owner")); - const auto results = aq_db.get_accounts_by_authorizers(pars); + params pars; + pars.keys.emplace_back(get_public_key(tester_account, "owner")); + const auto results = aq_db.get_accounts_by_authorizers(pars); - BOOST_TEST_REQUIRE(find_account_name(results, tester_account) == true); + BOOST_TEST_REQUIRE(find_account_name(results, tester_account) == true); } FC_LOG_AND_RETHROW() } @@ -75,25 +75,72 @@ BOOST_FIXTURE_TEST_CASE(updateauth_test, TESTER) { try { produce_blocks(10); - const auto& tester_account = N(tester); - const string role = "first"; - produce_block(); - create_account(tester_account); - - const auto trace_ptr = push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object() - ("account", tester_account) - ("permission", N(role)) - ("parent", "active") - ("auth", authority(get_public_key(tester_account, role), 5)) - ); - aq_db.cache_transaction_trace(trace_ptr); - produce_block(); - - params pars; - pars.keys.emplace_back(get_public_key(tester_account, role)); - const auto results = aq_db.get_accounts_by_authorizers(pars); - - BOOST_TEST_REQUIRE(find_account_auth(results, tester_account, N(role)) == true); + const auto& tester_account = N(tester); + const string role = "first"; + produce_block(); + create_account(tester_account); + + const auto trace_ptr = push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object() + ("account", tester_account) + ("permission", N(role)) + ("parent", "active") + ("auth", authority(get_public_key(tester_account, role), 5)) + ); + aq_db.cache_transaction_trace(trace_ptr); + produce_block(); + + params pars; + pars.keys.emplace_back(get_public_key(tester_account, role)); + const auto results = aq_db.get_accounts_by_authorizers(pars); + + BOOST_TEST_REQUIRE(find_account_auth(results, tester_account, N(role)) == true); + +} FC_LOG_AND_RETHROW() } + +BOOST_AUTO_TEST_CASE(future_fork_test) { try { + tester node_a(setup_policy::none); + tester node_b(setup_policy::none); + + // instantiate an account_query_db + auto aq_db = account_query_db(*node_a.control); + + //link aq_db to the `accepted_block` signal on the controller + auto c = node_a.control->accepted_block.connect([&](const block_state_ptr& blk) { + aq_db.commit_block( blk); + }); + + // create 10 blocks synced + for (int i = 0; i < 10; i++) { + node_b.push_block(node_a.produce_block()); + } + + // produce a block on node A with a new account and permission + const auto& tester_account = N(tester); + const string role = "first"; + node_a.create_account(tester_account); + + const auto trace_ptr = node_a.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object() + ("account", tester_account) + ("permission", N(role)) + ("parent", "active") + ("auth", authority(node_a.get_public_key(tester_account, role), 5)) + ); + aq_db.cache_transaction_trace(trace_ptr); + node_a.produce_block(); + + params pars; + 
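+ // look up the accounts reachable from the key that was just authorized on node A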
pars.keys.emplace_back(node_a.get_public_key(tester_account, role)); + + const auto pre_results = aq_db.get_accounts_by_authorizers(pars); + BOOST_TEST_REQUIRE(find_account_auth(pre_results, tester_account, N(role)) == true); + + // have node B take over from head-1 and produce "future" blocks to overtake + node_a.push_block(node_b.produce_block(fc::milliseconds(config::block_interval_ms * 100))); + node_a.push_block(node_b.produce_block()); + + // ensure the account was forked away + const auto post_results = aq_db.get_accounts_by_authorizers(pars); + BOOST_TEST_REQUIRE(post_results.accounts.size() == 0); } FC_LOG_AND_RETHROW() } diff --git a/plugins/producer_plugin/CMakeLists.txt b/plugins/producer_plugin/CMakeLists.txt index fe161f8d913..4e5c23efe58 100644 --- a/plugins/producer_plugin/CMakeLists.txt +++ b/plugins/producer_plugin/CMakeLists.txt @@ -8,3 +8,6 @@ add_library( producer_plugin target_link_libraries( producer_plugin chain_plugin http_client_plugin appbase eosio_chain ) target_include_directories( producer_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../chain_interface/include" ) + +add_subdirectory( test ) + diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/subjective_billing.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/subjective_billing.hpp new file mode 100644 index 00000000000..0be8de26983 --- /dev/null +++ b/plugins/producer_plugin/include/eosio/producer_plugin/subjective_billing.hpp @@ -0,0 +1,205 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace eosio { + +namespace bmi = boost::multi_index; +using chain::transaction_id_type; +using chain::account_name; +using chain::block_state_ptr; +using chain::packed_transaction; +namespace config = chain::config; + +class subjective_billing { +private: + + struct trx_cache_entry { + transaction_id_type trx_id; + account_name account; + uint32_t subjective_cpu_bill; + fc::time_point expiry; + }; + struct by_id; + struct by_expiry; + + using trx_cache_index = bmi::multi_index_container< + trx_cache_entry, + indexed_by< + bmi::hashed_unique, BOOST_MULTI_INDEX_MEMBER( trx_cache_entry, transaction_id_type, trx_id ) >, + ordered_non_unique, BOOST_MULTI_INDEX_MEMBER( trx_cache_entry, fc::time_point, expiry ) > + > + >; + + using decaying_accumulator = chain::resource_limits::impl::exponential_decay_accumulator<>; + + struct subjective_billing_info { + uint64_t pending_cpu_us; // tracked cpu us for transactions that may still succeed in a block + decaying_accumulator expired_accumulator; // accumulator used to account for transactions that have expired + + bool empty(uint32_t time_ordinal) { + return pending_cpu_us == 0 && expired_accumulator.value_at(time_ordinal, expired_accumulator_average_window) == 0; + } + }; + + using account_subjective_bill_cache = std::map; + using block_subjective_bill_cache = std::map; + + bool _disabled = false; + trx_cache_index _trx_cache_index; + account_subjective_bill_cache _account_subjective_bill_cache; + block_subjective_bill_cache _block_subjective_bill_cache; + +private: + uint32_t time_ordinal_for( const fc::time_point& t ) const { + auto ordinal = t.time_since_epoch().count() / (1000U * (uint64_t)subjective_time_interval_ms); + EOS_ASSERT(ordinal <= std::numeric_limits::max(), chain::tx_resource_exhaustion, "overflow of quantized time in subjective billing"); + return ordinal; + } + + void remove_subjective_billing( const trx_cache_entry& 
entry, uint32_t time_ordinal ) { + auto aitr = _account_subjective_bill_cache.find( entry.account ); + if( aitr != _account_subjective_bill_cache.end() ) { + aitr->second.pending_cpu_us -= entry.subjective_cpu_bill; + EOS_ASSERT( aitr->second.pending_cpu_us >= 0, chain::tx_resource_exhaustion, + "Logic error in subjective account billing ${a}", ("a", entry.account) ); + if( aitr->second.empty(time_ordinal) ) _account_subjective_bill_cache.erase( aitr ); + } + } + + void transition_to_expired( const trx_cache_entry& entry, uint32_t time_ordinal ) { + auto aitr = _account_subjective_bill_cache.find( entry.account ); + if( aitr != _account_subjective_bill_cache.end() ) { + aitr->second.pending_cpu_us -= entry.subjective_cpu_bill; + aitr->second.expired_accumulator.add(entry.subjective_cpu_bill, time_ordinal, expired_accumulator_average_window); + } + } + + void remove_subjective_billing( const block_state_ptr& bsp, uint32_t time_ordinal ) { + if( !_trx_cache_index.empty() ) { + for( const auto& receipt : bsp->block->transactions ) { + if( receipt.trx.contains() ) { + const auto& pt = receipt.trx.get(); + remove_subjective_billing( pt.id(), time_ordinal ); + } + } + } + } + +public: // public for tests + static constexpr uint32_t subjective_time_interval_ms = 5'000; + static constexpr uint32_t expired_accumulator_average_window = config::account_cpu_usage_average_window_ms / subjective_time_interval_ms; + + void remove_subjective_billing( const transaction_id_type& trx_id, uint32_t time_ordinal ) { + auto& idx = _trx_cache_index.get(); + auto itr = idx.find( trx_id ); + if( itr != idx.end() ) { + remove_subjective_billing( *itr, time_ordinal ); + idx.erase( itr ); + } + } + +public: + void disable() { _disabled = true; } + + /// @param in_pending_block pass true if pt's bill time is accounted for in the pending block + void subjective_bill( const transaction_id_type& id, const fc::time_point& expire, const account_name& first_auth, + const fc::microseconds& elapsed, bool in_pending_block ) + { + if( !_disabled ) { + uint32_t bill = std::max( 0, elapsed.count() ); + auto p = _trx_cache_index.emplace( + trx_cache_entry{id, + first_auth, + bill, + expire} ); + if( p.second ) { + _account_subjective_bill_cache[first_auth].pending_cpu_us += bill; + if( in_pending_block ) { + _block_subjective_bill_cache[first_auth] += bill; + } + } + } + } + + void subjective_bill_failure( const account_name& first_auth, const fc::microseconds& elapsed, const fc::time_point& now ) + { + if( !_disabled ) { + uint32_t bill = std::max( 0, elapsed.count() ); + const auto time_ordinal = time_ordinal_for(now); + _account_subjective_bill_cache[first_auth].expired_accumulator.add(bill, time_ordinal, expired_accumulator_average_window); + } + } + + uint32_t get_subjective_bill( const account_name& first_auth, const fc::time_point& now ) const { + if( _disabled ) return 0; + const auto time_ordinal = time_ordinal_for(now); + const subjective_billing_info* sub_bill_info = nullptr; + auto aitr = _account_subjective_bill_cache.find( first_auth ); + if( aitr != _account_subjective_bill_cache.end() ) { + sub_bill_info = &aitr->second; + } + uint64_t in_block_pending_cpu_us = 0; + auto bitr = _block_subjective_bill_cache.find( first_auth ); + if( bitr != _block_subjective_bill_cache.end() ) { + in_block_pending_cpu_us = bitr->second; + } + + if (sub_bill_info) { + EOS_ASSERT(sub_bill_info->pending_cpu_us >= in_block_pending_cpu_us, chain::tx_resource_exhaustion, "Logic error subjective billing ${a}", ("a", first_auth) ); + 
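+ // bill = CPU tracked for this account's trxs that are not in the current pending block
+ // (in-block trxs are already billed objectively there), plus the decaying value of
+ // usage whose trxs have since expired or failed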
uint32_t sub_bill = sub_bill_info->pending_cpu_us - in_block_pending_cpu_us + sub_bill_info->expired_accumulator.value_at(time_ordinal, expired_accumulator_average_window ); + return sub_bill; + } else { + return 0; + } + } + + void abort_block() { + _block_subjective_bill_cache.clear(); + } + + void on_block( const block_state_ptr& bsp, const fc::time_point& now ) { + if( bsp == nullptr ) return; + const auto time_ordinal = time_ordinal_for(now); + remove_subjective_billing( bsp, time_ordinal ); + } + + bool remove_expired( fc::logger& log, const fc::time_point& pending_block_time, const fc::time_point& now, const fc::time_point& deadline ) { + bool exhausted = false; + auto& idx = _trx_cache_index.get(); + if( !idx.empty() ) { + const auto time_ordinal = time_ordinal_for(now); + const auto orig_count = _trx_cache_index.size(); + uint32_t num_expired = 0; + + while( !idx.empty() ) { + if( deadline <= fc::time_point::now() ) { + exhausted = true; + break; + } + auto b = idx.begin(); + if( b->expiry > pending_block_time ) break; + transition_to_expired( *b, time_ordinal ); + idx.erase( b ); + num_expired++; + } + + fc_dlog( log, "Processed ${n} subjective billed transactions, Expired ${expired}", + ("n", orig_count)( "expired", num_expired ) ); + } + return !exhausted; + } +}; + +} //eosio diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 5a738957cc8..b33e3264cd8 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -174,6 +175,7 @@ enum class pending_block_mode { speculating }; + class producer_plugin_impl : public std::enable_shared_from_this { public: producer_plugin_impl(boost::asio::io_service& io) @@ -234,6 +236,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _accepted_block_connection; fc::optional _accepted_block_header_connection; @@ -277,6 +280,7 @@ class producer_plugin_impl : public std::enable_shared_from_this - auto publish_results_of(const Type &data, Channel& channel, F f) { - auto publish_success = fc::make_scoped_exit([&, this](){ - channel.publish(std::pair(nullptr, data)); - }); + void abort_block() { + auto& chain = chain_plug->chain(); - try { - auto trace = f(); - if (trace->except) { - publish_success.cancel(); - channel.publish(std::pair(trace->except->dynamic_copy_exception(), data)); - } - return trace; - } catch (const fc::exception& e) { - publish_success.cancel(); - channel.publish(std::pair(e.dynamic_copy_exception(), data)); - throw e; - } catch( const std::exception& e ) { - publish_success.cancel(); - auto fce = fc::exception( - FC_LOG_MESSAGE( info, "Caught std::exception: ${what}", ("what",e.what())), - fc::std_exception_code, - BOOST_CORE_TYPEID(e).name(), - e.what() - ); - channel.publish(std::pair(fce.dynamic_copy_exception(),data)); - throw fce; - } catch( ... ) { - publish_success.cancel(); - auto fce = fc::unhandled_exception( - FC_LOG_MESSAGE( info, "Caught unknown exception"), - std::current_exception() - ); - - channel.publish(std::pair(fce.dynamic_copy_exception(), data)); - throw fce; - } - }; + _unapplied_transactions.add_aborted( chain.abort_block() ); + _subjective_billing.abort_block(); + } bool on_incoming_block(const signed_block_ptr& block, const std::optional& block_id) { auto& chain = chain_plug->chain(); @@ -366,7 +338,7 @@ class producer_plugin_impl : public std::enable_shared_from_thisid(); fc::time_point bt = chain.is_building_block() ? 
chain.pending_block_time() : chain.head_block_time(); - if( fc::time_point( trx->packed_trx()->expiration()) < bt ) { + const fc::time_point expire = trx->packed_trx()->expiration(); + if( expire < bt ) { send_response( std::static_pointer_cast( std::make_shared( FC_LOG_MESSAGE( error, "expired transaction ${id}, expiration ${e}, block time ${bt}", - ("id", id)("e", trx->packed_trx()->expiration())( "bt", bt ))))); + ("id", id)("e", expire)( "bt", bt ))))); return true; } @@ -565,7 +538,12 @@ class producer_plugin_impl : public std::enable_shared_from_thisbilled_cpu_time_us, false ); + auto first_auth = trx->packed_trx()->get_transaction().first_authorizer(); + uint32_t sub_bill = 0; + if( _pending_block_mode != pending_block_mode::producing) + sub_bill = _subjective_billing.get_subjective_bill( first_auth, fc::time_point::now() ); + + auto trace = chain.push_transaction( trx, deadline, trx->billed_cpu_time_us, false, sub_bill ); if( trace->except ) { if( exception_is_exhausted( *trace->except, deadline_is_subjective )) { _pending_incoming_transactions.add( trx, persist_until_expired, next ); @@ -578,9 +556,9 @@ class producer_plugin_impl : public std::enable_shared_from_thisid())); } - if( !exhausted ) - exhausted = block_is_exhausted(); + exhausted = block_is_exhausted(); } else { + _subjective_billing.subjective_bill_failure( first_auth, trace->elapsed, fc::time_point::now() ); auto e_ptr = trace->except->dynamic_copy_exception(); send_response( e_ptr ); } @@ -588,7 +566,12 @@ class producer_plugin_impl : public std::enable_shared_from_thisid(), expire, first_auth, trace->elapsed, + chain.get_read_mode() == chain::db_read_mode::SPECULATIVE ); } send_response( trace ); } @@ -730,6 +713,8 @@ void producer_plugin::set_program_options( "Maximum size (in MiB) of the incoming transaction queue. Exceeding this value will subjectively drop transaction with resource exhaustion.") ("disable-api-persisted-trx", bpo::bool_switch()->default_value(false), "Disable the re-apply of API transactions.") + ("disable-subjective-billing", bpo::bool_switch()->default_value(false), + "Disable subjective billing.") ("producer-threads", bpo::value()->default_value(config::default_controller_thread_pool_size), "Number of worker threads in producer thread pool") ("snapshots-dir", bpo::value()->default_value("snapshots"), @@ -914,6 +899,7 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ my->_incoming_defer_ratio = options.at("incoming-defer-ratio").as(); my->_disable_persist_until_expired = options.at("disable-api-persisted-trx").as(); + if( options.at("disable-subjective-billing").as() ) my->_subjective_billing.disable(); auto thread_pool_size = options.at( "producer-threads" ).as(); EOS_ASSERT( thread_pool_size > 0, plugin_config_exception, @@ -1055,8 +1041,7 @@ void producer_plugin::resume() { // re-evaluate that now // if (my->_pending_block_mode == pending_block_mode::speculating) { - chain::controller& chain = my->chain_plug->chain(); - my->_unapplied_transactions.add_aborted( chain.abort_block() ); + my->abort_block(); fc_ilog(_log, "Producer resumed. 
Scheduling production."); my->schedule_production_loop(); } else { @@ -1098,7 +1083,7 @@ void producer_plugin::update_runtime_options(const runtime_options& options) { } if (check_speculating && my->_pending_block_mode == pending_block_mode::speculating) { - my->_unapplied_transactions.add_aborted( chain.abort_block() ); + my->abort_block(); my->schedule_production_loop(); } @@ -1182,7 +1167,7 @@ producer_plugin::integrity_hash_information producer_plugin::get_integrity_hash( if (chain.is_building_block()) { // abort the pending block - my->_unapplied_transactions.add_aborted( chain.abort_block() ); + my->abort_block(); } else { reschedule.cancel(); } @@ -1211,7 +1196,7 @@ void producer_plugin::create_snapshot(producer_plugin::next_function_unapplied_transactions.add_aborted( chain.abort_block() ); + my->abort_block(); } else { reschedule.cancel(); } @@ -1567,7 +1552,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { blocks_to_confirm = (uint16_t)(std::min(blocks_to_confirm, (uint32_t)(hbs->block_num - hbs->dpos_irreversible_blocknum))); } - _unapplied_transactions.add_aborted( chain.abort_block() ); + abort_block(); auto features_to_activate = chain.get_preactivated_protocol_features(); if( _pending_block_mode == pending_block_mode::producing && _protocol_features_to_activate.size() > 0 ) { @@ -1621,6 +1606,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { return start_block_result::exhausted; if( !remove_expired_blacklisted_trxs( preprocess_deadline ) ) return start_block_result::exhausted; + if( !_subjective_billing.remove_expired( _log, chain.pending_block_time(), fc::time_point::now(), preprocess_deadline ) ) + return start_block_result::exhausted; // limit execution of pending incoming to once per block size_t pending_incoming_process_limit = _pending_incoming_transactions.size(); @@ -1818,6 +1805,7 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin if( !_unapplied_transactions.empty() ) { account_failures account_fails; chain::controller& chain = chain_plug->chain(); + const auto& rl = chain.get_resource_limits_manager(); int num_applied = 0, num_failed = 0, num_processed = 0; auto unapplied_trxs_size = _unapplied_transactions.size(); auto itr = (_pending_block_mode == pending_block_mode::producing) ? 
@@ -1835,14 +1823,16 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin try { auto start = fc::time_point::now(); auto trx_deadline = start + fc::milliseconds( _max_transaction_time_ms ); - if( account_fails.failure_limit( trx->packed_trx()->get_transaction().first_authorizer() ) ) { + + auto first_auth = trx->packed_trx()->get_transaction().first_authorizer(); + if( account_fails.failure_limit( first_auth ) ) { ++num_failed; itr = _unapplied_transactions.erase( itr ); continue; } auto prev_billed_cpu_time_us = trx->billed_cpu_time_us; - if( prev_billed_cpu_time_us > 0 ) { + if( prev_billed_cpu_time_us > 0 && !rl.is_unlimited_cpu( first_auth )) { auto prev_billed_plus100 = prev_billed_cpu_time_us + EOS_PERCENT( prev_billed_cpu_time_us, 100 * config::percent_1 ); auto trx_dl = start + fc::microseconds( prev_billed_plus100 ); if( trx_dl < trx_deadline ) trx_deadline = trx_dl; @@ -1853,8 +1843,10 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin deadline_is_subjective = true; trx_deadline = deadline; } + // no subjective billing since we are producing or processing persisted trxs + const uint32_t sub_bill = 0; - auto trace = chain.push_transaction( trx, trx_deadline, prev_billed_cpu_time_us, false ); + auto trace = chain.push_transaction( trx, trx_deadline, prev_billed_cpu_time_us, false, sub_bill ); if( trace->except ) { if( exception_is_exhausted( *trace->except, deadline_is_subjective ) ) { if( block_is_exhausted() ) { @@ -1864,16 +1856,22 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin } } else { auto failure_code = trace->except->code(); - // this failed our configured maximum transaction time, we don't want to replay it - fc_dlog( _log, "Failed ${c} trx, prev billed: ${p}us, ran: ${r}us, id: ${id}", - ("c", trace->except->code())("p", prev_billed_cpu_time_us) - ("r", fc::time_point::now() - start)("id", trx->id()) ); - account_fails.add( trx->packed_trx()->get_transaction().first_authorizer(), failure_code ); + if( failure_code != tx_duplicate::code_value ) { + // this failed our configured maximum transaction time, we don't want to replay it + fc_dlog( _log, "Failed ${c} trx, prev billed: ${p}us, ran: ${r}us, id: ${id}", + ("c", trace->except->code())("p", prev_billed_cpu_time_us) + ("r", fc::time_point::now() - start)("id", trx->id()) ); + account_fails.add( first_auth, failure_code ); + _subjective_billing.subjective_bill_failure( first_auth, trace->elapsed, fc::time_point::now() ); + } ++num_failed; itr = _unapplied_transactions.erase( itr ); continue; } } else { + // if db_read_mode SPECULATIVE then trx is in the pending block and not immediately reverted + _subjective_billing.subjective_bill( trx->id(), trx->packed_trx()->expiration(), first_auth, trace->elapsed, + chain.get_read_mode() == chain::db_read_mode::SPECULATIVE ); ++num_applied; itr = _unapplied_transactions.erase( itr ); continue; @@ -2157,8 +2155,7 @@ bool producer_plugin_impl::maybe_produce_block() { } LOG_AND_DROP(); fc_dlog(_log, "Aborting block due to produce_block error"); - chain::controller& chain = chain_plug->chain(); - _unapplied_transactions.add_aborted( chain.abort_block() ); + abort_block(); return false; } diff --git a/plugins/producer_plugin/test/CMakeLists.txt b/plugins/producer_plugin/test/CMakeLists.txt new file mode 100644 index 00000000000..689dca3981f --- /dev/null +++ b/plugins/producer_plugin/test/CMakeLists.txt @@ -0,0 +1,5 @@ +add_executable( test_subjective_billing 
test_subjective_billing.cpp ) +target_link_libraries( test_subjective_billing producer_plugin eosio_testing ) + +add_test(NAME test_subjective_billing COMMAND plugins/producer_plugin/test/test_subjective_billing WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + diff --git a/plugins/producer_plugin/test/test_subjective_billing.cpp b/plugins/producer_plugin/test/test_subjective_billing.cpp new file mode 100644 index 00000000000..dfccb54973d --- /dev/null +++ b/plugins/producer_plugin/test/test_subjective_billing.cpp @@ -0,0 +1,165 @@ +#define BOOST_TEST_MODULE subjective_billing +#include + +#include + +#include + +namespace { + +using namespace eosio; +using namespace eosio::chain; + +BOOST_AUTO_TEST_SUITE( subjective_billing_test ) + +BOOST_AUTO_TEST_CASE( subjective_bill_test ) { + + fc::logger log; + + transaction_id_type id1 = sha256::hash( "1" ); + transaction_id_type id2 = sha256::hash( "2" ); + transaction_id_type id3 = sha256::hash( "3" ); + transaction_id_type id4 = sha256::hash( "4" ); + transaction_id_type id5 = sha256::hash( "5" ); + transaction_id_type id6 = sha256::hash( "6" ); + account_name a = N("a"); + account_name b = N("b"); + account_name c = N("c"); + + const auto now = time_point::now(); + const auto halftime = now + fc::milliseconds(subjective_billing::expired_accumulator_average_window * subjective_billing::subjective_time_interval_ms / 2); + const auto endtime = now + fc::milliseconds(subjective_billing::expired_accumulator_average_window * subjective_billing::subjective_time_interval_ms); + + + { // Failed transactions remain until expired in subjective billing. + subjective_billing sub_bill; + + sub_bill.subjective_bill( id1, now, a, fc::microseconds( 13 ), false ); + sub_bill.subjective_bill( id2, now, a, fc::microseconds( 11 ), false ); + sub_bill.subjective_bill( id3, now, b, fc::microseconds( 9 ), false ); + + BOOST_CHECK_EQUAL( 13+11, sub_bill.get_subjective_bill(a, now) ); + BOOST_CHECK_EQUAL( 9, sub_bill.get_subjective_bill(b, now) ); + + sub_bill.on_block({}, now); + sub_bill.abort_block(); // they all failed so nothing in aborted block + + BOOST_CHECK_EQUAL( 13+11, sub_bill.get_subjective_bill(a, now) ); + BOOST_CHECK_EQUAL( 9, sub_bill.get_subjective_bill(b, now) ); + + // expires transactions but leaves them in the decay at full value + sub_bill.remove_expired( log, now + fc::microseconds(1), now, fc::time_point::maximum() ); + + BOOST_CHECK_EQUAL( 13+11, sub_bill.get_subjective_bill(a, now) ); + BOOST_CHECK_EQUAL( 9, sub_bill.get_subjective_bill(b, now) ); + BOOST_CHECK_EQUAL( 0, sub_bill.get_subjective_bill(c, now) ); + + // ensure that the value decays away at the window + BOOST_CHECK_EQUAL( 0, sub_bill.get_subjective_bill(a, endtime) ); + BOOST_CHECK_EQUAL( 0, sub_bill.get_subjective_bill(b, endtime) ); + BOOST_CHECK_EQUAL( 0, sub_bill.get_subjective_bill(c, endtime) ); + } + { // db_read_mode HEAD mode, so transactions are immediately reverted + subjective_billing sub_bill; + + sub_bill.subjective_bill( id1, now, a, fc::microseconds( 23 ), false ); + sub_bill.subjective_bill( id2, now, a, fc::microseconds( 19 ), false ); + sub_bill.subjective_bill( id3, now, b, fc::microseconds( 7 ), false ); + + BOOST_CHECK_EQUAL( 23+19, sub_bill.get_subjective_bill(a, now) ); + BOOST_CHECK_EQUAL( 7, sub_bill.get_subjective_bill(b, now) ); + + sub_bill.on_block({}, now); // have not seen any of the transactions come back yet + sub_bill.abort_block(); + + BOOST_CHECK_EQUAL( 23+19, sub_bill.get_subjective_bill(a, now) ); + BOOST_CHECK_EQUAL( 7, 
sub_bill.get_subjective_bill(b, now) ); + + sub_bill.on_block({}, now); + sub_bill.remove_subjective_billing( id1, 0 ); // simulate seeing id1 come back in block (this is what on_block would do) + sub_bill.abort_block(); + + BOOST_CHECK_EQUAL( 19, sub_bill.get_subjective_bill(a, now) ); + BOOST_CHECK_EQUAL( 7, sub_bill.get_subjective_bill(b, now) ); + } + { // db_read_mode SPECULATIVE mode, so transactions are in pending block until aborted + subjective_billing sub_bill; + + sub_bill.subjective_bill( id1, now, a, fc::microseconds( 23 ), true ); + sub_bill.subjective_bill( id2, now, a, fc::microseconds( 19 ), false ); // trx outside of block + sub_bill.subjective_bill( id3, now, a, fc::microseconds( 55 ), true ); + sub_bill.subjective_bill( id4, now, b, fc::microseconds( 3 ), true ); + sub_bill.subjective_bill( id5, now, b, fc::microseconds( 7 ), true ); + sub_bill.subjective_bill( id6, now, a, fc::microseconds( 11 ), false ); // trx outside of block + + BOOST_CHECK_EQUAL( 19+11, sub_bill.get_subjective_bill(a, now) ); // should not include what is in the pending block + BOOST_CHECK_EQUAL( 0, sub_bill.get_subjective_bill(b, now) ); // should not include what is in the pending block + BOOST_CHECK_EQUAL( 0, sub_bill.get_subjective_bill(c, now) ); + + sub_bill.on_block({}, now); // have not seen any of the transactions come back yet + sub_bill.abort_block(); // aborts the pending block, so subjective billing needs to include the reverted trxs + + BOOST_CHECK_EQUAL( 23+19+55+11, sub_bill.get_subjective_bill(a, now) ); + BOOST_CHECK_EQUAL( 3+7, sub_bill.get_subjective_bill(b, now) ); + + sub_bill.on_block({}, now); + sub_bill.remove_subjective_billing( id3, 0 ); // simulate seeing id3 come back in block (this is what on_block would do) + sub_bill.remove_subjective_billing( id4, 0 ); // simulate seeing id4 come back in block (this is what on_block would do) + sub_bill.abort_block(); + + BOOST_CHECK_EQUAL( 23+19+11, sub_bill.get_subjective_bill(a, now) ); + BOOST_CHECK_EQUAL( 7, sub_bill.get_subjective_bill(b, now) ); + } + { // failed handling logic, decay with repeated failures should be exponential, single failures should be linear + subjective_billing sub_bill; + + sub_bill.subjective_bill_failure(a, fc::microseconds(1024), now); + sub_bill.subjective_bill_failure(b, fc::microseconds(1024), now); + BOOST_CHECK_EQUAL( 1024, sub_bill.get_subjective_bill(a, now) ); + BOOST_CHECK_EQUAL( 1024, sub_bill.get_subjective_bill(b, now) ); + + sub_bill.subjective_bill_failure(a, fc::microseconds(1024), halftime); + BOOST_CHECK_EQUAL( 512 + 1024, sub_bill.get_subjective_bill(a, halftime) ); + BOOST_CHECK_EQUAL( 512, sub_bill.get_subjective_bill(b, halftime) ); + + sub_bill.subjective_bill_failure(a, fc::microseconds(1024), endtime); + BOOST_CHECK_EQUAL( 256 + 512 + 1024, sub_bill.get_subjective_bill(a, endtime) ); + BOOST_CHECK_EQUAL( 0, sub_bill.get_subjective_bill(b, endtime) ); + } + + { // expired handling logic, full billing until expiration then failed/decay logic + subjective_billing sub_bill; + constexpr uint32_t window_size = subjective_billing::expired_accumulator_average_window; + + sub_bill.subjective_bill( id1, now, a, fc::microseconds( 1024 ), false ); + sub_bill.subjective_bill( id2, now + fc::microseconds(1), a, fc::microseconds( 1024 ), false ); + sub_bill.subjective_bill( id3, now, b, fc::microseconds( 1024 ), false ); + BOOST_CHECK_EQUAL( 1024 + 1024, sub_bill.get_subjective_bill(a, now) ); + BOOST_CHECK_EQUAL( 1024, sub_bill.get_subjective_bill(b, now) ); + + sub_bill.remove_expired( 
log, now, now, fc::time_point::maximum() );
+      BOOST_CHECK_EQUAL( 1024 + 1024, sub_bill.get_subjective_bill(a, now) );
+      BOOST_CHECK_EQUAL( 1024, sub_bill.get_subjective_bill(b, now) );
+
+      BOOST_CHECK_EQUAL( 512 + 1024, sub_bill.get_subjective_bill(a, halftime) );
+      BOOST_CHECK_EQUAL( 512, sub_bill.get_subjective_bill(b, halftime) );
+
+      BOOST_CHECK_EQUAL( 1024, sub_bill.get_subjective_bill(a, endtime) );
+      BOOST_CHECK_EQUAL( 0, sub_bill.get_subjective_bill(b, endtime) );
+
+      sub_bill.remove_expired( log, now + fc::microseconds(1), now, fc::time_point::maximum() );
+      BOOST_CHECK_EQUAL( 1024 + 1024, sub_bill.get_subjective_bill(a, now) );
+      BOOST_CHECK_EQUAL( 1024, sub_bill.get_subjective_bill(b, now) );
+
+      BOOST_CHECK_EQUAL( 512 + 512, sub_bill.get_subjective_bill(a, halftime) );
+      BOOST_CHECK_EQUAL( 512, sub_bill.get_subjective_bill(b, halftime) );
+
+      BOOST_CHECK_EQUAL( 0, sub_bill.get_subjective_bill(a, endtime) );
+      BOOST_CHECK_EQUAL( 0, sub_bill.get_subjective_bill(b, endtime) );
+   }
+
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+}
diff --git a/unittests/wasm_tests.cpp b/unittests/wasm_tests.cpp
index b36b0bc05a4..655971bd64d 100644
--- a/unittests/wasm_tests.cpp
+++ b/unittests/wasm_tests.cpp
@@ -1921,8 +1921,8 @@ BOOST_AUTO_TEST_CASE( billed_cpu_test ) try {
    };
 
    auto push_trx = [&]( const transaction_metadata_ptr& trx, fc::time_point deadline,
-                        uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time ) {
-      auto r = chain.control->push_transaction( trx, deadline, billed_cpu_time_us, explicit_billed_cpu_time );
+                        uint32_t billed_cpu_time_us, bool explicit_billed_cpu_time, uint32_t subjective_cpu_bill_us ) {
+      auto r = chain.control->push_transaction( trx, deadline, billed_cpu_time_us, explicit_billed_cpu_time, subjective_cpu_bill_us );
       if( r->except_ptr ) std::rethrow_exception( r->except_ptr );
       if( r->except ) throw *r->except;
       return r;
@@ -1930,7 +1930,7 @@ BOOST_AUTO_TEST_CASE( billed_cpu_test ) try {
 
    auto ptrx = create_trx(0);
    // no limits, just verifying trx works
-   push_trx( ptrx, fc::time_point::maximum(), 0, false ); // non-explicit billing
+   push_trx( ptrx, fc::time_point::maximum(), 0, false, 0 ); // non-explicit billing
 
    // setup account acc with large limits
    chain.push_action( config::system_account_name, N(setalimits), config::system_account_name, fc::mutable_variant_object()
@@ -1956,7 +1956,7 @@ BOOST_AUTO_TEST_CASE( billed_cpu_test ) try {
    ptrx = create_trx(0);
    BOOST_CHECK_LT( max_cpu_time_us, cpu_limit ); // max_cpu_time_us has to be less than cpu_limit to actually test max and not account
    // indicate explicit billing at transaction max, max_cpu_time_us has to be greater than account cpu time
-   push_trx( ptrx, fc::time_point::maximum(), max_cpu_time_us, true );
+   push_trx( ptrx, fc::time_point::maximum(), max_cpu_time_us, true, 0 );
    chain.produce_block();
 
    cpu_limit = mgr.get_account_cpu_limit(acc).first;
@@ -1965,14 +1965,14 @@ BOOST_AUTO_TEST_CASE( billed_cpu_test ) try {
    ptrx = create_trx(0);
    BOOST_CHECK_LT( max_cpu_time_us + 1, cpu_limit ); // max_cpu_time_us+1 has to be less than cpu_limit to actually test max and not account
    // indicate explicit billing at max + 1
-   BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), max_cpu_time_us + 1, true ), tx_cpu_usage_exceeded,
+   BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), max_cpu_time_us + 1, true, 0 ), tx_cpu_usage_exceeded,
                           fc_exception_message_starts_with( "billed") );
 
    // allow to bill at trx configured max
    ptrx = create_trx(5); // set trx max at 5ms
    BOOST_CHECK_LT( 5 * 1000, cpu_limit ); // 5ms has to be less than cpu_limit to actually test trx max and not account
    // indicate explicit billing at max
-   push_trx( ptrx, fc::time_point::maximum(), 5 * 1000, true );
+   push_trx( ptrx, fc::time_point::maximum(), 5 * 1000, true, 0 );
    chain.produce_block();
 
    cpu_limit = mgr.get_account_cpu_limit(acc).first; // update after last trx
@@ -1981,19 +1981,19 @@ BOOST_AUTO_TEST_CASE( billed_cpu_test ) try {
    ptrx = create_trx(5); // set trx max at 5ms
    BOOST_CHECK_LT( 5 * 1000 + 1, cpu_limit ); // 5ms has to be less than cpu_limit to actually test trx max and not account
    // indicate explicit billing at max + 1
-   BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), 5 * 1000 + 1, true ), tx_cpu_usage_exceeded,
+   BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), 5 * 1000 + 1, true, 0 ), tx_cpu_usage_exceeded,
                           fc_exception_message_starts_with("billed") );
 
    // bill at minimum
    ptrx = create_trx(0);
    // indicate explicit billing at transaction minimum
-   push_trx( ptrx, fc::time_point::maximum(), min_cpu_time_us, true );
+   push_trx( ptrx, fc::time_point::maximum(), min_cpu_time_us, true, 0 );
    chain.produce_block();
 
    // do not allow to bill less than minimum
    ptrx = create_trx(0);
    // indicate explicit billing at minimum-1, objective failure even with explicit billing for under min
-   BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), min_cpu_time_us - 1, true ), transaction_exception,
+   BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), min_cpu_time_us - 1, true, 0 ), transaction_exception,
                           fc_exception_message_starts_with("cannot bill CPU time less than the minimum") );
 
    chain.push_action( config::system_account_name, N(setalimits), config::system_account_name, fc::mutable_variant_object()
@@ -2012,13 +2012,16 @@ BOOST_AUTO_TEST_CASE( billed_cpu_test ) try {
 
    ptrx = create_trx(0);
    BOOST_CHECK_LT( cpu_limit+1, max_cpu_time_us ); // needs to be less or this just tests the same thing as max_cpu_time_us test above
    // indicate non-explicit billing with 1 more than our account cpu limit, triggers optimization check #8638 and fails trx
-   BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), cpu_limit+1, false ), tx_cpu_usage_exceeded,
+   BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), cpu_limit+1, false, 0 ), tx_cpu_usage_exceeded,
+                          fc_exception_message_starts_with("estimated") );
+   // indicate non-explicit billing with 1 more (subjective) than our account cpu limit, triggers optimization check #8638 and fails trx
+   BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), cpu_limit, false, 1 ), tx_cpu_usage_exceeded,
                           fc_exception_message_starts_with("estimated") );
 
    ptrx = create_trx(0);
    BOOST_CHECK_LT( cpu_limit, max_cpu_time_us );
    // indicate non-explicit billing at our account cpu limit, will allow this trx to run, but only bills for actual use
-   auto r = push_trx( ptrx, fc::time_point::maximum(), cpu_limit, false );
+   auto r = push_trx( ptrx, fc::time_point::maximum(), cpu_limit, false, 0 );
    BOOST_CHECK_LT( r->receipt->cpu_usage_us, cpu_limit ); // verify not billed at provided bill amount when explicit_billed_cpu_time=false
    chain.produce_block();
@@ -2028,7 +2031,7 @@ BOOST_AUTO_TEST_CASE( billed_cpu_test ) try {
    ptrx = create_trx(0);
    BOOST_CHECK_LT( cpu_limit+1, max_cpu_time_us ); // needs to be less or this just tests the same thing as max_cpu_time_us test above
    // indicate explicit billing at over our account cpu limit, not allowed
    cpu_limit = mgr.get_account_cpu_limit_ex(acc).first.max;
-   BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), cpu_limit+1, true ), tx_cpu_usage_exceeded,
+   BOOST_CHECK_EXCEPTION( push_trx( ptrx, fc::time_point::maximum(), cpu_limit+1, true, 0 ), tx_cpu_usage_exceeded,
                           fc_exception_message_starts_with("billed") );
 
 } FC_LOG_AND_RETHROW()
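
The subjective_billing checks above pin down a decay behavior: a transaction's
subjective bill counts in full at the time it is recorded, at half once half of
its expiration window has elapsed, and not at all once it expires. A minimal
sketch of that linear decay follows; decayed_bill and its parameters are
invented names for illustration, not the actual subjective_billing
implementation.

   #include <cstdint>

   // Hypothetical helper illustrating the linear decay the tests exercise:
   // the bill is worth billed_us at billed_time_us and decays to zero by
   // expire_time_us. All times are in microseconds.
   inline uint32_t decayed_bill( uint32_t billed_us,
                                 int64_t  billed_time_us,
                                 int64_t  expire_time_us,
                                 int64_t  current_time_us )
   {
      if( current_time_us >= expire_time_us ) return 0;         // fully expired
      if( current_time_us <= billed_time_us ) return billed_us; // not yet decaying
      const int64_t window    = expire_time_us - billed_time_us;
      const int64_t remaining = expire_time_us - current_time_us;
      return static_cast<uint32_t>( ( uint64_t(billed_us) * remaining ) / window );
   }

With billed_us = 1024 this yields 1024 at the bill time, 512 at the halfway
point of the window, and 0 at expiration, matching the BOOST_CHECK_EQUAL
values in the test above.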
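The two BOOST_CHECK_EXCEPTION cases around the #8638 optimization check suggest
that, for non-explicit billing, the subjective bill is simply added to the
caller-provided objective estimate before the sum is compared against the
account's CPU limit. A sketch of that pre-execution check under this
assumption; exceeds_cpu_estimate is an invented name, not controller API.

   #include <cstdint>

   // Hypothetical pre-execution check: reject before running if the combined
   // objective + subjective estimate already exceeds the account's CPU limit.
   // In the tests this surfaces as tx_cpu_usage_exceeded with an "estimated"
   // message prefix.
   inline bool exceeds_cpu_estimate( uint32_t billed_cpu_time_us,
                                     uint32_t subjective_cpu_bill_us,
                                     uint32_t account_cpu_limit_us )
   {
      const uint64_t estimate = uint64_t(billed_cpu_time_us) + subjective_cpu_bill_us;
      return estimate > account_cpu_limit_us;
   }

This is consistent with the three cases above: (cpu_limit+1, 0) and
(cpu_limit, 1) both exceed the limit and throw, while (cpu_limit, 0) is allowed
to run and is then billed only for actual use.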