diff --git a/.clang-format b/.clang-format index 50a16dbaf4..26699cd819 100644 --- a/.clang-format +++ b/.clang-format @@ -14,56 +14,82 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. + +# Reference: https://releases.llvm.org/14.0.0/tools/clang/docs/ClangFormatStyleOptions.html --- Language: Cpp # BasedOnStyle: LLVM AccessModifierOffset: -4 AlignAfterOpenBracket: Align -AlignConsecutiveAssignments: false -AlignConsecutiveDeclarations: false -AlignEscapedNewlinesLeft: false -AlignOperands: true +AlignArrayOfStructures: None +AlignConsecutiveAssignments: None +AlignConsecutiveBitFields: None +AlignConsecutiveDeclarations: None +AlignConsecutiveMacros: None +AlignEscapedNewlines: Right +AlignOperands: Align AlignTrailingComments: true +AllowAllArgumentsOnNextLine: true AllowAllParametersOfDeclarationOnNextLine: true -AllowShortBlocksOnASingleLine: false +AllowShortBlocksOnASingleLine: Never AllowShortCaseLabelsOnASingleLine: false +AllowShortEnumsOnASingleLine: true AllowShortFunctionsOnASingleLine: All -AllowShortIfStatementsOnASingleLine: false +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: All AllowShortLoopsOnASingleLine: false AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false -AlwaysBreakTemplateDeclarations: true +AlwaysBreakTemplateDeclarations: Yes BinPackArguments: false BinPackParameters: false -BraceWrapping: +BitFieldColonSpacing: Both +BraceWrapping: + AfterCaseLabel: false AfterClass: true - AfterControlStatement: false + AfterControlStatement: Never AfterEnum: true + AfterExternBlock: false AfterFunction: true AfterNamespace: false AfterObjCDeclaration: false - AfterStruct: true - AfterUnion: true - BeforeCatch: false - BeforeElse: false - IndentBraces: false + AfterStruct: true + AfterUnion: true + BeforeCatch: false + BeforeElse: false + BeforeLambdaBody: false + BeforeWhile: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyNamespace: true + SplitEmptyRecord: true +BreakAfterJavaFieldAnnotations: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Custom +BreakBeforeConceptDeclarations: true BreakBeforeTernaryOperators: true +BreakConstructorInitializers: BeforeColon BreakConstructorInitializersBeforeComma: false -BreakAfterJavaFieldAnnotations: false +BreakInheritanceList: BeforeColon BreakStringLiterals: true ColumnLimit: 100 CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false ConstructorInitializerAllOnOneLineOrOnePerLine: true ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 4 Cpp11BracedListStyle: true +DeriveLineEnding: true DerivePointerAlignment: false DisableFormat: false +EmptyLineAfterAccessModifier: Never +EmptyLineBeforeAccessModifier: LogicalBlock ExperimentalAutoDetectBinPacking: true +FixNamespaceComments: true ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] +IfMacros: [ KJ_IF_MAYBE ] +IncludeBlocks: Preserve IncludeCategories: - Regex: '^"(llvm|llvm-c|clang|clang-c)/' Priority: 2 @@ -72,40 +98,86 @@ IncludeCategories: - Regex: '.*' Priority: 1 IncludeIsMainRegex: '$' +IndentAccessModifiers: false +IndentCaseBlocks: false IndentCaseLabels: false +IndentExternBlock: AfterExternBlock +IndentGotoLabels: true +IndentPPDirectives: None +IndentRequires: false IndentWidth: 4 IndentWrappedFunctionNames: false +InsertTrailingCommas: None JavaScriptQuotes: Leave JavaScriptWrapImports: true KeepEmptyLinesAtTheStartOfBlocks: true +LambdaBodyIndentation: Signature +Language: Cpp MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 NamespaceIndentation: None +ObjCBinPackProtocolList: Auto ObjCBlockIndentWidth: 2 +ObjCBreakBeforeNestedBlockParam: true ObjCSpaceAfterProperty: false ObjCSpaceBeforeProtocolList: true +PPIndentWidth: -1 +PackConstructorInitializers: BinPack +PenaltyBreakAssignment: 4 PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 +PenaltyBreakOpenParenthesis: 0 PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 4 PenaltyExcessCharacter: 1000000 +PenaltyIndentedWhitespace: 0 PenaltyReturnTypeOnItsOwnLine: 60 PointerAlignment: Right +QualifierAlignment: Leave +ReferenceAlignment: Pointer ReflowComments: true -SortIncludes: false +RemoveBracesLLVM: false +SeparateDefinitionBlocks: Leave +ShortNamespaceLines: 1 +SortIncludes: Never +SortJavaStaticImport: Before +SortUsingDeclarations: true SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceAroundPointerQualifiers: Default SpaceBeforeAssignmentOperators: true +SpaceBeforeCaseColon: false +SpaceBeforeCpp11BracedList: false +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true SpaceBeforeParens: ControlStatements +SpaceBeforeParensOptions: + AfterControlStatements: true + AfterForeachMacros: true + AfterIfMacros: true +SpaceBeforeRangeBasedForLoopColon: true +SpaceBeforeSquareBrackets: false +SpaceInEmptyBlock: false SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 1 -SpacesInAngles: false -SpacesInContainerLiterals: true +SpacesInAngles: Never SpacesInCStyleCastParentheses: false +SpacesInConditionalStatement: false +SpacesInContainerLiterals: true +SpacesInLineCommentPrefix: + Maximum: -1 + Minimum: 1 SpacesInParentheses: false SpacesInSquareBrackets: false -Standard: Cpp11 +Standard: c++17 +StatementAttributeLikeMacros: [ Q_EMIT] +StatementMacros: [ Q_UNUSED, QT_REQUIRE_VERSION] TabWidth: 4 +UseCRLF: false UseTab: Never +WhitespaceSensitiveMacros: [ STRINGIZE, PP_STRINGIZE, BOOST_PP_STRINGIZE, NS_SWIFT_NAME, CF_SWIFT_NAME] ... diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 0000000000..888e91b95b --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,31 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# https://releases.llvm.org/14.0.0/tools/clang/tools/extra/docs/clang-tidy/index.html + +CheckOptions: [] +# Disable some checks that are not useful for us now. +# They are sorted by names, and should be consistent to build_tools/clang_tidy.py. +Checks: 'abseil-*,boost-*,bugprone-*,cert-*,clang-analyzer-*,concurrency-*,cppcoreguidelines-*,darwin-*,fuchsia-*,google-*,hicpp-*,linuxkernel-*,llvm-*,misc-*,modernize-*,performance-*,portability-*,readability-*,-bugprone-easily-swappable-parameters,-bugprone-lambda-function-name,-bugprone-macro-parentheses,-cert-err58-cpp,-concurrency-mt-unsafe,-cppcoreguidelines-avoid-c-arrays,-cppcoreguidelines-avoid-magic-numbers,-cppcoreguidelines-avoid-non-const-global-variables,-cppcoreguidelines-macro-usage,-cppcoreguidelines-non-private-member-variables-in-classes,-cppcoreguidelines-owning-memory,-cppcoreguidelines-pro-bounds-array-to-pointer-decay,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-cppcoreguidelines-pro-type-const-cast,-cppcoreguidelines-pro-type-union-access,-fuchsia-default-arguments-calls,-fuchsia-overloaded-operator,-fuchsia-statically-constructed-objects,-google-readability-avoid-underscore-in-googletest-name,-hicpp-avoid-c-arrays,-hicpp-named-parameter,-hicpp-no-array-decay,-llvm-include-order,-misc-definitions-in-headers,-misc-non-private-member-variables-in-classes,-modernize-avoid-c-arrays,-modernize-replace-disallow-copy-and-assign-macro,-modernize-use-trailing-return-type,-readability-function-cognitive-complexity,-readability-identifier-length,-readability-magic-numbers,-readability-named-parameter' +ExtraArgs: +ExtraArgsBefore: [] +FormatStyle: none +HeaderFilterRegex: '' +InheritParentConfig: true +UseColor: true +User: 'clang-tidy' +WarningsAsErrors: '' diff --git a/.github/actions/build_pegasus/action.yaml b/.github/actions/build_pegasus/action.yaml index ad640f2fa5..d0093ffb66 100644 --- a/.github/actions/build_pegasus/action.yaml +++ b/.github/actions/build_pegasus/action.yaml @@ -38,12 +38,12 @@ runs: shell: bash - name: Pack Server run: | - ./run.sh pack_server -j + ./run.sh pack_server -j ${PACK_OPTIONS} rm -rf pegasus-server-* shell: bash - name: Pack Tools run: | - ./run.sh pack_tools -j + ./run.sh pack_tools -j ${PACK_OPTIONS} rm -rf pegasus-tools-* shell: bash - name: Clear Build Files diff --git a/.github/actions/rebuild_thirdparty_if_needed/action.yaml b/.github/actions/rebuild_thirdparty_if_needed/action.yaml index 6e64f7db46..838ac3f615 100644 --- a/.github/actions/rebuild_thirdparty_if_needed/action.yaml +++ b/.github/actions/rebuild_thirdparty_if_needed/action.yaml @@ -42,8 +42,8 @@ runs: cmake --build build/ -j $(nproc) rm -rf build/Build build/Download/[a-y]* build/Source/[a-g]* build/Source/[i-q]* build/Source/[s-z]* find ./ -name '*CMakeFiles*' -type d -exec rm -rf "{}" + - ../scripts/download_hadoop.sh hadoop-bin - ../scripts/download_zk.sh zookeeper-bin + ../build_tools/download_hadoop.sh hadoop-bin + ../build_tools/download_zk.sh zookeeper-bin rm -rf hadoop-bin/share/doc rm -rf zookeeper-bin/docs shell: bash diff --git a/.github/workflows/build-push-env-docker.yml b/.github/workflows/build-push-env-docker.yml index 1bf695e7ed..bc6bf447da 100644 --- a/.github/workflows/build-push-env-docker.yml +++ b/.github/workflows/build-push-env-docker.yml @@ -34,6 +34,12 @@ on: jobs: build_compilation_env_docker_images: runs-on: ubuntu-latest + env: + # The glibc version on ubuntu1804 and centos7 is lower than the node20 required, so + # we need to force the node version to 16. + # See more details: https://github.com/actions/checkout/issues/1809 + ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16 + ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true strategy: fail-fast: false matrix: @@ -44,19 +50,22 @@ jobs: - centos7 steps: - name: Checkout - uses: actions/checkout@v3 + # The glibc version on ubuntu1804 and centos7 is lower than the actions/checkout@v4 required, so + # we need to force to use actions/checkout@v3. + uses: actions/checkout@v4 - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 - name: Login to DockerHub - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push - uses: docker/build-push-action@v2.10.0 + uses: docker/build-push-action@v6 with: + platforms: linux/amd64,linux/arm64 context: . file: ./docker/pegasus-build-env/${{ matrix.dockertag }}/Dockerfile push: true diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index eb13a2db7b..2f20d430bc 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -23,13 +23,16 @@ on: types: [ synchronize, opened, reopened, edited ] jobs: - process: + labeler: name: Module Labeler runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Assign GitHub labels - uses: actions/labeler@v4 + uses: actions/labeler@v5 with: repo-token: ${{ secrets.GITHUB_TOKEN }} configuration-path: .github/workflows/module_labeler_conf.yml diff --git a/.github/workflows/lint_and_test_admin-cli.yml b/.github/workflows/lint_and_test_admin-cli.yml index 788ba8ba3e..4fef441bf0 100644 --- a/.github/workflows/lint_and_test_admin-cli.yml +++ b/.github/workflows/lint_and_test_admin-cli.yml @@ -41,7 +41,7 @@ jobs: name: Lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v2 with: @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v2 with: diff --git a/.github/workflows/lint_and_test_collector.yml b/.github/workflows/lint_and_test_collector.yml index e21ede1a87..6fc8308c2f 100644 --- a/.github/workflows/lint_and_test_collector.yml +++ b/.github/workflows/lint_and_test_collector.yml @@ -43,7 +43,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go uses: actions/setup-go@v2 with: @@ -60,7 +60,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 1 - name: Set up Go @@ -80,7 +80,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 1 - name: Set up Go diff --git a/.github/workflows/lint_and_test_cpp.yaml b/.github/workflows/lint_and_test_cpp.yaml index 40c6655ae9..bbe2571831 100644 --- a/.github/workflows/lint_and_test_cpp.yaml +++ b/.github/workflows/lint_and_test_cpp.yaml @@ -30,13 +30,13 @@ on: - .github/actions - .github/workflows/lint_and_test_cpp.yaml - .github/workflows/thirdparty-regular-push.yml + - build_tools/pack_server.sh + - build_tools/pack_tools.sh - docker/thirdparties-bin/** - docker/thirdparties-src/** - CMakeLists.txt - cmake_modules/** - run.sh - - scripts/pack_server.sh - - scripts/pack_tools.sh - src/** - thirdparty/** @@ -50,25 +50,46 @@ env: jobs: cpp_clang_format_linter: - name: Lint - runs-on: ubuntu-latest - container: - image: apache/pegasus:clang-format-3.9 + name: Format + runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: clang-format - run: ./scripts/run-clang-format.py --clang-format-executable clang-format-3.9 -e ./src/shell/linenoise -e ./src/shell/sds -e ./thirdparty -r . + run: ./build_tools/run-clang-format.py --clang-format-executable clang-format-14 -e ./src/shell/linenoise -e ./src/shell/sds -e ./thirdparty -r . + + cpp_clang_tidy_linter: + name: Tidy + runs-on: ubuntu-22.04 + container: + image: apache/pegasus:thirdparties-bin-ubuntu2204-${{ github.base_ref }} + steps: + - name: Install Softwares + run: | + apt-get update + apt-get install clang-tidy -y + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Rebuild thirdparty if needed + uses: "./.github/actions/rebuild_thirdparty_if_needed" + - name: clang-tidy + run: | + git config --global --add safe.directory $(pwd) + ./run.sh build --test --compiler clang-14,clang++-14 -t debug --skip_thirdparty -c --cmake_only + ./build_tools/clang_tidy.py --rev-range $(git log origin/${{ github.base_ref }} -n1 --format=format:"%H") + shell: bash iwyu: name: IWYU - needs: cpp_clang_format_linter + needs: + - cpp_clang_format_linter runs-on: ubuntu-latest env: USE_JEMALLOC: OFF container: image: apache/pegasus:thirdparties-bin-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Free Disk Space (Ubuntu) run: | .github/workflows/free_disk_space.sh @@ -89,7 +110,8 @@ jobs: build_Release: name: Build Release - needs: cpp_clang_format_linter + needs: + - cpp_clang_format_linter runs-on: ubuntu-latest env: USE_JEMALLOC: OFF @@ -99,7 +121,7 @@ jobs: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - name: Clone code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Rebuild thirdparty if needed uses: "./.github/actions/rebuild_thirdparty_if_needed" - name: Build Pegasus @@ -138,12 +160,14 @@ jobs: - dsn_replica_bulk_load_test - dsn_replica_dup_test - dsn_replica_split_test + - dsn_rpc_tests - dsn.replica.test - dsn_replication_common_test - dsn.replication.simple_kv - dsn.rep_tests.simple_kv - dsn_runtime_tests - dsn_security_tests + - dsn_task_tests - dsn_utils_tests - dsn.zookeeper.tests # TODO(yingchun): Disable it because we find it's too flaky, we will re-enable it after @@ -164,7 +188,7 @@ jobs: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} options: --cap-add=SYS_PTRACE steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Download artifact uses: "./.github/actions/download_artifact" - name: Run server tests @@ -172,7 +196,8 @@ jobs: build_ASAN: name: Build ASAN - needs: cpp_clang_format_linter + needs: + - cpp_clang_format_linter runs-on: ubuntu-latest env: USE_JEMALLOC: OFF @@ -181,7 +206,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Rebuild thirdparty if needed uses: "./.github/actions/rebuild_thirdparty_if_needed" - name: Build Pegasus @@ -220,12 +245,14 @@ jobs: - dsn_replica_bulk_load_test - dsn_replica_dup_test - dsn_replica_split_test + - dsn_rpc_tests - dsn.replica.test - dsn_replication_common_test - dsn.replication.simple_kv - dsn.rep_tests.simple_kv - dsn_runtime_tests - dsn_security_tests + - dsn_task_tests - dsn_utils_tests - dsn.zookeeper.tests # TODO(yingchun): Disable it because we find it's too flaky, we will re-enable it after @@ -248,7 +275,7 @@ jobs: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} options: --cap-add=SYS_PTRACE steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Download artifact uses: "./.github/actions/download_artifact" - name: Run server tests @@ -259,7 +286,8 @@ jobs: # before we find any way to reduce the time cost. # build_UBSAN: # name: Build UBSAN -# needs: cpp_clang_format_linter +# needs: +# - cpp_clang_format_linter # runs-on: ubuntu-latest # env: # USE_JEMALLOC: OFF @@ -268,7 +296,7 @@ jobs: # container: # image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} # steps: -# - uses: actions/checkout@v3 +# - uses: actions/checkout@v4 # - name: Rebuild thirdparty if needed # uses: "./.github/actions/rebuild_thirdparty_if_needed" # - name: Build Pegasus @@ -307,12 +335,14 @@ jobs: # - dsn_replica_bulk_load_test # - dsn_replica_dup_test # - dsn_replica_split_test +# - dsn_rpc_tests # - dsn.replica.test # - dsn_replication_common_test # - dsn.replication.simple_kv # - dsn.rep_tests.simple_kv # - dsn_runtime_tests # - dsn_security_tests +# - dsn_task_tests # - dsn_utils_tests # - dsn.zookeeper.tests # - partition_split_test @@ -331,7 +361,7 @@ jobs: # image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} # options: --cap-add=SYS_PTRACE # steps: -# - uses: actions/checkout@v3 +# - uses: actions/checkout@v4 # - name: Download artifact # uses: "./.github/actions/download_artifact" # - name: Run server tests @@ -339,7 +369,8 @@ jobs: build_with_jemalloc: name: Build with jemalloc - needs: cpp_clang_format_linter + needs: + - cpp_clang_format_linter runs-on: ubuntu-latest env: USE_JEMALLOC: ON @@ -348,7 +379,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-jemallc-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Rebuild thirdparty if needed uses: "./.github/actions/rebuild_thirdparty_if_needed" # TODO(yingchun): Append "-m dsn_utils_tests" to the command if not needed to pack server or tools, for example, the dependencies are static linked. @@ -372,15 +403,16 @@ jobs: image: apache/pegasus:thirdparties-bin-test-jemallc-ubuntu2204-${{ github.base_ref }} options: --cap-add=SYS_PTRACE steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Download artifact uses: "./.github/actions/download_artifact" - name: Run server tests uses: "./.github/actions/run_server_tests" - build_pegasus_on_macos: - name: macOS - needs: cpp_clang_format_linter + build_release_on_macos: + name: Build Release on macOS + needs: + - cpp_clang_format_linter runs-on: macos-12 steps: - name: Install Softwares @@ -388,7 +420,7 @@ jobs: # Preinstalled softwares: https://github.com/actions/virtual-environments/blob/main/images/macos/macos-12-Readme.md brew install ccache brew install openssl@1.1 - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup cache uses: actions/cache@v3 with: @@ -411,3 +443,24 @@ jobs: ccache -z ./run.sh build --test --skip_thirdparty -j $(sysctl -n hw.physicalcpu) ccache -s + + build_debug_on_centos7: + name: Build Debug on CentOS 7 + needs: + - cpp_clang_format_linter + runs-on: ubuntu-latest + env: + USE_JEMALLOC: OFF + BUILD_OPTIONS: -t debug --test --separate_servers + PACK_OPTIONS: --separate_servers + ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16 + ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true + container: + image: apache/pegasus:thirdparties-bin-centos7-${{ github.base_ref }} + steps: + - name: Clone code + uses: actions/checkout@v3 + - name: Rebuild thirdparty if needed + uses: "./.github/actions/rebuild_thirdparty_if_needed" + - name: Build Pegasus + uses: "./.github/actions/build_pegasus" diff --git a/.github/workflows/lint_and_test_go-client.yml b/.github/workflows/lint_and_test_go-client.yml index 4e02501f14..691121b0f1 100644 --- a/.github/workflows/lint_and_test_go-client.yml +++ b/.github/workflows/lint_and_test_go-client.yml @@ -45,7 +45,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go uses: actions/setup-go@v2 with: @@ -66,7 +66,7 @@ jobs: - name: Install thrift run: sudo apt-get install -y thrift-compiler - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Go uses: actions/setup-go@v2 with: @@ -91,7 +91,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: "./.github/actions/rebuild_thirdparty_if_needed" - uses: "./.github/actions/build_pegasus" - uses: "./.github/actions/upload_artifact" @@ -118,7 +118,7 @@ jobs: make install cd - && rm -rf thrift-${THRIFT_VERSION} v${THRIFT_VERSION}.tar.gz - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v2 with: diff --git a/.github/workflows/lint_and_test_java-client.yml b/.github/workflows/lint_and_test_java-client.yml index a11ebb5ee4..6d172bd910 100644 --- a/.github/workflows/lint_and_test_java-client.yml +++ b/.github/workflows/lint_and_test_java-client.yml @@ -39,7 +39,7 @@ jobs: name: Spotless runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-java@v1 with: java-version: 8 @@ -58,7 +58,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Rebuild thirdparty if needed uses: "./.github/actions/rebuild_thirdparty_if_needed" - name: Build Pegasus @@ -79,7 +79,7 @@ jobs: matrix: java: [ '8', '11'] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/cache@v2 with: path: ~/.m2/repository @@ -98,9 +98,9 @@ jobs: export LD_LIBRARY_PATH=$(pwd)/thirdparty/output/lib:${JAVA_HOME}/jre/lib/amd64/server ulimit -s unlimited ./run.sh start_onebox - - name: Recompile thrift + - name: Download thrift working-directory: ./java-client/scripts - run: ./recompile_thrift.sh + run: ./download_thrift.sh - name: Run Java client tests working-directory: ./java-client run: mvn test --no-transfer-progress diff --git a/.github/workflows/lint_and_test_pegic.yml b/.github/workflows/lint_and_test_pegic.yml index 3788b44121..da94f9b4a8 100644 --- a/.github/workflows/lint_and_test_pegic.yml +++ b/.github/workflows/lint_and_test_pegic.yml @@ -41,7 +41,7 @@ jobs: name: Lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: golangci-lint uses: golangci/golangci-lint-action@v3 with: @@ -53,7 +53,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v2 with: diff --git a/.github/workflows/lint_and_test_scala-client.yml b/.github/workflows/lint_and_test_scala-client.yml index b261902e73..29925d051b 100644 --- a/.github/workflows/lint_and_test_scala-client.yml +++ b/.github/workflows/lint_and_test_scala-client.yml @@ -39,7 +39,7 @@ jobs: name: Format runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-java@v1 with: java-version: 8 @@ -57,7 +57,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: "./.github/actions/rebuild_thirdparty_if_needed" - uses: "./.github/actions/build_pegasus" - uses: "./.github/actions/upload_artifact" @@ -73,7 +73,7 @@ jobs: matrix: java: [ '8', '11'] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/cache@v2 with: path: ~/.m2/repository @@ -89,9 +89,9 @@ jobs: source /github/home/.sdkman/bin/sdkman-init.sh sdk install sbt sbt -V - - name: Recompile thrift + - name: Download thrift working-directory: ./java-client/scripts - run: ./recompile_thrift.sh + run: ./download_thrift.sh - name: Build Java client working-directory: ./java-client run: | diff --git a/.github/workflows/module_labeler_conf.yml b/.github/workflows/module_labeler_conf.yml index ad23e4bf08..64f0e065a2 100644 --- a/.github/workflows/module_labeler_conf.yml +++ b/.github/workflows/module_labeler_conf.yml @@ -15,40 +15,74 @@ # specific language governing permissions and limitations # under the License. --- + github: - - .github/**/* +- changed-files: + - any-glob-to-any-file: + - .github/**/* admin-cli: - - admin-cli/**/* +- changed-files: + - any-glob-to-any-file: + - admin-cli/**/* collector: - - collector/**/* +- changed-files: + - any-glob-to-any-file: + - collector/**/* docker: - - docker/**/* +- changed-files: + - any-glob-to-any-file: + - docker/**/* go-client: - - go-client/**/* +- changed-files: + - any-glob-to-any-file: + - go-client/**/* java-client: - - java-client/**/* +- changed-files: + - any-glob-to-any-file: + - java-client/**/* nodejs-client: - - nodejs-client/**/* +- changed-files: + - any-glob-to-any-file: + - nodejs-client/**/* pegic: - - pegic/**/* +- changed-files: + - any-glob-to-any-file: + - pegic/**/* python-client: - - python-client/**/* +- changed-files: + - any-glob-to-any-file: + - python-client/**/* scala-client: - - scala-client/**/* +- changed-files: + - any-glob-to-any-file: + - scala-client/**/* thirdparty: - - thirdparty/**/* +- changed-files: + - any-glob-to-any-file: + - thirdparty/**/* thrift: - - '**/*.thrift' +- changed-files: + - any-glob-to-any-file: + - '**/*.thrift' docs: - - '**/*.md' +- changed-files: + - any-glob-to-any-file: + - '**/*.md' scripts: - - 'scripts/**/*' - - '**/*.sh' +- changed-files: + - any-glob-to-any-file: + - 'admin_tools/**/*' + - 'build_tools/**/*' + - '**/*.sh' build: - - 'cmake_modules/**/*' - - '**/CMakeLists.txt' - - 'run.sh' +- changed-files: + - any-glob-to-any-file: + - 'cmake_modules/**/*' + - '**/CMakeLists.txt' + - 'run.sh' cpp: +- changed-files: + - any-glob-to-any-file: # TODO(yingchun): add more fine-grained labels - - 'src/**/*.h' - - 'src/**/*.cpp' + - 'src/**/*.h' + - 'src/**/*.cpp' diff --git a/.github/workflows/regular-build.yml b/.github/workflows/regular-build.yml index a8f4386217..01c17f4fc7 100644 --- a/.github/workflows/regular-build.yml +++ b/.github/workflows/regular-build.yml @@ -37,17 +37,21 @@ on: jobs: lint_cpp: name: Lint Cpp - runs-on: ubuntu-latest - container: - image: apache/pegasus:clang-format-3.9 + runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: clang-format - run: ./scripts/run-clang-format.py --clang-format-executable clang-format-3.9 -e ./src/shell/linenoise -e ./src/shell/sds -e ./thirdparty -r . + run: ./build_tools/run-clang-format.py --clang-format-executable clang-format-14 -e ./src/shell/linenoise -e ./src/shell/sds -e ./thirdparty -r . build_cpp: name: Build Cpp runs-on: ubuntu-latest + env: + # The glibc version on ubuntu1804 and centos7 is lower than the node20 required, so + # we need to force the node version to 16. + # See more details: https://github.com/actions/checkout/issues/1809 + ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16 + ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true strategy: fail-fast: false matrix: @@ -72,6 +76,8 @@ jobs: working-directory: /root/incubator-pegasus steps: - name: Clone Apache Pegasus Source + # The glibc version on ubuntu1804 and centos7 is lower than the actions/checkout@v4 required, so + # we need to force to use actions/checkout@v3. uses: actions/checkout@v3 - name: Unpack prebuilt third-parties uses: "./.github/actions/unpack_prebuilt_thirdparties" @@ -87,7 +93,7 @@ jobs: # to generate code as well. The thrift-compiler version on ubuntu-20.04 is 0.13.0 run: sudo apt-get install -y thrift-compiler - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v2 with: @@ -130,13 +136,15 @@ jobs: key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} restore-keys: | ${{ runner.os }}-maven- - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-java@v1 with: java-version: ${{ matrix.java }} + - name: Download thrift + working-directory: ./java-client/scripts + run: ./download_thrift.sh - name: Build working-directory: ./java-client run: | - cd scripts && bash recompile_thrift.sh && cd - mvn spotless:apply mvn clean package -DskipTests diff --git a/.github/workflows/standardization_lint.yaml b/.github/workflows/standardization_lint.yaml index 66bcd16276..a88f13e66a 100644 --- a/.github/workflows/standardization_lint.yaml +++ b/.github/workflows/standardization_lint.yaml @@ -47,14 +47,14 @@ jobs: name: Check Markdown links runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 dockerfile_linter: name: Lint Dockerfile runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: hadolint/hadolint-action@v3.1.0 with: recursive: true @@ -65,7 +65,7 @@ jobs: runs-on: ubuntu-latest steps: - name: "Checkout ${{ github.ref }} ( ${{ github.sha }} )" - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check License Header uses: apache/skywalking-eyes@main env: diff --git a/.github/workflows/test_nodejs-client.yml b/.github/workflows/test_nodejs-client.yml index 371263e80e..e662e2458f 100644 --- a/.github/workflows/test_nodejs-client.yml +++ b/.github/workflows/test_nodejs-client.yml @@ -46,7 +46,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: "./.github/actions/rebuild_thirdparty_if_needed" - uses: "./.github/actions/build_pegasus" - uses: "./.github/actions/upload_artifact" @@ -58,7 +58,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install nodejs uses: actions/setup-node@v3 with: diff --git a/.github/workflows/test_python-client.yml b/.github/workflows/test_python-client.yml index c1eaaa7b70..f846be60f0 100644 --- a/.github/workflows/test_python-client.yml +++ b/.github/workflows/test_python-client.yml @@ -46,7 +46,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: "./.github/actions/rebuild_thirdparty_if_needed" - uses: "./.github/actions/build_pegasus" - uses: "./.github/actions/upload_artifact" @@ -58,7 +58,7 @@ jobs: container: image: apache/pegasus:thirdparties-bin-test-ubuntu2204-${{ github.base_ref }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: '3.11' diff --git a/.github/workflows/thirdparty-regular-push.yml b/.github/workflows/thirdparty-regular-push.yml index 59d0430ea2..ce8f5d5dc9 100644 --- a/.github/workflows/thirdparty-regular-push.yml +++ b/.github/workflows/thirdparty-regular-push.yml @@ -43,7 +43,7 @@ jobs: build_push_src_docker_images: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx @@ -68,6 +68,12 @@ jobs: build_push_bin_docker_images: runs-on: ubuntu-latest + env: + # The glibc version on ubuntu1804 and centos7 is lower than the node20 required, so + # we need to force the node version to 16. + # See more details: https://github.com/actions/checkout/issues/1809 + ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16 + ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true needs: build_push_src_docker_images strategy: fail-fast: false @@ -78,6 +84,8 @@ jobs: - ubuntu2204 - centos7 steps: + # The glibc version on ubuntu1804 and centos7 is lower than the actions/checkout@v4 required, so + # we need to force to use actions/checkout@v3. - uses: actions/checkout@v3 - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -104,6 +112,12 @@ jobs: build_push_bin_jemalloc_docker_images: runs-on: ubuntu-latest + env: + # The glibc version on ubuntu1804 and centos7 is lower than the node20 required, so + # we need to force the node version to 16. + # See more details: https://github.com/actions/checkout/issues/1809 + ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16 + ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true needs: build_push_src_docker_images strategy: fail-fast: false @@ -114,6 +128,8 @@ jobs: - ubuntu2204 - centos7 steps: + # The glibc version on ubuntu1804 and centos7 is lower than the actions/checkout@v4 required, so + # we need to force to use actions/checkout@v3. - uses: actions/checkout@v3 - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -148,7 +164,7 @@ jobs: osversion: - ubuntu2204 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx @@ -184,7 +200,7 @@ jobs: osversion: - ubuntu2204 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx diff --git a/.gitignore b/.gitignore index 8873dcf800..d458cd5c10 100644 --- a/.gitignore +++ b/.gitignore @@ -342,11 +342,6 @@ package-lock.json # ============= # go-client/bin go-client/coverage.txt -go-client/idl/admin/ -go-client/idl/cmd/ -go-client/idl/radmin/ -go-client/idl/replication/ -go-client/idl/rrdb/ thirdparty/output/ diff --git a/.licenserc.yaml b/.licenserc.yaml index 3a15f3201d..49ab4b54a6 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -42,10 +42,12 @@ header: - '**/*.pdf' # Special files for golang. - '**/go.sum' - # TODO(wangdan): Generated files for go client, could generate dynamically? - - 'go-client/idl/base/GoUnusedProtection__.go' - - 'go-client/idl/base/dsn_err_string.go' - - 'go-client/idl/base/rocskdb_err_string.go' + - 'go-client/idl/admin/**' + - 'go-client/idl/base/**' + - 'go-client/idl/cmd/**' + - 'go-client/idl/radmin/**' + - 'go-client/idl/replication/**' + - 'go-client/idl/rrdb/**' # Special files for nodejs. - '**/.npmigonre' # Special files for python. @@ -59,7 +61,6 @@ header: - 'src/meta/test/suite2' - 'src/nfs/test/nfs_test_file1' - 'src/nfs/test/nfs_test_file2' - - 'src/runtime/test/gtest.filter' # Used for tests and should be empty, or ignore all comment lines (otherwise would lead to error). - 'src/utils/test/config-empty.ini' # Binary files used for tests and could not be added with copyright info (otherwise would lead to error). @@ -72,7 +73,7 @@ header: - 'thirdparty/fix_rocksdb-cmake-PORTABLE-option.patch' - 'thirdparty/fix_snappy-Wsign-compare-warning.patch' - 'thirdparty/fix_s2_build_with_absl_and_gtest.patch' - - 'thirdparty/fix_thrift_for_cpp11.patch' + - 'thirdparty/fix_thrift_build_and_link_errors.patch' # TODO(yingchun): shell/* files are import from thirdparties, we can move them to thirdparty later. # Copyright (c) 2016, Adi Shavit - 'src/shell/argh.h' @@ -108,123 +109,26 @@ header: - 'cmake_modules/FindRT.cmake' - 'cmake_modules/FindDL.cmake' # Copyright (c) 2017 Guillaume Papin - - 'scripts/run-clang-format.py' + - 'build_tools/run-clang-format.py' # The MIT License (MIT), Copyright (c) 2015 Microsoft Corporation + - 'admin_tools/learn_stat.py' + - 'build_tools/compile_thrift.py' - 'cmake_modules/BaseFunctions.cmake' - 'docs/rdsn-README.md' - 'idl/command.thrift' - 'idl/dsn.layer2.thrift' - 'idl/dsn.thrift' - - 'idl/metadata.thrift' - 'idl/meta_admin.thrift' + - 'idl/metadata.thrift' - 'idl/replica_admin.thrift' - - 'scripts/compile_thrift.py' - - 'scripts/learn_stat.py' - - 'src/runtime/api_layer1.h' - - 'src/runtime/api_task.h' - - 'src/utils/api_utilities.h' - - 'src/runtime/app_model.h' - - 'src/common/json_helper.h' - - 'src/runtime/rpc/rpc_stream.h' - - 'src/runtime/rpc/serialization.h' - - 'src/common/serialization_helper/dsn_types.h' - - 'src/common/serialization_helper/thrift_helper.h' - - 'src/runtime/serverlet.h' - - 'src/runtime/service_app.h' - - 'src/utils/distributed_lock_service.h' - - 'src/failure_detector/failure_detector.h' - - 'src/failure_detector/fd.client.h' - - 'src/failure_detector/fd.code.definition.h' - - 'src/failure_detector/fd.server.h' - - 'src/failure_detector/failure_detector_multimaster.h' - - 'src/meta/meta_state_service.h' - - 'src/nfs/nfs_node.h' - - 'src/meta/meta_service_app.h' - - 'src/tools/mutation_log_tool.h' - - 'src/client/partition_resolver.h' - - 'src/replica/replica_base.h' - - 'src/common/replica_envs.h' - - 'src/common/replication.codes.h' - - 'src/replica/replication_app_base.h' - - 'src/client/replication_ddl_client.h' - - 'src/common/replication_enums.h' - - 'src/common/replication_other_types.h' - - 'src/replica/replication_service_app.h' - - 'src/common/storage_serverlet.h' - - 'src/perf_counter/perf_counter.h' - - 'src/perf_counter/perf_counter_wrapper.h' - - 'src/perf_counter/perf_counters.h' - - 'src/aio/aio_task.h' - - 'src/runtime/task/async_calls.h' - - 'src/utils/command_manager.h' - - 'src/runtime/env_provider.h' - - 'src/aio/file_io.h' - - 'src/runtime/task/future_types.h' - - 'src/runtime/global_config.h' - - 'src/common/gpid.h' - - 'src/runtime/rpc/group_address.h' - - 'src/utils/logging_provider.h' - - 'src/runtime/rpc/message_parser.h' - - 'src/runtime/rpc/network.h' - - 'src/runtime/rpc/rpc_address.cpp' - - 'src/runtime/rpc/rpc_address.h' - - 'src/runtime/rpc/rpc_message.h' - - 'src/runtime/task/task.h' - - 'src/runtime/task/task_code.h' - - 'src/runtime/task/task_queue.h' - - 'src/runtime/task/task_spec.h' - - 'src/runtime/task/task_tracker.h' - - 'src/runtime/task/task_worker.h' - - 'src/utils/thread_access_checker.h' - - 'src/utils/threadpool_code.h' - - 'src/utils/threadpool_spec.h' - - 'src/runtime/task/timer_service.h' - - 'src/utils/uniq_timestamp_us.h' - - 'src/utils/zlocks.h' - - 'src/runtime/nativerun.h' - - 'src/runtime/node_scoper.h' - - 'src/runtime/providers.common.h' - - 'src/runtime/simulator.h' - - 'src/runtime/tool_api.h' - - 'src/runtime/fault_injector.h' - - 'src/runtime/profiler.h' - - 'src/runtime/tracer.h' - - 'src/utils/autoref_ptr.h' - - 'src/utils/binary_reader.h' - - 'src/utils/binary_writer.h' - - 'src/utils/blob.h' - - 'src/utils/chrono_literals.h' - - 'src/utils/config_api.h' - - 'src/utils/config_helper.h' - - 'src/utils/configuration.h' - - 'src/utils/crc.h' - - 'src/utils/customizable_id.h' - - 'src/utils/enum_helper.h' - - 'src/utils/error_code.h' - - 'src/utils/errors.h' - - 'src/utils/exp_delay.h' - - 'src/utils/extensible_object.h' - - 'src/utils/factory_store.h' - - 'src/utils/filesystem.h' - - 'src/utils/fixed_size_buffer_pool.h' - - 'src/utils/function_traits.h' - - 'src/utils/join_point.h' - - 'src/utils/link.h' - - 'src/utils/optional.h' - - 'src/utils/ports.h' - - 'src/utils/priority_queue.h' - - 'src/utils/singleton_store.h' - - 'src/utils/strings.h' - - 'src/utils/synchronize.h' - - 'src/utils/utils.h' - - 'src/utils/work_queue.h' - - 'src/utils/time_utils.h' - 'src/aio/CMakeLists.txt' - 'src/aio/aio_provider.cpp' - 'src/aio/aio_provider.h' + - 'src/aio/aio_task.h' - 'src/aio/disk_engine.cpp' - 'src/aio/disk_engine.h' - 'src/aio/file_io.cpp' + - 'src/aio/file_io.h' - 'src/aio/native_linux_aio_provider.cpp' - 'src/aio/native_linux_aio_provider.h' - 'src/aio/test/CMakeLists.txt' @@ -235,16 +139,27 @@ header: - 'src/block_service/test/config-test.ini' - 'src/client/CMakeLists.txt' - 'src/client/partition_resolver.cpp' + - 'src/client/partition_resolver.h' - 'src/client/partition_resolver_manager.cpp' - 'src/client/partition_resolver_manager.h' - 'src/client/partition_resolver_simple.cpp' - 'src/client/partition_resolver_simple.h' - 'src/client/replication_ddl_client.cpp' + - 'src/client/replication_ddl_client.h' - 'src/common/CMakeLists.txt' - 'src/common/consensus.thrift' - 'src/common/fs_manager.cpp' + - 'src/common/gpid.h' + - 'src/common/json_helper.h' + - 'src/common/replica_envs.h' + - 'src/common/replication.codes.h' - 'src/common/replication_common.cpp' - 'src/common/replication_common.h' + - 'src/common/replication_enums.h' + - 'src/common/replication_other_types.h' + - 'src/common/serialization_helper/dsn_types.h' + - 'src/common/serialization_helper/thrift_helper.h' + - 'src/common/storage_serverlet.h' - 'src/common/test/CMakeLists.txt' - 'src/common/test/config-test.ini' - 'src/common/test/duplication_common_test.cpp' @@ -252,7 +167,12 @@ header: - 'src/common/test/run.sh' - 'src/failure_detector/CMakeLists.txt' - 'src/failure_detector/failure_detector.cpp' + - 'src/failure_detector/failure_detector.h' - 'src/failure_detector/failure_detector_multimaster.cpp' + - 'src/failure_detector/failure_detector_multimaster.h' + - 'src/failure_detector/fd.client.h' + - 'src/failure_detector/fd.code.definition.h' + - 'src/failure_detector/fd.server.h' - 'src/failure_detector/fd.thrift' - 'src/failure_detector/test/CMakeLists.txt' - 'src/failure_detector/test/clear.sh' @@ -280,6 +200,8 @@ header: - 'src/meta/meta_service.cpp' - 'src/meta/meta_service.h' - 'src/meta/meta_service_app.cpp' + - 'src/meta/meta_service_app.h' + - 'src/meta/meta_state_service.h' - 'src/meta/meta_state_service_simple.cpp' - 'src/meta/meta_state_service_simple.h' - 'src/meta/meta_state_service_zookeeper.cpp' @@ -322,6 +244,7 @@ header: - 'src/nfs/nfs_client_impl.h' - 'src/nfs/nfs_code_definition.h' - 'src/nfs/nfs_node.cpp' + - 'src/nfs/nfs_node.h' - 'src/nfs/nfs_node_simple.cpp' - 'src/nfs/nfs_node_simple.h' - 'src/nfs/nfs_server_impl.cpp' @@ -333,7 +256,10 @@ header: - 'src/nfs/test/run.sh' - 'src/perf_counter/CMakeLists.txt' - 'src/perf_counter/perf_counter.cpp' + - 'src/perf_counter/perf_counter.h' + - 'src/perf_counter/perf_counter_wrapper.h' - 'src/perf_counter/perf_counters.cpp' + - 'src/perf_counter/perf_counters.h' - 'src/perf_counter/test/CMakeLists.txt' - 'src/perf_counter/test/clear.sh' - 'src/perf_counter/test/perf_counter_test.cpp' @@ -363,6 +289,7 @@ header: - 'src/replica/replica.cpp' - 'src/replica/replica.h' - 'src/replica/replica_2pc.cpp' + - 'src/replica/replica_base.h' - 'src/replica/replica_check.cpp' - 'src/replica/replica_chkpt.cpp' - 'src/replica/replica_config.cpp' @@ -374,7 +301,9 @@ header: - 'src/replica/replica_stub.cpp' - 'src/replica/replica_stub.h' - 'src/replica/replication_app_base.cpp' + - 'src/replica/replication_app_base.h' - 'src/replica/replication_service_app.cpp' + - 'src/replica/replication_service_app.h' - 'src/replica/split/test/config-test.ini' - 'src/replica/split/test/run.sh' - 'src/replica/storage/CMakeLists.txt' @@ -521,103 +450,171 @@ header: - 'src/replica/test/replica_test_base.h' - 'src/replica/test/replication_service_test_app.h' - 'src/replica/test/run.sh' + - 'src/rpc/CMakeLists.txt' + - 'src/rpc/asio_net_provider.cpp' + - 'src/rpc/asio_net_provider.h' + - 'src/rpc/asio_rpc_session.cpp' + - 'src/rpc/asio_rpc_session.h' + - 'src/rpc/dsn_message_parser.cpp' + - 'src/rpc/dsn_message_parser.h' + - 'src/rpc/group_address.h' + - 'src/rpc/message_parser.cpp' + - 'src/rpc/message_parser.h' + - 'src/rpc/message_parser_manager.h' + - 'src/rpc/network.cpp' + - 'src/rpc/network.h' + - 'src/rpc/network.sim.cpp' + - 'src/rpc/network.sim.h' + - 'src/rpc/raw_message_parser.cpp' + - 'src/rpc/raw_message_parser.h' + - 'src/rpc/rpc_address.cpp' + - 'src/rpc/rpc_address.h' + - 'src/rpc/rpc_engine.cpp' + - 'src/rpc/rpc_engine.h' + - 'src/rpc/rpc_message.cpp' + - 'src/rpc/rpc_message.h' + - 'src/rpc/rpc_stream.h' + - 'src/rpc/rpc_task.cpp' + - 'src/rpc/serialization.h' + - 'src/rpc/test/address_test.cpp' + - 'src/rpc/test/corrupt_message_test.cpp' + - 'src/rpc/test/host_port_test.cpp' + - 'src/rpc/test/message_utils_test.cpp' + - 'src/rpc/test/net_provider_test.cpp' + - 'src/rpc/test/rpc_holder_test.cpp' + - 'src/rpc/test/rpc_message_test.cpp' + - 'src/rpc/test/rpc_test.cpp' + - 'src/rpc/thrift_message_parser.cpp' + - 'src/rpc/thrift_message_parser.h' - 'src/runtime/CMakeLists.txt' + - 'src/runtime/api_layer1.h' + - 'src/runtime/api_task.h' + - 'src/runtime/app_model.h' - 'src/runtime/core_main.cpp' - 'src/runtime/env.sim.cpp' - 'src/runtime/env.sim.h' + - 'src/runtime/env_provider.h' - 'src/runtime/fault_injector.cpp' + - 'src/runtime/fault_injector.h' - 'src/runtime/global_config.cpp' + - 'src/runtime/global_config.h' - 'src/runtime/message_utils.cpp' - 'src/runtime/nativerun.cpp' + - 'src/runtime/nativerun.h' + - 'src/runtime/node_scoper.h' - 'src/runtime/profiler.cpp' + - 'src/runtime/profiler.h' - 'src/runtime/profiler_header.h' - 'src/runtime/providers.common.cpp' - - 'src/runtime/rpc/CMakeLists.txt' - - 'src/runtime/rpc/asio_net_provider.cpp' - - 'src/runtime/rpc/asio_net_provider.h' - - 'src/runtime/rpc/asio_rpc_session.cpp' - - 'src/runtime/rpc/asio_rpc_session.h' - - 'src/runtime/rpc/dsn_message_parser.cpp' - - 'src/runtime/rpc/dsn_message_parser.h' - - 'src/runtime/rpc/message_parser.cpp' - - 'src/runtime/rpc/message_parser_manager.h' - - 'src/runtime/rpc/network.cpp' - - 'src/runtime/rpc/network.sim.cpp' - - 'src/runtime/rpc/network.sim.h' - - 'src/runtime/rpc/raw_message_parser.cpp' - - 'src/runtime/rpc/raw_message_parser.h' - - 'src/runtime/rpc/rpc_engine.cpp' - - 'src/runtime/rpc/rpc_engine.h' - - 'src/runtime/rpc/rpc_message.cpp' - - 'src/runtime/rpc/rpc_task.cpp' - - 'src/runtime/rpc/thrift_message_parser.cpp' - - 'src/runtime/rpc/thrift_message_parser.h' + - 'src/runtime/providers.common.h' - 'src/runtime/scheduler.cpp' - 'src/runtime/scheduler.h' + - 'src/runtime/serverlet.h' - 'src/runtime/service_api_c.cpp' + - 'src/runtime/service_app.h' - 'src/runtime/service_engine.cpp' - 'src/runtime/service_engine.h' - 'src/runtime/simulator.cpp' - - 'src/runtime/task/CMakeLists.txt' - - 'src/runtime/task/hpc_task_queue.cpp' - - 'src/runtime/task/hpc_task_queue.h' - - 'src/runtime/task/simple_task_queue.cpp' - - 'src/runtime/task/simple_task_queue.h' - - 'src/runtime/task/task.cpp' - - 'src/runtime/task/task_code.cpp' - - 'src/runtime/task/task_engine.cpp' - - 'src/runtime/task/task_engine.h' - - 'src/runtime/task/task_engine.sim.cpp' - - 'src/runtime/task/task_engine.sim.h' - - 'src/runtime/task/task_queue.cpp' - - 'src/runtime/task/task_spec.cpp' - - 'src/runtime/task/task_tracker.cpp' - - 'src/runtime/task/task_worker.cpp' + - 'src/runtime/simulator.h' - 'src/runtime/test/CMakeLists.txt' - 'src/runtime/test/address_test.cpp' - 'src/runtime/test/async_call.cpp' - 'src/runtime/test/clear.sh' - - 'src/runtime/test/config-test-corrupt-message.ini' - 'src/runtime/test/config-test-sim.ini' - 'src/runtime/test/config-test.ini' - 'src/runtime/test/corrupt_message.cpp' - 'src/runtime/test/lpc.cpp' - 'src/runtime/test/main.cpp' - - 'src/runtime/test/message_utils_test.cpp' - - 'src/runtime/test/netprovider.cpp' - 'src/runtime/test/pipeline_test.cpp' - - 'src/runtime/test/rpc.cpp' - - 'src/runtime/test/rpc_holder_test.cpp' - - 'src/runtime/test/rpc_message.cpp' - 'src/runtime/test/run.sh' - 'src/runtime/test/service_api_c.cpp' - 'src/runtime/test/sim_lock.cpp' - - 'src/runtime/test/task_engine.cpp' - 'src/runtime/test_utils.h' - 'src/runtime/threadpool_code.cpp' - 'src/runtime/tool_api.cpp' + - 'src/runtime/tool_api.h' - 'src/runtime/tracer.cpp' + - 'src/runtime/tracer.h' - 'src/runtime/zlocks.cpp' + - 'src/task/CMakeLists.txt' + - 'src/task/async_calls.h' + - 'src/task/future_types.h' + - 'src/task/hpc_task_queue.cpp' + - 'src/task/hpc_task_queue.h' + - 'src/task/simple_task_queue.cpp' + - 'src/task/simple_task_queue.h' + - 'src/task/task.cpp' + - 'src/task/task.h' + - 'src/task/task_code.cpp' + - 'src/task/task_code.h' + - 'src/task/task_engine.cpp' + - 'src/task/task_engine.h' + - 'src/task/task_engine.sim.cpp' + - 'src/task/task_engine.sim.h' + - 'src/task/task_queue.cpp' + - 'src/task/task_queue.h' + - 'src/task/task_spec.cpp' + - 'src/task/task_spec.h' + - 'src/task/task_tracker.cpp' + - 'src/task/task_tracker.h' + - 'src/task/task_worker.cpp' + - 'src/task/task_worker.h' + - 'src/task/tests/async_call_test.cpp' + - 'src/task/tests/lpc_test.cpp' + - 'src/task/tests/task_engine_test.cpp' + - 'src/task/timer_service.h' - 'src/tools/CMakeLists.txt' - 'src/tools/mutation_log_tool.cpp' + - 'src/tools/mutation_log_tool.h' - 'src/utils/CMakeLists.txt' + - 'src/utils/api_utilities.h' + - 'src/utils/autoref_ptr.h' - 'src/utils/binary_reader.cpp' + - 'src/utils/binary_reader.h' - 'src/utils/binary_writer.cpp' + - 'src/utils/binary_writer.h' + - 'src/utils/blob.h' + - 'src/utils/chrono_literals.h' - 'src/utils/command_manager.cpp' + - 'src/utils/command_manager.h' - 'src/utils/config_api.cpp' + - 'src/utils/config_api.h' + - 'src/utils/config_helper.h' - 'src/utils/configuration.cpp' + - 'src/utils/configuration.h' - 'src/utils/coredump.h' - 'src/utils/coredump.posix.cpp' - 'src/utils/crc.cpp' + - 'src/utils/crc.h' + - 'src/utils/customizable_id.h' + - 'src/utils/distributed_lock_service.h' + - 'src/utils/enum_helper.h' - 'src/utils/error_code.cpp' + - 'src/utils/error_code.h' + - 'src/utils/errors.h' + - 'src/utils/exp_delay.h' + - 'src/utils/extensible_object.h' + - 'src/utils/factory_store.h' - 'src/utils/filesystem.cpp' + - 'src/utils/filesystem.h' + - 'src/utils/fixed_size_buffer_pool.h' + - 'src/utils/function_traits.h' - 'src/utils/gpid.cpp' + - 'src/utils/join_point.h' + - 'src/utils/link.h' - 'src/utils/lockp.std.h' - 'src/utils/logging.cpp' + - 'src/utils/logging_provider.h' + - 'src/utils/optional.h' + - 'src/utils/ports.h' + - 'src/utils/priority_queue.h' - 'src/utils/shared_io_service.h' - 'src/utils/simple_logger.cpp' - 'src/utils/simple_logger.h' + - 'src/utils/singleton_store.h' - 'src/utils/strings.cpp' + - 'src/utils/strings.h' + - 'src/utils/synchronize.h' - 'src/utils/test/CMakeLists.txt' - 'src/utils/test/clear.sh' - 'src/utils/test/config-bad-section.ini' @@ -645,8 +642,16 @@ header: - 'src/utils/test/time_utils_test.cpp' - 'src/utils/test/utils.cpp' - 'src/utils/thread_access_checker.cpp' + - 'src/utils/thread_access_checker.h' + - 'src/utils/threadpool_code.h' + - 'src/utils/threadpool_spec.h' + - 'src/utils/time_utils.h' + - 'src/utils/uniq_timestamp_us.h' - 'src/utils/utils.cpp' + - 'src/utils/utils.h' + - 'src/utils/work_queue.h' - 'src/utils/zlock_provider.h' + - 'src/utils/zlocks.h' - 'src/zookeeper/CMakeLists.txt' - 'src/zookeeper/distributed_lock_service_zookeeper.cpp' - 'src/zookeeper/distributed_lock_service_zookeeper.h' @@ -665,5 +670,13 @@ header: - 'src/zookeeper/zookeeper_session.h' - 'src/zookeeper/zookeeper_session_mgr.cpp' - 'src/zookeeper/zookeeper_session_mgr.h' + # Apache License, Version 2.0, Copyright 2018 Google LLC + - 'src/gutil/test/map_traits_test.cpp' + - 'src/gutil/test/map_util_test.h' + - 'src/gutil/test/map_util_unittest.cpp' + - 'src/gutil/test/no_destructor_test.cpp' + - 'src/gutil/map_traits.h' + - 'src/gutil/map_util.h' + - 'src/gutil/no_destructor.h' comment: on-failure diff --git a/CMakeLists.txt b/CMakeLists.txt index 5cc4c70044..0ee9d60405 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,8 +35,12 @@ include(BaseFunctions) set(CMAKE_EXPORT_COMPILE_COMMANDS TRUE) set(PROJECT_ROOT ${CMAKE_CURRENT_LIST_DIR}) -set(THIRDPARTY_ROOT ${PROJECT_ROOT}/thirdparty) -set(THIRDPARTY_INSTALL_DIR ${PROJECT_ROOT}/thirdparty/output) +if ("$ENV{THIRDPARTY_ROOT}" STREQUAL "") + set(THIRDPARTY_ROOT ${PROJECT_ROOT}/thirdparty) +else() + set(THIRDPARTY_ROOT $ENV{THIRDPARTY_ROOT}) +endif() +set(THIRDPARTY_INSTALL_DIR ${THIRDPARTY_ROOT}/output) message(STATUS "THIRDPARTY_INSTALL_DIR = ${THIRDPARTY_INSTALL_DIR}") set(BUILD_DIR ${PROJECT_ROOT}/src/builder) diff --git a/LICENSE b/LICENSE index c89dd134d9..eb22d54de3 100644 --- a/LICENSE +++ b/LICENSE @@ -514,7 +514,7 @@ RESULTING FROM THE USE OF THIS SOFTWARE. -------------------------------------------------------------------------------- -scripts/run-clang-format.py - MIT License +build_tools/run-clang-format.py - MIT License MIT License @@ -570,3 +570,26 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- + +src/gutil/test/map_traits_test.cpp +src/gutil/test/map_util_test.h +src/gutil/test/map_util_unittest.cpp +src/gutil/test/no_destructor_test.cpp +src/gutil/map_traits.h +src/gutil/map_util.h +src/gutil/no_destructor.h + +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/admin-cli/go.mod b/admin-cli/go.mod index cd3f25b726..bb60429db3 100644 --- a/admin-cli/go.mod +++ b/admin-cli/go.mod @@ -32,7 +32,7 @@ require ( github.com/stretchr/testify v1.6.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3 + k8s.io/apimachinery v0.16.13 ) require ( @@ -66,9 +66,9 @@ require ( github.com/tidwall/gjson v1.14.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect gopkg.in/yaml.v3 v3.0.0 // indirect diff --git a/admin-cli/go.sum b/admin-cli/go.sum index 78480e874c..d873287c8b 100644 --- a/admin-cli/go.sum +++ b/admin-cli/go.sum @@ -363,9 +363,11 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6 github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= @@ -580,8 +582,8 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -613,6 +615,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -630,16 +633,16 @@ golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -738,6 +741,7 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -751,12 +755,14 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3 h1:FErmbNIJruD5GT2oVEjtPn5Ar5+rcWJsC8/PPUkR0s4= k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apimachinery v0.16.13 h1:E40YK/NhqhUubG44ZHQULa4Pn+8NnXMAE6awvQ97Pyg= +k8s.io/apimachinery v0.16.13/go.mod h1:4HMHS3mDHtVttspuuhrJ1GGr/0S9B6iWYWZ57KnnZqQ= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20200410163147-594e756bea31/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/scripts/config_hdfs.sh b/admin_tools/config_hdfs.sh similarity index 100% rename from scripts/config_hdfs.sh rename to admin_tools/config_hdfs.sh diff --git a/scripts/downgrade_node.sh b/admin_tools/downgrade_node.sh similarity index 98% rename from scripts/downgrade_node.sh rename to admin_tools/downgrade_node.sh index 3a405876cd..5d8c59c541 100755 --- a/scripts/downgrade_node.sh +++ b/admin_tools/downgrade_node.sh @@ -16,8 +16,6 @@ # specific language governing permissions and limitations # under the License. -set -e - PID=$$ function usage() @@ -63,7 +61,7 @@ echo "UID=$UID" echo "PID=$PID" echo -if [ [ "$cluster" != "" ]; then +if [ "$cluster" != "" ]; then echo "set_meta_level steady" | ./run.sh shell --cluster $cluster &>/tmp/$UID.$PID.pegasus.set_meta_level echo ls | ./run.sh shell --cluster $cluster &>/tmp/$UID.$PID.pegasus.ls else diff --git a/scripts/learn_stat.py b/admin_tools/learn_stat.py similarity index 100% rename from scripts/learn_stat.py rename to admin_tools/learn_stat.py diff --git a/scripts/migrate_node.sh b/admin_tools/migrate_node.sh similarity index 100% rename from scripts/migrate_node.sh rename to admin_tools/migrate_node.sh diff --git a/scripts/minos_common.sh b/admin_tools/minos_common.sh similarity index 100% rename from scripts/minos_common.sh rename to admin_tools/minos_common.sh diff --git a/scripts/pegasus_add_node_list.sh b/admin_tools/pegasus_add_node_list.sh similarity index 92% rename from scripts/pegasus_add_node_list.sh rename to admin_tools/pegasus_add_node_list.sh index ed40a8b6d2..1665d5a6df 100755 --- a/scripts/pegasus_add_node_list.sh +++ b/admin_tools/pegasus_add_node_list.sh @@ -52,7 +52,7 @@ shell_dir="$( cd $pwd/.. && pwd )" cd $shell_dir echo "Check the argument..." -source ./scripts/pegasus_check_arguments.sh add_node_list $cluster $meta_list $replica_task_id_list +source ./admin_tools/pegasus_check_arguments.sh add_node_list $cluster $meta_list $replica_task_id_list if [ $? -ne 0 ]; then echo "ERROR: the argument check failed" @@ -78,7 +78,7 @@ do echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" done -./scripts/pegasus_rebalance_cluster.sh $cluster $meta_list true $nfs_rate_megabytes_per_disk +./admin_tools/pegasus_rebalance_cluster.sh $cluster $meta_list true $nfs_rate_megabytes_per_disk echo "Finish time: `date`" add_node_finish_time=$((`date +%s`)) diff --git a/scripts/pegasus_bench_run.sh b/admin_tools/pegasus_bench_run.sh similarity index 100% rename from scripts/pegasus_bench_run.sh rename to admin_tools/pegasus_bench_run.sh diff --git a/scripts/pegasus_check_arguments.sh b/admin_tools/pegasus_check_arguments.sh similarity index 98% rename from scripts/pegasus_check_arguments.sh rename to admin_tools/pegasus_check_arguments.sh index 4b0231d87c..9574051c58 100755 --- a/scripts/pegasus_check_arguments.sh +++ b/admin_tools/pegasus_check_arguments.sh @@ -41,7 +41,7 @@ if [ "$check_type" != "add_node_list" -a "$check_type" != "offline_node_list" ]; exit 1 fi -source ./scripts/minos_common.sh +source ./admin_tools/minos_common.sh find_cluster $cluster if [ $? -ne 0 ]; then echo "ERROR: cluster \"$cluster\" not found" diff --git a/scripts/pegasus_kill_test.sh b/admin_tools/pegasus_kill_test.sh similarity index 100% rename from scripts/pegasus_kill_test.sh rename to admin_tools/pegasus_kill_test.sh diff --git a/scripts/pegasus_manual_compact.sh b/admin_tools/pegasus_manual_compact.sh similarity index 100% rename from scripts/pegasus_manual_compact.sh rename to admin_tools/pegasus_manual_compact.sh diff --git a/scripts/pegasus_migrate_zookeeper.sh b/admin_tools/pegasus_migrate_zookeeper.sh similarity index 99% rename from scripts/pegasus_migrate_zookeeper.sh rename to admin_tools/pegasus_migrate_zookeeper.sh index 52e7c0952f..d4b78f5e24 100755 --- a/scripts/pegasus_migrate_zookeeper.sh +++ b/admin_tools/pegasus_migrate_zookeeper.sh @@ -39,7 +39,7 @@ pwd="$( cd "$( dirname "$0" )" && pwd )" shell_dir="$( cd $pwd/.. && pwd )" cd $shell_dir -source ./scripts/minos_common.sh +source ./admin_tools/minos_common.sh find_cluster $cluster if [ $? -ne 0 ]; then echo "ERROR: cluster \"$cluster\" not found" diff --git a/scripts/pegasus_offline_node.sh b/admin_tools/pegasus_offline_node.sh similarity index 99% rename from scripts/pegasus_offline_node.sh rename to admin_tools/pegasus_offline_node.sh index d16f368f1a..fee4107af4 100755 --- a/scripts/pegasus_offline_node.sh +++ b/admin_tools/pegasus_offline_node.sh @@ -39,7 +39,7 @@ pwd="$( cd "$( dirname "$0" )" && pwd )" shell_dir="$( cd $pwd/.. && pwd )" cd $shell_dir -source ./scripts/minos_common.sh +source ./admin_tools/minos_common.sh find_cluster $cluster if [ $? -ne 0 ]; then echo "ERROR: cluster \"$cluster\" not found" diff --git a/scripts/pegasus_offline_node_list.sh b/admin_tools/pegasus_offline_node_list.sh similarity index 96% rename from scripts/pegasus_offline_node_list.sh rename to admin_tools/pegasus_offline_node_list.sh index 7a2483f491..4da739c698 100755 --- a/scripts/pegasus_offline_node_list.sh +++ b/admin_tools/pegasus_offline_node_list.sh @@ -52,7 +52,7 @@ shell_dir="$( cd $pwd/.. && pwd )" cd $shell_dir echo "Check the argument..." -source ./scripts/pegasus_check_arguments.sh offline_node_list $cluster $meta_list $replica_task_id_list +source ./admin_tools/pegasus_check_arguments.sh offline_node_list $cluster $meta_list $replica_task_id_list if [ $? -ne 0 ]; then echo "ERROR: the argument check failed" @@ -93,7 +93,7 @@ echo for id in $id_list do echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" - ./scripts/pegasus_offline_node.sh $cluster $meta_list $id + ./admin_tools/pegasus_offline_node.sh $cluster $meta_list $id if [ $? -ne 0 ]; then echo "ERROR: offline replica task $id failed" exit 1 diff --git a/scripts/pegasus_rebalance_cluster.sh b/admin_tools/pegasus_rebalance_cluster.sh similarity index 100% rename from scripts/pegasus_rebalance_cluster.sh rename to admin_tools/pegasus_rebalance_cluster.sh diff --git a/scripts/pegasus_replica_thread.sh b/admin_tools/pegasus_replica_thread.sh similarity index 100% rename from scripts/pegasus_replica_thread.sh rename to admin_tools/pegasus_replica_thread.sh diff --git a/scripts/pegasus_restart_node.sh b/admin_tools/pegasus_restart_node.sh similarity index 99% rename from scripts/pegasus_restart_node.sh rename to admin_tools/pegasus_restart_node.sh index b65e6ec18a..27d3588c1c 100755 --- a/scripts/pegasus_restart_node.sh +++ b/admin_tools/pegasus_restart_node.sh @@ -39,7 +39,7 @@ pwd="$( cd "$( dirname "$0" )" && pwd )" shell_dir="$( cd $pwd/.. && pwd )" cd $shell_dir -source ./scripts/minos_common.sh +source ./admin_tools/minos_common.sh find_cluster $cluster if [ $? -ne 0 ]; then echo "ERROR: cluster \"$cluster\" not found" diff --git a/scripts/pegasus_rolling_update.sh b/admin_tools/pegasus_rolling_update.sh similarity index 98% rename from scripts/pegasus_rolling_update.sh rename to admin_tools/pegasus_rolling_update.sh index 9e207fc303..c805426db4 100755 --- a/scripts/pegasus_rolling_update.sh +++ b/admin_tools/pegasus_rolling_update.sh @@ -72,7 +72,7 @@ pwd="$( cd "$( dirname "$0" )" && pwd )" shell_dir="$( cd $pwd/.. && pwd )" cd $shell_dir -source ./scripts/minos_common.sh +source ./admin_tools/minos_common.sh find_cluster $cluster if [ $? -ne 0 ]; then echo "ERROR: cluster \"$cluster\" not found" @@ -338,7 +338,7 @@ fi if [ "$rebalance_cluster_after_rolling" == "true" ]; then echo "Start to rebalance cluster..." - ./scripts/pegasus_rebalance_cluster.sh $cluster $meta_list $rebalance_only_move_primary + ./admin_tools/pegasus_rebalance_cluster.sh $cluster $meta_list $rebalance_only_move_primary fi echo "Finish time: `date`" diff --git a/scripts/pegasus_set_usage_scenario.sh b/admin_tools/pegasus_set_usage_scenario.sh similarity index 100% rename from scripts/pegasus_set_usage_scenario.sh rename to admin_tools/pegasus_set_usage_scenario.sh diff --git a/scripts/pegasus_stat_available.sh b/admin_tools/pegasus_stat_available.sh similarity index 100% rename from scripts/pegasus_stat_available.sh rename to admin_tools/pegasus_stat_available.sh diff --git a/scripts/pegasus_update_ingest_behind.sh b/admin_tools/pegasus_update_ingest_behind.sh similarity index 99% rename from scripts/pegasus_update_ingest_behind.sh rename to admin_tools/pegasus_update_ingest_behind.sh index 8932f667e8..1000a058a5 100755 --- a/scripts/pegasus_update_ingest_behind.sh +++ b/admin_tools/pegasus_update_ingest_behind.sh @@ -327,7 +327,7 @@ echo if [ "$rebalance_cluster_after_rolling" == "true" ]; then echo "Start to rebalance cluster..." - ./scripts/pegasus_rebalance_cluster.sh $cluster $meta_list $rebalance_only_move_primary + ./admin_tools/pegasus_rebalance_cluster.sh $cluster $meta_list $rebalance_only_move_primary fi echo "Finish time: `date`" diff --git a/scripts/sendmail.sh b/admin_tools/sendmail.sh similarity index 100% rename from scripts/sendmail.sh rename to admin_tools/sendmail.sh diff --git a/scripts/bump_version.sh b/build_tools/bump_version.sh similarity index 100% rename from scripts/bump_version.sh rename to build_tools/bump_version.sh diff --git a/scripts/check_license.py b/build_tools/check_license.py similarity index 100% rename from scripts/check_license.py rename to build_tools/check_license.py diff --git a/build_tools/clang_tidy.py b/build_tools/clang_tidy.py new file mode 100755 index 0000000000..b2ef72d9bb --- /dev/null +++ b/build_tools/clang_tidy.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Most of the code are inspired by https://github.com/apache/kudu/blob/856fa3404b00ee02bd3bc1d77d414ede2b2cd02e/build-support/clang_tidy_gerrit.py + +import argparse +import collections +import json +import multiprocessing +from multiprocessing.pool import ThreadPool +import os +import re +import subprocess +import sys +import tempfile + +ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) + +BUILD_PATH = os.path.join(ROOT, "build", "latest") + +def run_tidy(sha="HEAD", is_rev_range=False): + diff_cmdline = ["git", "diff" if is_rev_range else "show", sha] + + # Figure out which paths changed in the given diff. + changed_paths = subprocess.check_output(diff_cmdline + ["--name-only", "--pretty=format:"]).splitlines() + changed_paths = [p for p in changed_paths if p] + + # Produce a separate diff for each file and run clang-tidy-diff on it + # in parallel. + # + # Note: this will incorporate any configuration from .clang-tidy. + def tidy_on_path(path): + patch_file = tempfile.NamedTemporaryFile() + cmd = diff_cmdline + [ + "--src-prefix=%s/" % ROOT, + "--dst-prefix=%s/" % ROOT, + "--", + path] + subprocess.check_call(cmd, stdout=patch_file, cwd=ROOT) + # TODO(yingchun): some checks could be disabled before we fix them. + # "-checks=-llvm-include-order,-modernize-concat-nested-namespaces,-cppcoreguidelines-macro-usage,-cppcoreguidelines-special-member-functions,-hicpp-special-member-functions,-bugprone-easily-swappable-parameters,-google-readability-avoid-underscore-in-googletest-name,-cppcoreguidelines-avoid-c-arrays,-hicpp-avoid-c-arrays,-modernize-avoid-c-arrays,-llvm-header-guard,-cppcoreguidelines-pro-bounds-pointer-arithmetic", + cmdline = ["clang-tidy-diff", + "-clang-tidy-binary", + "clang-tidy", + "-p0", + "-path", BUILD_PATH, + # Disable some checks that are not useful for us now. + # They are sorted by names, and should be consistent to .clang-tidy. + "-checks=-bugprone-easily-swappable-parameters," + "-bugprone-lambda-function-name," + "-bugprone-macro-parentheses," + "-cert-err58-cpp," + "-concurrency-mt-unsafe," + "-cppcoreguidelines-avoid-c-arrays," + "-cppcoreguidelines-avoid-magic-numbers," + "-cppcoreguidelines-avoid-non-const-global-variables," + "-cppcoreguidelines-macro-usage," + "-cppcoreguidelines-non-private-member-variables-in-classes," + "-cppcoreguidelines-owning-memory," + "-cppcoreguidelines-pro-bounds-array-to-pointer-decay," + "-cppcoreguidelines-pro-bounds-pointer-arithmetic," + "-cppcoreguidelines-pro-type-const-cast," + "-cppcoreguidelines-pro-type-union-access," + "-fuchsia-default-arguments-calls," + "-fuchsia-overloaded-operator," + "-fuchsia-statically-constructed-objects," + "-google-readability-avoid-underscore-in-googletest-name," + "-hicpp-avoid-c-arrays," + "-hicpp-named-parameter," + "-hicpp-no-array-decay," + "-llvm-include-order," + "-misc-definitions-in-headers," + "-misc-non-private-member-variables-in-classes," + "-modernize-avoid-c-arrays," + "-modernize-replace-disallow-copy-and-assign-macro," + "-modernize-use-trailing-return-type," + "-readability-function-cognitive-complexity," + "-readability-identifier-length," + "-readability-magic-numbers," + "-readability-named-parameter", + "-extra-arg=-language=c++", + "-extra-arg=-std=c++17", + "-extra-arg=-Ithirdparty/output/include"] + return subprocess.check_output( + cmdline, + stdin=open(patch_file.name), + cwd=ROOT).decode() + pool = ThreadPool(multiprocessing.cpu_count()) + try: + return "".join(pool.imap(tidy_on_path, changed_paths)) + except KeyboardInterrupt as ki: + sys.exit(1) + finally: + pool.terminate() + pool.join() + + +if __name__ == "__main__": + # Basic setup and argument parsing. + parser = argparse.ArgumentParser(description="Run clang-tidy on a patch") + parser.add_argument("--rev-range", action="store_true", + default=False, + help="Whether the revision specifies the 'rev..' range") + parser.add_argument('rev', help="The git revision (or range of revisions) to process") + args = parser.parse_args() + + # Run clang-tidy and parse the output. + clang_output = run_tidy(args.rev, args.rev_range) + parsed = re.match(r'.+(warning|error): .+', clang_output, re.MULTILINE | re.DOTALL) + print(clang_output, file=sys.stderr) + if not parsed: + print("No warnings", file=sys.stderr) + sys.exit(0) + sys.exit(1) + diff --git a/scripts/clear_zk.sh b/build_tools/clear_zk.sh similarity index 100% rename from scripts/clear_zk.sh rename to build_tools/clear_zk.sh diff --git a/scripts/collector_table_counter_gen.py b/build_tools/collector_table_counter_gen.py similarity index 98% rename from scripts/collector_table_counter_gen.py rename to build_tools/collector_table_counter_gen.py index 03a3f9f297..0cb96b66c1 100755 --- a/scripts/collector_table_counter_gen.py +++ b/build_tools/collector_table_counter_gen.py @@ -118,7 +118,7 @@ def generate_code_in_command_helper_header(replica_counter): # python3 ./collector_table_counter_gen.py counter1,counter2 -# please use `./scripts/format_files.sh` to format after generate code +# please use `./build_tools/format_files.sh` to format after generate code if __name__ == '__main__': if len(sys.argv) != 2: print("python3 ./collector_table_counter_gen.py {counter1,counter2..}") diff --git a/scripts/compile_thrift.py b/build_tools/compile_thrift.py similarity index 96% rename from scripts/compile_thrift.py rename to build_tools/compile_thrift.py index 376a3c50c0..db7a1c7618 100755 --- a/scripts/compile_thrift.py +++ b/build_tools/compile_thrift.py @@ -39,7 +39,7 @@ "path": "idl", "include_fix": { "_types.h": { - "add": ["\"runtime/rpc/serialization.h\""], + "add": ["\"rpc/serialization.h\""], "remove": ["dsn_types.h"] }, "_types.cpp": { @@ -132,7 +132,7 @@ def compile_thrift_file(thrift_info): print(cmd) # TODO(wutao1): code format files - # os.system("clang-format-3.9 -i output/*") + # os.system("clang-format-14 -i output/*") if "include_fix" in thrift_info: fix_include(thrift_name, thrift_info["include_fix"]) @@ -220,13 +220,13 @@ def add_hook(name, path, func, args): if __name__ == "__main__": root_dir = os.getcwd() - thrift_exe = root_dir + "/thirdparty/output/bin/thrift" + thrift_exe = os.environ['THIRDPARTY_ROOT'] + "/output/bin/thrift" print("thrift_exe = " + thrift_exe) print("root_dir = " + root_dir) if not os.path.isfile(thrift_exe): - print("Error: can't find compiler %s\nPlease build thrift in thirdparty/" % thrift_exe) - sys.exit() + print("Error: can't find compiler %s\nPlease build thrift in %s/" % (thrift_exe, os.environ['THIRDPARTY_ROOT'])) + sys.exit(1) ctor_kv_pair = " kv_pair(const std::string& _key, const std::string& _val): key(_key), value(_val) {\n }" ctor_configuration_proposal_action = " configuration_proposal_action(::dsn::rpc_address t, ::dsn::rpc_address n, config_type::type tp): target(t), node(n), type(tp) {}" diff --git a/scripts/download_hadoop.sh b/build_tools/download_hadoop.sh similarity index 100% rename from scripts/download_hadoop.sh rename to build_tools/download_hadoop.sh diff --git a/scripts/download_package.sh b/build_tools/download_package.sh similarity index 100% rename from scripts/download_package.sh rename to build_tools/download_package.sh diff --git a/scripts/download_zk.sh b/build_tools/download_zk.sh similarity index 100% rename from scripts/download_zk.sh rename to build_tools/download_zk.sh diff --git a/scripts/format_files.sh b/build_tools/format_files.sh similarity index 96% rename from scripts/format_files.sh rename to build_tools/format_files.sh index 5697d720b2..62e34ebdcd 100755 --- a/scripts/format_files.sh +++ b/build_tools/format_files.sh @@ -29,13 +29,13 @@ thirdparty=./thirdparty if [ $# -eq 0 ]; then echo "formating all .h/.cpp files in $root_dir ..." find . -type f -not \( -wholename "$linenoise/*" -o -wholename "$sds/*" -o -wholename "$thirdparty/*" \) \ - -regextype posix-egrep -regex ".*\.(cpp|h)" | xargs clang-format-3.9 -i -style=file + -regextype posix-egrep -regex ".*\.(cpp|h)" | xargs clang-format-14 -i -style=file elif [ $1 = "-h" ]; then echo "USAGE: ./format-files.sh [] -- format .h/.cpp files in $root_dir/relative_path" echo " ./format-files.sh means format all .h/.cpp files in $root_dir" else echo "formating all .h/.cpp files in $root_dir/$1 ..." find ./$1 -type f -not \( -wholename "$linenoise/*" -o -wholename "$sds/*" -o -wholename "$thirdparty/*" \) \ - -regextype posix-egrep -regex ".*\.(cpp|h)" | xargs clang-format-3.9 -i -style=file + -regextype posix-egrep -regex ".*\.(cpp|h)" | xargs clang-format-14 -i -style=file fi diff --git a/scripts/pack_client.sh b/build_tools/pack_client.sh similarity index 93% rename from scripts/pack_client.sh rename to build_tools/pack_client.sh index e15212cb9a..c28bb7dfd3 100755 --- a/scripts/pack_client.sh +++ b/build_tools/pack_client.sh @@ -39,12 +39,6 @@ then exit 1 fi -if [ ! -f ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server ] -then - echo "ERROR: ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server not found" - exit 1 -fi - if [ ! -f ${BUILD_LATEST_DIR}/CMakeCache.txt ] then echo "ERROR: ${BUILD_LATEST_DIR}/CMakeCache.txt not found" @@ -109,7 +103,7 @@ mkdir -p ${pack}/lib copy_file ${BUILD_LATEST_DIR}/output/lib/libpegasus_client_static.a ${pack}/lib # TODO(yingchun): make sure shared lib works well too # copy_file ${BUILD_LATEST_DIR}/output/lib/libpegasus_client_shared.so ${pack}/lib -copy_file ./thirdparty/output/lib/libboost*.so.1.69.0 ${pack}/lib +copy_file ${THIRDPARTY_ROOT}/output/lib/libboost*.so.1.69.0 ${pack}/lib ln -sf `ls ${pack}/lib | grep libboost_system` ${pack}/lib/libboost_system.so ln -sf `ls ${pack}/lib | grep libboost_filesystem` ${pack}/lib/libboost_filesystem.so ln -sf `ls ${pack}/lib | grep libboost_regex` ${pack}/lib/libboost_regex.so diff --git a/scripts/pack_common.sh b/build_tools/pack_common.sh similarity index 93% rename from scripts/pack_common.sh rename to build_tools/pack_common.sh index d22555d330..d1080f48dc 100755 --- a/scripts/pack_common.sh +++ b/build_tools/pack_common.sh @@ -20,7 +20,11 @@ set -e function get_stdcpp_lib() { - libname=`ldd ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server 2>/dev/null | grep libstdc++` + if [[ $2 == "false" ]]; then + libname=`ldd ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server 2>/dev/null | grep libstdc++` + else + libname=`ldd ${BUILD_LATEST_DIR}/output/bin/pegasus_meta_server/pegasus_meta_server 2>/dev/null | grep libstdc++` + fi libname=`echo $libname | cut -f1 -d" "` if [ $1 = "true" ]; then gcc_path=`which gcc` diff --git a/scripts/pack_server.sh b/build_tools/pack_server.sh similarity index 78% rename from scripts/pack_server.sh rename to build_tools/pack_server.sh index 2ff6e8446d..1c27ac32fb 100755 --- a/scripts/pack_server.sh +++ b/build_tools/pack_server.sh @@ -27,6 +27,7 @@ function usage() { echo " -g|--custom-gcc" echo " -k|--keytab-file" echo " -j|--use-jemalloc" + echo " -s|--separate_servers" exit 0 } @@ -39,11 +40,6 @@ if [ ! -f src/include/pegasus/git_commit.h ]; then exit 1 fi -if [ ! -f ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server ]; then - echo "ERROR: ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server not found" - exit 1 -fi - if [ ! -f ${BUILD_LATEST_DIR}/CMakeCache.txt ]; then echo "ERROR: ${BUILD_LATEST_DIR}/CMakeCache.txt not found" exit 1 @@ -77,7 +73,8 @@ fi custom_gcc="false" keytab_file="" -use_jemalloc="off" +use_jemalloc="false" +separate_servers="false" while [[ $# > 0 ]]; do option_key="$1" @@ -97,7 +94,10 @@ while [[ $# > 0 ]]; do shift ;; -j | --use-jemalloc) - use_jemalloc="on" + use_jemalloc="true" + ;; + -s | --separate_servers) + separate_servers="true" ;; *) echo "ERROR: unknown option \"$option_key\"" @@ -110,34 +110,43 @@ while [[ $# > 0 ]]; do done mkdir -p ${pack}/bin -copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server ${pack}/bin +if [[ $separate_servers == "false" ]]; then + copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server ${pack}/bin +else + copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_meta_server/pegasus_meta_server ${pack}/bin + copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_replica_server/pegasus_replica_server ${pack}/bin +fi copy_file ${BUILD_LATEST_DIR}/output/lib/libdsn_meta_server.so ${pack}/bin copy_file ${BUILD_LATEST_DIR}/output/lib/libdsn_replica_server.so ${pack}/bin copy_file ${BUILD_LATEST_DIR}/output/lib/libdsn_utils.so ${pack}/bin -if [ "$use_jemalloc" == "on" ]; then - copy_file ./thirdparty/output/lib/libjemalloc.so.2 ${pack}/bin - copy_file ./thirdparty/output/lib/libprofiler.so.0 ${pack}/bin +if [ "$use_jemalloc" == "true" ]; then + copy_file ${THIRDPARTY_ROOT}/output/lib/libjemalloc.so.2 ${pack}/bin + copy_file ${THIRDPARTY_ROOT}/output/lib/libprofiler.so.0 ${pack}/bin else - copy_file ./thirdparty/output/lib/libtcmalloc_and_profiler.so.4 ${pack}/bin + copy_file ${THIRDPARTY_ROOT}/output/lib/libtcmalloc_and_profiler.so.4 ${pack}/bin fi -copy_file ./thirdparty/output/lib/libboost*.so.1.69.0 ${pack}/bin -copy_file ./thirdparty/output/lib/libhdfs* ${pack}/bin -copy_file ./thirdparty/output/lib/librocksdb.so.8 ${pack}/bin -copy_file ./scripts/sendmail.sh ${pack}/bin +copy_file ${THIRDPARTY_ROOT}/output/lib/libboost*.so.1.69.0 ${pack}/bin +copy_file ${THIRDPARTY_ROOT}/output/lib/libhdfs* ${pack}/bin +copy_file ${THIRDPARTY_ROOT}/output/lib/librocksdb.so.8 ${pack}/bin +copy_file ./admin_tools/config_hdfs.sh ${pack}/bin +copy_file ./admin_tools/sendmail.sh ${pack}/bin copy_file ./src/server/config.ini ${pack}/bin copy_file ./src/server/config.min.ini ${pack}/bin -copy_file ./scripts/config_hdfs.sh ${pack}/bin -copy_file "$(get_stdcpp_lib $custom_gcc)" "${pack}/bin" +copy_file "$(get_stdcpp_lib $custom_gcc $separate_servers)" "${pack}/bin" pack_server_lib() { - pack_system_lib "${pack}/bin" server "$1" + if [[ $2 == "false" ]]; then + pack_system_lib "${pack}/bin" server "$1" + else + pack_system_lib "${pack}/bin" meta_server "$1" + fi } -pack_server_lib crypto -pack_server_lib ssl +pack_server_lib crypto $separate_servers +pack_server_lib ssl $separate_servers # Pack hadoop-related files. # If you want to use hdfs service to backup/restore/bulkload pegasus tables, diff --git a/scripts/pack_tools.sh b/build_tools/pack_tools.sh similarity index 74% rename from scripts/pack_tools.sh rename to build_tools/pack_tools.sh index ce386b3a49..24d78f2efe 100755 --- a/scripts/pack_tools.sh +++ b/build_tools/pack_tools.sh @@ -27,6 +27,7 @@ function usage() echo " -p|--update-package-template " echo " -g|--custom-gcc" echo " -j|--use-jemalloc" + echo " -s|--separate_servers" exit 0 } @@ -82,7 +83,8 @@ if [ -n "$MINOS_CONFIG_FILE" ]; then fi custom_gcc="false" -use_jemalloc="off" +use_jemalloc="false" +separate_servers="false" while [[ $# > 0 ]]; do option_key="$1" @@ -98,7 +100,10 @@ while [[ $# > 0 ]]; do usage ;; -j|--use-jemalloc) - use_jemalloc="on" + use_jemalloc="true" + ;; + -s | --separate_servers) + separate_servers="true" ;; *) echo "ERROR: unknown option \"$option_key\"" @@ -114,7 +119,12 @@ mkdir -p ${pack} copy_file ./run.sh ${pack}/ mkdir -p ${pack}/bin -cp -v -r ${BUILD_LATEST_DIR}/output/bin/pegasus_server ${pack}/bin/ +if [[ $separate_servers == "false" ]]; then + copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_server/pegasus_server ${pack}/bin +else + copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_meta_server/pegasus_meta_server ${pack}/bin + copy_file ${BUILD_LATEST_DIR}/output/bin/pegasus_replica_server/pegasus_replica_server ${pack}/bin +fi cp -v -r ${BUILD_LATEST_DIR}/output/bin/pegasus_shell ${pack}/bin/ cp -v -r ${BUILD_LATEST_DIR}/output/bin/pegasus_bench ${pack}/bin/ cp -v -r ${BUILD_LATEST_DIR}/output/bin/pegasus_kill_test ${pack}/bin/ @@ -124,30 +134,35 @@ cp -v -r ${BUILD_LATEST_DIR}/output/bin/pegasus_pressureclient ${pack}/bin/ mkdir -p ${pack}/lib copy_file ${BUILD_LATEST_DIR}/output/lib/*.so* ${pack}/lib/ -if [ "$use_jemalloc" == "on" ]; then - copy_file ./thirdparty/output/lib/libjemalloc.so.2 ${pack}/lib/ - copy_file ./thirdparty/output/lib/libprofiler.so.0 ${pack}/lib/ +if [ "$use_jemalloc" == "true" ]; then + copy_file ${THIRDPARTY_ROOT}/output/lib/libjemalloc.so.2 ${pack}/lib/ + copy_file ${THIRDPARTY_ROOT}/output/lib/libprofiler.so.0 ${pack}/lib/ else - copy_file ./thirdparty/output/lib/libtcmalloc_and_profiler.so.4 ${pack}/lib/ + copy_file ${THIRDPARTY_ROOT}/output/lib/libtcmalloc_and_profiler.so.4 ${pack}/lib/ fi -copy_file ./thirdparty/output/lib/libboost*.so.1.69.0 ${pack}/lib/ -copy_file ./thirdparty/output/lib/libhdfs* ${pack}/lib/ -copy_file ./thirdparty/output/lib/librocksdb.so.8 ${pack}/lib/ -copy_file `get_stdcpp_lib $custom_gcc` ${pack}/lib/ +copy_file ${THIRDPARTY_ROOT}/output/lib/libboost*.so.1.69.0 ${pack}/lib/ +copy_file ${THIRDPARTY_ROOT}/output/lib/libhdfs* ${pack}/lib/ +copy_file ${THIRDPARTY_ROOT}/output/lib/librocksdb.so.8 ${pack}/lib/ +copy_file `get_stdcpp_lib $custom_gcc $separate_servers` ${pack}/lib/ pack_tools_lib() { pack_system_lib "${pack}/lib" shell "$1" } -pack_tools_lib crypto -pack_tools_lib ssl +pack_tools_lib crypto $separate_servers +pack_tools_lib ssl $separate_servers chmod -x ${pack}/lib/* -mkdir -p ${pack}/scripts -copy_file ./scripts/* ${pack}/scripts/ -chmod +x ${pack}/scripts/*.sh +mkdir -p ${pack}/admin_tools +copy_file ./admin_tools/* ${pack}/admin_tools/ +chmod +x ${pack}/admin_tools/*.sh + +mkdir -p ${pack}/build_tools +copy_file ./build_tools/download_*.sh ${pack}/build_tools/ +copy_file ./build_tools/*_zk.sh ${pack}/build_tools/ +chmod +x ${pack}/build_tools/*.sh mkdir -p ${pack}/src/server copy_file ./src/server/*.ini ${pack}/src/server/ diff --git a/scripts/recompile_thrift.sh b/build_tools/recompile_thrift.sh similarity index 81% rename from scripts/recompile_thrift.sh rename to build_tools/recompile_thrift.sh index 52e5397be2..9cec796c31 100755 --- a/scripts/recompile_thrift.sh +++ b/build_tools/recompile_thrift.sh @@ -17,7 +17,6 @@ # under the License. cd `dirname $0` -THIRDPARTY_ROOT=../thirdparty if [ ! -d "$THIRDPARTY_ROOT" ]; then echo "ERROR: THIRDPARTY_ROOT not set" @@ -30,7 +29,7 @@ rm -rf $TMP_DIR mkdir -p $TMP_DIR $THIRDPARTY_ROOT/output/bin/thrift --gen cpp:moveable_types -out $TMP_DIR ../idl/rrdb.thrift -sed 's/#include "dsn_types.h"/#include "runtime\/rpc\/rpc_address.h"\n#include "runtime\/rpc\/rpc_host_port.h"\n#include "runtime\/task\/task_code.h"\n#include "utils\/blob.h"/' $TMP_DIR/rrdb_types.h > ../src/include/rrdb/rrdb_types.h +sed 's/#include "dsn_types.h"/#include "rpc\/rpc_address.h"\n#include "rpc\/rpc_host_port.h"\n#include "task\/task_code.h"\n#include "utils\/blob.h"/' $TMP_DIR/rrdb_types.h > ../src/include/rrdb/rrdb_types.h sed 's/#include "rrdb_types.h"/#include /' $TMP_DIR/rrdb_types.cpp > ../src/base/rrdb_types.cpp rm -rf $TMP_DIR diff --git a/scripts/redis_proto_check.py b/build_tools/redis_proto_check.py similarity index 100% rename from scripts/redis_proto_check.py rename to build_tools/redis_proto_check.py diff --git a/scripts/run-clang-format.py b/build_tools/run-clang-format.py similarity index 99% rename from scripts/run-clang-format.py rename to build_tools/run-clang-format.py index 4c069a944c..ce2263ccdd 100755 --- a/scripts/run-clang-format.py +++ b/build_tools/run-clang-format.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # MIT License # diff --git a/scripts/start_zk.sh b/build_tools/start_zk.sh similarity index 100% rename from scripts/start_zk.sh rename to build_tools/start_zk.sh diff --git a/scripts/stop_zk.sh b/build_tools/stop_zk.sh similarity index 100% rename from scripts/stop_zk.sh rename to build_tools/stop_zk.sh diff --git a/cmake_modules/BaseFunctions.cmake b/cmake_modules/BaseFunctions.cmake index 33c819fbb5..9b784dc441 100644 --- a/cmake_modules/BaseFunctions.cmake +++ b/cmake_modules/BaseFunctions.cmake @@ -193,12 +193,20 @@ endfunction() function(dsn_setup_compiler_flags) if(CMAKE_BUILD_TYPE STREQUAL "Debug") add_definitions(-DDSN_BUILD_TYPE=Debug) - add_definitions(-g) else() - add_definitions(-g) add_definitions(-O2) add_definitions(-DDSN_BUILD_TYPE=Release) endif() + + if("$ENV{GITHUB_ACTION}" STREQUAL "" OR APPLE) + add_definitions(-g) + else() + # Reduce the target size when build on GitHub actions and non-macOS. + message(WARNING "Running GitHub actions, the target size will be reduced!") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Os -ffunction-sections -fdata-sections -fno-unwind-tables -fno-asynchronous-unwind-tables") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-s -Wl,--gc-sections") + endif() + cmake_host_system_information(RESULT BUILD_HOSTNAME QUERY HOSTNAME) add_definitions(-DDSN_BUILD_HOSTNAME=${BUILD_HOSTNAME}) diff --git a/collector/go.mod b/collector/go.mod index 2dea77fec9..c3b674e325 100644 --- a/collector/go.mod +++ b/collector/go.mod @@ -29,7 +29,7 @@ require ( github.com/tidwall/gjson v1.14.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 - k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3 + k8s.io/apimachinery v0.16.13 ) require ( @@ -89,9 +89,9 @@ require ( github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/yosssi/ace v0.0.5 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git a/collector/go.sum b/collector/go.sum index aa5059f9f8..4b684ae3d3 100644 --- a/collector/go.sum +++ b/collector/go.sum @@ -193,6 +193,7 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -262,8 +263,10 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= @@ -388,8 +391,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -431,8 +434,8 @@ golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191105084925-a882066a44e0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -460,6 +463,7 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -473,8 +477,8 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -568,6 +572,7 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -578,12 +583,14 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3 h1:FErmbNIJruD5GT2oVEjtPn5Ar5+rcWJsC8/PPUkR0s4= k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apimachinery v0.16.13 h1:E40YK/NhqhUubG44ZHQULa4Pn+8NnXMAE6awvQ97Pyg= +k8s.io/apimachinery v0.16.13/go.mod h1:4HMHS3mDHtVttspuuhrJ1GGr/0S9B6iWYWZ57KnnZqQ= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20200410163147-594e756bea31/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= diff --git a/docker/pegasus-build-env/centos7/Dockerfile b/docker/pegasus-build-env/centos7/Dockerfile index 790268202e..12db0544f8 100644 --- a/docker/pegasus-build-env/centos7/Dockerfile +++ b/docker/pegasus-build-env/centos7/Dockerfile @@ -19,10 +19,16 @@ FROM centos:7.5.1804 LABEL maintainer=wutao -RUN yum -y install centos-release-scl \ +RUN sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo && \ + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo && \ + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo && \ + yum -y install centos-release-scl \ scl-utils \ - epel-release; \ - yum -y install devtoolset-7-gcc \ + epel-release && \ + sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo && \ + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo && \ + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo && \ + yum -y install devtoolset-7-gcc \ devtoolset-7-gcc-c++ \ java-1.8.0-openjdk-devel.x86_64 \ python3 \ @@ -48,10 +54,10 @@ RUN yum -y install centos-release-scl \ flex \ krb5-devel \ cyrus-sasl-devel \ - patch; \ - yum -y install ca-certificates; \ - yum clean all; \ - rm -rf /var/cache/yum; + patch && \ + yum -y install ca-certificates && \ + yum clean all && \ + rm -rf /var/cache/yum; ENV PATH="/opt/rh/devtoolset-7/root/bin/:${PATH}" diff --git a/docker/thirdparties-bin/Dockerfile b/docker/thirdparties-bin/Dockerfile index 85137791bc..f080da3f03 100644 --- a/docker/thirdparties-bin/Dockerfile +++ b/docker/thirdparties-bin/Dockerfile @@ -35,8 +35,8 @@ RUN git clone --depth=1 --branch=${GITHUB_BRANCH} ${GITHUB_REPOSITORY_URL} \ && unzip /root/thirdparties-src.zip -d . \ && cmake -DCMAKE_BUILD_TYPE=Release -DROCKSDB_PORTABLE=${ROCKSDB_PORTABLE} -DUSE_JEMALLOC=${USE_JEMALLOC} -B build/ . \ && cmake --build build/ -j $(($(nproc)/2+1)) \ - && ../scripts/download_hadoop.sh ${HADOOP_BIN_PATH} \ - && ../scripts/download_zk.sh ${ZOOKEEPER_BIN_PATH} \ + && ../build_tools/download_hadoop.sh ${HADOOP_BIN_PATH} \ + && ../build_tools/download_zk.sh ${ZOOKEEPER_BIN_PATH} \ && zip -r ~/thirdparties-bin.zip output/ build/Source/rocksdb/cmake build/Source/http-parser build/Source/hadoop build/Download/zookeeper ${HADOOP_BIN_PATH} ${ZOOKEEPER_BIN_PATH} \ && cd ~ \ && rm -rf incubator-pegasus; diff --git a/go-client/admin/client_test.go b/go-client/admin/client_test.go index 6d8b12adec..ac15550770 100644 --- a/go-client/admin/client_test.go +++ b/go-client/admin/client_test.go @@ -175,7 +175,7 @@ func TestAdmin_ListNodes(t *testing.T) { for i, node := range nodes { // Each node should be alive. assert.Equal(t, admin.NodeStatus_NS_ALIVE, node.Status) - actualReplicaServerPorts[i] = node.Address.GetPort() + actualReplicaServerPorts[i] = node.GetNode().GetPort() } // Match elements without extra ordering. diff --git a/go-client/go.mod b/go-client/go.mod index 39902ac27b..e99bc4ed75 100644 --- a/go-client/go.mod +++ b/go-client/go.mod @@ -28,7 +28,7 @@ require ( github.com/stretchr/testify v1.4.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 - k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3 + k8s.io/apimachinery v0.16.13 ) require ( @@ -38,5 +38,5 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect golang.org/x/net v0.23.0 // indirect golang.org/x/sys v0.18.0 // indirect - gopkg.in/yaml.v2 v2.2.4 // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect ) diff --git a/go-client/go.sum b/go-client/go.sum index 7f9cbe8484..8fa7e76074 100644 --- a/go-client/go.sum +++ b/go-client/go.sum @@ -30,7 +30,7 @@ github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -39,7 +39,7 @@ github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsC github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= @@ -59,9 +59,9 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -108,13 +108,13 @@ gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3 h1:FErmbNIJruD5GT2oVEjtPn5Ar5+rcWJsC8/PPUkR0s4= -k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +k8s.io/apimachinery v0.16.13 h1:E40YK/NhqhUubG44ZHQULa4Pn+8NnXMAE6awvQ97Pyg= +k8s.io/apimachinery v0.16.13/go.mod h1:4HMHS3mDHtVttspuuhrJ1GGr/0S9B6iWYWZ57KnnZqQ= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20200410163147-594e756bea31/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/go-client/idl/admin/GoUnusedProtection__.go b/go-client/idl/admin/GoUnusedProtection__.go new file mode 100644 index 0000000000..86e6c7e055 --- /dev/null +++ b/go-client/idl/admin/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +var GoUnusedProtection__ int diff --git a/go-client/idl/admin/backup-consts.go b/go-client/idl/admin/backup-consts.go new file mode 100644 index 0000000000..757b943ef3 --- /dev/null +++ b/go-client/idl/admin/backup-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/admin/backup.go b/go-client/idl/admin/backup.go new file mode 100644 index 0000000000..268cb2a5ab --- /dev/null +++ b/go-client/idl/admin/backup.go @@ -0,0 +1,5299 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +// Attributes: +// - PolicyName +// - BackupProviderType +type PolicyInfo struct { + PolicyName string `thrift:"policy_name,1" db:"policy_name" json:"policy_name"` + BackupProviderType string `thrift:"backup_provider_type,2" db:"backup_provider_type" json:"backup_provider_type"` +} + +func NewPolicyInfo() *PolicyInfo { + return &PolicyInfo{} +} + +func (p *PolicyInfo) GetPolicyName() string { + return p.PolicyName +} + +func (p *PolicyInfo) GetBackupProviderType() string { + return p.BackupProviderType +} +func (p *PolicyInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *PolicyInfo) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *PolicyInfo) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.BackupProviderType = v + } + return nil +} + +func (p *PolicyInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("policy_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *PolicyInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:policy_name: ", p), err) + } + return err +} + +func (p *PolicyInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_provider_type", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:backup_provider_type: ", p), err) + } + if err := oprot.WriteString(string(p.BackupProviderType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_provider_type (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:backup_provider_type: ", p), err) + } + return err +} + +func (p *PolicyInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("PolicyInfo(%+v)", *p) +} + +// Attributes: +// - ClusterName +// - PolicyName +// - TimeStamp +// - AppName +// - AppID +// - NewAppName_ +// - BackupProviderName +// - SkipBadPartition +// - RestorePath +type ConfigurationRestoreRequest struct { + ClusterName string `thrift:"cluster_name,1" db:"cluster_name" json:"cluster_name"` + PolicyName string `thrift:"policy_name,2" db:"policy_name" json:"policy_name"` + TimeStamp int64 `thrift:"time_stamp,3" db:"time_stamp" json:"time_stamp"` + AppName string `thrift:"app_name,4" db:"app_name" json:"app_name"` + AppID int32 `thrift:"app_id,5" db:"app_id" json:"app_id"` + NewAppName_ string `thrift:"new_app_name,6" db:"new_app_name" json:"new_app_name"` + BackupProviderName string `thrift:"backup_provider_name,7" db:"backup_provider_name" json:"backup_provider_name"` + SkipBadPartition bool `thrift:"skip_bad_partition,8" db:"skip_bad_partition" json:"skip_bad_partition"` + RestorePath *string `thrift:"restore_path,9" db:"restore_path" json:"restore_path,omitempty"` +} + +func NewConfigurationRestoreRequest() *ConfigurationRestoreRequest { + return &ConfigurationRestoreRequest{} +} + +func (p *ConfigurationRestoreRequest) GetClusterName() string { + return p.ClusterName +} + +func (p *ConfigurationRestoreRequest) GetPolicyName() string { + return p.PolicyName +} + +func (p *ConfigurationRestoreRequest) GetTimeStamp() int64 { + return p.TimeStamp +} + +func (p *ConfigurationRestoreRequest) GetAppName() string { + return p.AppName +} + +func (p *ConfigurationRestoreRequest) GetAppID() int32 { + return p.AppID +} + +func (p *ConfigurationRestoreRequest) GetNewAppName_() string { + return p.NewAppName_ +} + +func (p *ConfigurationRestoreRequest) GetBackupProviderName() string { + return p.BackupProviderName +} + +func (p *ConfigurationRestoreRequest) GetSkipBadPartition() bool { + return p.SkipBadPartition +} + +var ConfigurationRestoreRequest_RestorePath_DEFAULT string + +func (p *ConfigurationRestoreRequest) GetRestorePath() string { + if !p.IsSetRestorePath() { + return ConfigurationRestoreRequest_RestorePath_DEFAULT + } + return *p.RestorePath +} +func (p *ConfigurationRestoreRequest) IsSetRestorePath() bool { + return p.RestorePath != nil +} + +func (p *ConfigurationRestoreRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.STRING { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.ClusterName = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.TimeStamp = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.NewAppName_ = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.BackupProviderName = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.SkipBadPartition = v + } + return nil +} + +func (p *ConfigurationRestoreRequest) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.RestorePath = &v + } + return nil +} + +func (p *ConfigurationRestoreRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_restore_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRestoreRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("cluster_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:cluster_name: ", p), err) + } + if err := oprot.WriteString(string(p.ClusterName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.cluster_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:cluster_name: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:policy_name: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("time_stamp", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:time_stamp: ", p), err) + } + if err := oprot.WriteI64(int64(p.TimeStamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.time_stamp (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:time_stamp: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_name: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:app_id: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_app_name", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:new_app_name: ", p), err) + } + if err := oprot.WriteString(string(p.NewAppName_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_app_name (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:new_app_name: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_provider_name", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:backup_provider_name: ", p), err) + } + if err := oprot.WriteString(string(p.BackupProviderName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_provider_name (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:backup_provider_name: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("skip_bad_partition", thrift.BOOL, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:skip_bad_partition: ", p), err) + } + if err := oprot.WriteBool(bool(p.SkipBadPartition)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.skip_bad_partition (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:skip_bad_partition: ", p), err) + } + return err +} + +func (p *ConfigurationRestoreRequest) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetRestorePath() { + if err := oprot.WriteFieldBegin("restore_path", thrift.STRING, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:restore_path: ", p), err) + } + if err := oprot.WriteString(string(*p.RestorePath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.restore_path (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:restore_path: ", p), err) + } + } + return err +} + +func (p *ConfigurationRestoreRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRestoreRequest(%+v)", *p) +} + +// Attributes: +// - Pid +// - Policy +// - AppName +// - BackupID +// - BackupPath +type BackupRequest struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + Policy *PolicyInfo `thrift:"policy,2" db:"policy" json:"policy"` + AppName string `thrift:"app_name,3" db:"app_name" json:"app_name"` + BackupID int64 `thrift:"backup_id,4" db:"backup_id" json:"backup_id"` + BackupPath *string `thrift:"backup_path,5" db:"backup_path" json:"backup_path,omitempty"` +} + +func NewBackupRequest() *BackupRequest { + return &BackupRequest{} +} + +var BackupRequest_Pid_DEFAULT *base.Gpid + +func (p *BackupRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return BackupRequest_Pid_DEFAULT + } + return p.Pid +} + +var BackupRequest_Policy_DEFAULT *PolicyInfo + +func (p *BackupRequest) GetPolicy() *PolicyInfo { + if !p.IsSetPolicy() { + return BackupRequest_Policy_DEFAULT + } + return p.Policy +} + +func (p *BackupRequest) GetAppName() string { + return p.AppName +} + +func (p *BackupRequest) GetBackupID() int64 { + return p.BackupID +} + +var BackupRequest_BackupPath_DEFAULT string + +func (p *BackupRequest) GetBackupPath() string { + if !p.IsSetBackupPath() { + return BackupRequest_BackupPath_DEFAULT + } + return *p.BackupPath +} +func (p *BackupRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *BackupRequest) IsSetPolicy() bool { + return p.Policy != nil +} + +func (p *BackupRequest) IsSetBackupPath() bool { + return p.BackupPath != nil +} + +func (p *BackupRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BackupRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *BackupRequest) ReadField2(iprot thrift.TProtocol) error { + p.Policy = &PolicyInfo{} + if err := p.Policy.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Policy), err) + } + return nil +} + +func (p *BackupRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *BackupRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.BackupID = v + } + return nil +} + +func (p *BackupRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.BackupPath = &v + } + return nil +} + +func (p *BackupRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("backup_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BackupRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *BackupRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:policy: ", p), err) + } + if err := p.Policy.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Policy), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:policy: ", p), err) + } + return err +} + +func (p *BackupRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_name: ", p), err) + } + return err +} + +func (p *BackupRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:backup_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.BackupID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:backup_id: ", p), err) + } + return err +} + +func (p *BackupRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetBackupPath() { + if err := oprot.WriteFieldBegin("backup_path", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:backup_path: ", p), err) + } + if err := oprot.WriteString(string(*p.BackupPath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_path (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:backup_path: ", p), err) + } + } + return err +} + +func (p *BackupRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackupRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Pid +// - Progress +// - PolicyName +// - BackupID +// - CheckpointTotalSize +type BackupResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Pid *base.Gpid `thrift:"pid,2" db:"pid" json:"pid"` + Progress int32 `thrift:"progress,3" db:"progress" json:"progress"` + PolicyName string `thrift:"policy_name,4" db:"policy_name" json:"policy_name"` + BackupID int64 `thrift:"backup_id,5" db:"backup_id" json:"backup_id"` + CheckpointTotalSize int64 `thrift:"checkpoint_total_size,6" db:"checkpoint_total_size" json:"checkpoint_total_size"` +} + +func NewBackupResponse() *BackupResponse { + return &BackupResponse{} +} + +var BackupResponse_Err_DEFAULT *base.ErrorCode + +func (p *BackupResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return BackupResponse_Err_DEFAULT + } + return p.Err +} + +var BackupResponse_Pid_DEFAULT *base.Gpid + +func (p *BackupResponse) GetPid() *base.Gpid { + if !p.IsSetPid() { + return BackupResponse_Pid_DEFAULT + } + return p.Pid +} + +func (p *BackupResponse) GetProgress() int32 { + return p.Progress +} + +func (p *BackupResponse) GetPolicyName() string { + return p.PolicyName +} + +func (p *BackupResponse) GetBackupID() int64 { + return p.BackupID +} + +func (p *BackupResponse) GetCheckpointTotalSize() int64 { + return p.CheckpointTotalSize +} +func (p *BackupResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *BackupResponse) IsSetPid() bool { + return p.Pid != nil +} + +func (p *BackupResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BackupResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *BackupResponse) ReadField2(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *BackupResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Progress = v + } + return nil +} + +func (p *BackupResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *BackupResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.BackupID = v + } + return nil +} + +func (p *BackupResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.CheckpointTotalSize = v + } + return nil +} + +func (p *BackupResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("backup_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BackupResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *BackupResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:pid: ", p), err) + } + return err +} + +func (p *BackupResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("progress", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:progress: ", p), err) + } + if err := oprot.WriteI32(int32(p.Progress)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.progress (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:progress: ", p), err) + } + return err +} + +func (p *BackupResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:policy_name: ", p), err) + } + return err +} + +func (p *BackupResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:backup_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.BackupID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:backup_id: ", p), err) + } + return err +} + +func (p *BackupResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("checkpoint_total_size", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:checkpoint_total_size: ", p), err) + } + if err := oprot.WriteI64(int64(p.CheckpointTotalSize)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.checkpoint_total_size (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:checkpoint_total_size: ", p), err) + } + return err +} + +func (p *BackupResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackupResponse(%+v)", *p) +} + +// Attributes: +// - Pid +// - PolicyName +type BackupClearRequest struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + PolicyName string `thrift:"policy_name,2" db:"policy_name" json:"policy_name"` +} + +func NewBackupClearRequest() *BackupClearRequest { + return &BackupClearRequest{} +} + +var BackupClearRequest_Pid_DEFAULT *base.Gpid + +func (p *BackupClearRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return BackupClearRequest_Pid_DEFAULT + } + return p.Pid +} + +func (p *BackupClearRequest) GetPolicyName() string { + return p.PolicyName +} +func (p *BackupClearRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *BackupClearRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BackupClearRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *BackupClearRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *BackupClearRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("backup_clear_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BackupClearRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *BackupClearRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:policy_name: ", p), err) + } + return err +} + +func (p *BackupClearRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackupClearRequest(%+v)", *p) +} + +// Attributes: +// - PolicyName +// - AddAppids +// - RemovalAppids +// - NewBackupIntervalSec_ +// - BackupHistoryCountToKeep +// - IsDisable +// - StartTime +type ConfigurationModifyBackupPolicyRequest struct { + PolicyName string `thrift:"policy_name,1" db:"policy_name" json:"policy_name"` + AddAppids []int32 `thrift:"add_appids,2" db:"add_appids" json:"add_appids,omitempty"` + RemovalAppids []int32 `thrift:"removal_appids,3" db:"removal_appids" json:"removal_appids,omitempty"` + NewBackupIntervalSec_ *int64 `thrift:"new_backup_interval_sec,4" db:"new_backup_interval_sec" json:"new_backup_interval_sec,omitempty"` + BackupHistoryCountToKeep *int32 `thrift:"backup_history_count_to_keep,5" db:"backup_history_count_to_keep" json:"backup_history_count_to_keep,omitempty"` + IsDisable *bool `thrift:"is_disable,6" db:"is_disable" json:"is_disable,omitempty"` + StartTime *string `thrift:"start_time,7" db:"start_time" json:"start_time,omitempty"` +} + +func NewConfigurationModifyBackupPolicyRequest() *ConfigurationModifyBackupPolicyRequest { + return &ConfigurationModifyBackupPolicyRequest{} +} + +func (p *ConfigurationModifyBackupPolicyRequest) GetPolicyName() string { + return p.PolicyName +} + +var ConfigurationModifyBackupPolicyRequest_AddAppids_DEFAULT []int32 + +func (p *ConfigurationModifyBackupPolicyRequest) GetAddAppids() []int32 { + return p.AddAppids +} + +var ConfigurationModifyBackupPolicyRequest_RemovalAppids_DEFAULT []int32 + +func (p *ConfigurationModifyBackupPolicyRequest) GetRemovalAppids() []int32 { + return p.RemovalAppids +} + +var ConfigurationModifyBackupPolicyRequest_NewBackupIntervalSec__DEFAULT int64 + +func (p *ConfigurationModifyBackupPolicyRequest) GetNewBackupIntervalSec_() int64 { + if !p.IsSetNewBackupIntervalSec_() { + return ConfigurationModifyBackupPolicyRequest_NewBackupIntervalSec__DEFAULT + } + return *p.NewBackupIntervalSec_ +} + +var ConfigurationModifyBackupPolicyRequest_BackupHistoryCountToKeep_DEFAULT int32 + +func (p *ConfigurationModifyBackupPolicyRequest) GetBackupHistoryCountToKeep() int32 { + if !p.IsSetBackupHistoryCountToKeep() { + return ConfigurationModifyBackupPolicyRequest_BackupHistoryCountToKeep_DEFAULT + } + return *p.BackupHistoryCountToKeep +} + +var ConfigurationModifyBackupPolicyRequest_IsDisable_DEFAULT bool + +func (p *ConfigurationModifyBackupPolicyRequest) GetIsDisable() bool { + if !p.IsSetIsDisable() { + return ConfigurationModifyBackupPolicyRequest_IsDisable_DEFAULT + } + return *p.IsDisable +} + +var ConfigurationModifyBackupPolicyRequest_StartTime_DEFAULT string + +func (p *ConfigurationModifyBackupPolicyRequest) GetStartTime() string { + if !p.IsSetStartTime() { + return ConfigurationModifyBackupPolicyRequest_StartTime_DEFAULT + } + return *p.StartTime +} +func (p *ConfigurationModifyBackupPolicyRequest) IsSetAddAppids() bool { + return p.AddAppids != nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) IsSetRemovalAppids() bool { + return p.RemovalAppids != nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) IsSetNewBackupIntervalSec_() bool { + return p.NewBackupIntervalSec_ != nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) IsSetBackupHistoryCountToKeep() bool { + return p.BackupHistoryCountToKeep != nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) IsSetIsDisable() bool { + return p.IsDisable != nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) IsSetStartTime() bool { + return p.StartTime != nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int32, 0, size) + p.AddAppids = tSlice + for i := 0; i < size; i++ { + var _elem0 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem0 = v + } + p.AddAppids = append(p.AddAppids, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int32, 0, size) + p.RemovalAppids = tSlice + for i := 0; i < size; i++ { + var _elem1 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem1 = v + } + p.RemovalAppids = append(p.RemovalAppids, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.NewBackupIntervalSec_ = &v + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.BackupHistoryCountToKeep = &v + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.IsDisable = &v + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.StartTime = &v + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_modify_backup_policy_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:policy_name: ", p), err) + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetAddAppids() { + if err := oprot.WriteFieldBegin("add_appids", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:add_appids: ", p), err) + } + if err := oprot.WriteListBegin(thrift.I32, len(p.AddAppids)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.AddAppids { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:add_appids: ", p), err) + } + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetRemovalAppids() { + if err := oprot.WriteFieldBegin("removal_appids", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:removal_appids: ", p), err) + } + if err := oprot.WriteListBegin(thrift.I32, len(p.RemovalAppids)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.RemovalAppids { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:removal_appids: ", p), err) + } + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetNewBackupIntervalSec_() { + if err := oprot.WriteFieldBegin("new_backup_interval_sec", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:new_backup_interval_sec: ", p), err) + } + if err := oprot.WriteI64(int64(*p.NewBackupIntervalSec_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_backup_interval_sec (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:new_backup_interval_sec: ", p), err) + } + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetBackupHistoryCountToKeep() { + if err := oprot.WriteFieldBegin("backup_history_count_to_keep", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:backup_history_count_to_keep: ", p), err) + } + if err := oprot.WriteI32(int32(*p.BackupHistoryCountToKeep)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_history_count_to_keep (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:backup_history_count_to_keep: ", p), err) + } + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetIsDisable() { + if err := oprot.WriteFieldBegin("is_disable", thrift.BOOL, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:is_disable: ", p), err) + } + if err := oprot.WriteBool(bool(*p.IsDisable)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_disable (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:is_disable: ", p), err) + } + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetStartTime() { + if err := oprot.WriteFieldBegin("start_time", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:start_time: ", p), err) + } + if err := oprot.WriteString(string(*p.StartTime)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.start_time (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:start_time: ", p), err) + } + } + return err +} + +func (p *ConfigurationModifyBackupPolicyRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationModifyBackupPolicyRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +type ConfigurationModifyBackupPolicyResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationModifyBackupPolicyResponse() *ConfigurationModifyBackupPolicyResponse { + return &ConfigurationModifyBackupPolicyResponse{} +} + +var ConfigurationModifyBackupPolicyResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationModifyBackupPolicyResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationModifyBackupPolicyResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationModifyBackupPolicyResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationModifyBackupPolicyResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationModifyBackupPolicyResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_modify_backup_policy_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationModifyBackupPolicyResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationModifyBackupPolicyResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationModifyBackupPolicyResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationModifyBackupPolicyResponse(%+v)", *p) +} + +// Attributes: +// - BackupProviderType +// - PolicyName +// - AppIds +// - BackupIntervalSeconds +// - BackupHistoryCountToKeep +// - StartTime +type ConfigurationAddBackupPolicyRequest struct { + BackupProviderType string `thrift:"backup_provider_type,1" db:"backup_provider_type" json:"backup_provider_type"` + PolicyName string `thrift:"policy_name,2" db:"policy_name" json:"policy_name"` + AppIds []int32 `thrift:"app_ids,3" db:"app_ids" json:"app_ids"` + BackupIntervalSeconds int64 `thrift:"backup_interval_seconds,4" db:"backup_interval_seconds" json:"backup_interval_seconds"` + BackupHistoryCountToKeep int32 `thrift:"backup_history_count_to_keep,5" db:"backup_history_count_to_keep" json:"backup_history_count_to_keep"` + StartTime string `thrift:"start_time,6" db:"start_time" json:"start_time"` +} + +func NewConfigurationAddBackupPolicyRequest() *ConfigurationAddBackupPolicyRequest { + return &ConfigurationAddBackupPolicyRequest{} +} + +func (p *ConfigurationAddBackupPolicyRequest) GetBackupProviderType() string { + return p.BackupProviderType +} + +func (p *ConfigurationAddBackupPolicyRequest) GetPolicyName() string { + return p.PolicyName +} + +func (p *ConfigurationAddBackupPolicyRequest) GetAppIds() []int32 { + return p.AppIds +} + +func (p *ConfigurationAddBackupPolicyRequest) GetBackupIntervalSeconds() int64 { + return p.BackupIntervalSeconds +} + +func (p *ConfigurationAddBackupPolicyRequest) GetBackupHistoryCountToKeep() int32 { + return p.BackupHistoryCountToKeep +} + +func (p *ConfigurationAddBackupPolicyRequest) GetStartTime() string { + return p.StartTime +} +func (p *ConfigurationAddBackupPolicyRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.BackupProviderType = v + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int32, 0, size) + p.AppIds = tSlice + for i := 0; i < size; i++ { + var _elem2 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem2 = v + } + p.AppIds = append(p.AppIds, _elem2) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.BackupIntervalSeconds = v + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.BackupHistoryCountToKeep = v + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.StartTime = v + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_add_backup_policy_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationAddBackupPolicyRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_provider_type", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:backup_provider_type: ", p), err) + } + if err := oprot.WriteString(string(p.BackupProviderType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_provider_type (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:backup_provider_type: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:policy_name: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_ids", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_ids: ", p), err) + } + if err := oprot.WriteListBegin(thrift.I32, len(p.AppIds)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.AppIds { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_ids: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_interval_seconds", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:backup_interval_seconds: ", p), err) + } + if err := oprot.WriteI64(int64(p.BackupIntervalSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_interval_seconds (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:backup_interval_seconds: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_history_count_to_keep", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:backup_history_count_to_keep: ", p), err) + } + if err := oprot.WriteI32(int32(p.BackupHistoryCountToKeep)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_history_count_to_keep (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:backup_history_count_to_keep: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_time", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:start_time: ", p), err) + } + if err := oprot.WriteString(string(p.StartTime)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.start_time (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:start_time: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationAddBackupPolicyRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +type ConfigurationAddBackupPolicyResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationAddBackupPolicyResponse() *ConfigurationAddBackupPolicyResponse { + return &ConfigurationAddBackupPolicyResponse{} +} + +var ConfigurationAddBackupPolicyResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationAddBackupPolicyResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationAddBackupPolicyResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationAddBackupPolicyResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationAddBackupPolicyResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationAddBackupPolicyResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationAddBackupPolicyResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationAddBackupPolicyResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationAddBackupPolicyResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_add_backup_policy_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationAddBackupPolicyResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationAddBackupPolicyResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationAddBackupPolicyResponse(%+v)", *p) +} + +// Attributes: +// - PolicyName +// - BackupProviderType +// - BackupIntervalSeconds +// - AppIds +// - BackupHistoryCountToKeep +// - StartTime +// - IsDisable +type PolicyEntry struct { + PolicyName string `thrift:"policy_name,1" db:"policy_name" json:"policy_name"` + BackupProviderType string `thrift:"backup_provider_type,2" db:"backup_provider_type" json:"backup_provider_type"` + BackupIntervalSeconds string `thrift:"backup_interval_seconds,3" db:"backup_interval_seconds" json:"backup_interval_seconds"` + AppIds []int32 `thrift:"app_ids,4" db:"app_ids" json:"app_ids"` + BackupHistoryCountToKeep int32 `thrift:"backup_history_count_to_keep,5" db:"backup_history_count_to_keep" json:"backup_history_count_to_keep"` + StartTime string `thrift:"start_time,6" db:"start_time" json:"start_time"` + IsDisable bool `thrift:"is_disable,7" db:"is_disable" json:"is_disable"` +} + +func NewPolicyEntry() *PolicyEntry { + return &PolicyEntry{} +} + +func (p *PolicyEntry) GetPolicyName() string { + return p.PolicyName +} + +func (p *PolicyEntry) GetBackupProviderType() string { + return p.BackupProviderType +} + +func (p *PolicyEntry) GetBackupIntervalSeconds() string { + return p.BackupIntervalSeconds +} + +func (p *PolicyEntry) GetAppIds() []int32 { + return p.AppIds +} + +func (p *PolicyEntry) GetBackupHistoryCountToKeep() int32 { + return p.BackupHistoryCountToKeep +} + +func (p *PolicyEntry) GetStartTime() string { + return p.StartTime +} + +func (p *PolicyEntry) GetIsDisable() bool { + return p.IsDisable +} +func (p *PolicyEntry) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.SET { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *PolicyEntry) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.PolicyName = v + } + return nil +} + +func (p *PolicyEntry) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.BackupProviderType = v + } + return nil +} + +func (p *PolicyEntry) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.BackupIntervalSeconds = v + } + return nil +} + +func (p *PolicyEntry) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadSetBegin() + if err != nil { + return thrift.PrependError("error reading set begin: ", err) + } + tSet := make([]int32, 0, size) + p.AppIds = tSet + for i := 0; i < size; i++ { + var _elem3 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem3 = v + } + p.AppIds = append(p.AppIds, _elem3) + } + if err := iprot.ReadSetEnd(); err != nil { + return thrift.PrependError("error reading set end: ", err) + } + return nil +} + +func (p *PolicyEntry) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.BackupHistoryCountToKeep = v + } + return nil +} + +func (p *PolicyEntry) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.StartTime = v + } + return nil +} + +func (p *PolicyEntry) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.IsDisable = v + } + return nil +} + +func (p *PolicyEntry) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("policy_entry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *PolicyEntry) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:policy_name: ", p), err) + } + if err := oprot.WriteString(string(p.PolicyName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.policy_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:policy_name: ", p), err) + } + return err +} + +func (p *PolicyEntry) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_provider_type", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:backup_provider_type: ", p), err) + } + if err := oprot.WriteString(string(p.BackupProviderType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_provider_type (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:backup_provider_type: ", p), err) + } + return err +} + +func (p *PolicyEntry) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_interval_seconds", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_interval_seconds: ", p), err) + } + if err := oprot.WriteString(string(p.BackupIntervalSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_interval_seconds (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_interval_seconds: ", p), err) + } + return err +} + +func (p *PolicyEntry) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_ids", thrift.SET, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_ids: ", p), err) + } + if err := oprot.WriteSetBegin(thrift.I32, len(p.AppIds)); err != nil { + return thrift.PrependError("error writing set begin: ", err) + } + for i := 0; i < len(p.AppIds); i++ { + for j := i + 1; j < len(p.AppIds); j++ { + if reflect.DeepEqual(p.AppIds[i], p.AppIds[j]) { + return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", p.AppIds[i])) + } + } + } + for _, v := range p.AppIds { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteSetEnd(); err != nil { + return thrift.PrependError("error writing set end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_ids: ", p), err) + } + return err +} + +func (p *PolicyEntry) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_history_count_to_keep", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:backup_history_count_to_keep: ", p), err) + } + if err := oprot.WriteI32(int32(p.BackupHistoryCountToKeep)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_history_count_to_keep (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:backup_history_count_to_keep: ", p), err) + } + return err +} + +func (p *PolicyEntry) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_time", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:start_time: ", p), err) + } + if err := oprot.WriteString(string(p.StartTime)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.start_time (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:start_time: ", p), err) + } + return err +} + +func (p *PolicyEntry) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_disable", thrift.BOOL, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:is_disable: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsDisable)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_disable (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:is_disable: ", p), err) + } + return err +} + +func (p *PolicyEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("PolicyEntry(%+v)", *p) +} + +// Attributes: +// - BackupID +// - StartTimeMs +// - EndTimeMs +// - AppIds +type BackupEntry struct { + BackupID int64 `thrift:"backup_id,1" db:"backup_id" json:"backup_id"` + StartTimeMs int64 `thrift:"start_time_ms,2" db:"start_time_ms" json:"start_time_ms"` + EndTimeMs int64 `thrift:"end_time_ms,3" db:"end_time_ms" json:"end_time_ms"` + AppIds []int32 `thrift:"app_ids,4" db:"app_ids" json:"app_ids"` +} + +func NewBackupEntry() *BackupEntry { + return &BackupEntry{} +} + +func (p *BackupEntry) GetBackupID() int64 { + return p.BackupID +} + +func (p *BackupEntry) GetStartTimeMs() int64 { + return p.StartTimeMs +} + +func (p *BackupEntry) GetEndTimeMs() int64 { + return p.EndTimeMs +} + +func (p *BackupEntry) GetAppIds() []int32 { + return p.AppIds +} +func (p *BackupEntry) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.SET { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BackupEntry) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.BackupID = v + } + return nil +} + +func (p *BackupEntry) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.StartTimeMs = v + } + return nil +} + +func (p *BackupEntry) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.EndTimeMs = v + } + return nil +} + +func (p *BackupEntry) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadSetBegin() + if err != nil { + return thrift.PrependError("error reading set begin: ", err) + } + tSet := make([]int32, 0, size) + p.AppIds = tSet + for i := 0; i < size; i++ { + var _elem4 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem4 = v + } + p.AppIds = append(p.AppIds, _elem4) + } + if err := iprot.ReadSetEnd(); err != nil { + return thrift.PrependError("error reading set end: ", err) + } + return nil +} + +func (p *BackupEntry) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("backup_entry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BackupEntry) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:backup_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.BackupID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:backup_id: ", p), err) + } + return err +} + +func (p *BackupEntry) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_time_ms", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:start_time_ms: ", p), err) + } + if err := oprot.WriteI64(int64(p.StartTimeMs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.start_time_ms (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:start_time_ms: ", p), err) + } + return err +} + +func (p *BackupEntry) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("end_time_ms", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:end_time_ms: ", p), err) + } + if err := oprot.WriteI64(int64(p.EndTimeMs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.end_time_ms (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:end_time_ms: ", p), err) + } + return err +} + +func (p *BackupEntry) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_ids", thrift.SET, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_ids: ", p), err) + } + if err := oprot.WriteSetBegin(thrift.I32, len(p.AppIds)); err != nil { + return thrift.PrependError("error writing set begin: ", err) + } + for i := 0; i < len(p.AppIds); i++ { + for j := i + 1; j < len(p.AppIds); j++ { + if reflect.DeepEqual(p.AppIds[i], p.AppIds[j]) { + return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", p.AppIds[i])) + } + } + } + for _, v := range p.AppIds { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteSetEnd(); err != nil { + return thrift.PrependError("error writing set end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_ids: ", p), err) + } + return err +} + +func (p *BackupEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackupEntry(%+v)", *p) +} + +// Attributes: +// - PolicyNames +// - BackupInfoCount +type ConfigurationQueryBackupPolicyRequest struct { + PolicyNames []string `thrift:"policy_names,1" db:"policy_names" json:"policy_names"` + BackupInfoCount int32 `thrift:"backup_info_count,2" db:"backup_info_count" json:"backup_info_count"` +} + +func NewConfigurationQueryBackupPolicyRequest() *ConfigurationQueryBackupPolicyRequest { + return &ConfigurationQueryBackupPolicyRequest{} +} + +func (p *ConfigurationQueryBackupPolicyRequest) GetPolicyNames() []string { + return p.PolicyNames +} + +func (p *ConfigurationQueryBackupPolicyRequest) GetBackupInfoCount() int32 { + return p.BackupInfoCount +} +func (p *ConfigurationQueryBackupPolicyRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyRequest) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.PolicyNames = tSlice + for i := 0; i < size; i++ { + var _elem5 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem5 = v + } + p.PolicyNames = append(p.PolicyNames, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.BackupInfoCount = v + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_query_backup_policy_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policy_names", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:policy_names: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.PolicyNames)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.PolicyNames { + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:policy_names: ", p), err) + } + return err +} + +func (p *ConfigurationQueryBackupPolicyRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_info_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:backup_info_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.BackupInfoCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_info_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:backup_info_count: ", p), err) + } + return err +} + +func (p *ConfigurationQueryBackupPolicyRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationQueryBackupPolicyRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Policys +// - BackupInfos +// - HintMsg +type ConfigurationQueryBackupPolicyResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Policys []*PolicyEntry `thrift:"policys,2" db:"policys" json:"policys"` + BackupInfos [][]*BackupEntry `thrift:"backup_infos,3" db:"backup_infos" json:"backup_infos"` + HintMsg *string `thrift:"hint_msg,4" db:"hint_msg" json:"hint_msg,omitempty"` +} + +func NewConfigurationQueryBackupPolicyResponse() *ConfigurationQueryBackupPolicyResponse { + return &ConfigurationQueryBackupPolicyResponse{} +} + +var ConfigurationQueryBackupPolicyResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationQueryBackupPolicyResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationQueryBackupPolicyResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationQueryBackupPolicyResponse) GetPolicys() []*PolicyEntry { + return p.Policys +} + +func (p *ConfigurationQueryBackupPolicyResponse) GetBackupInfos() [][]*BackupEntry { + return p.BackupInfos +} + +var ConfigurationQueryBackupPolicyResponse_HintMsg_DEFAULT string + +func (p *ConfigurationQueryBackupPolicyResponse) GetHintMsg() string { + if !p.IsSetHintMsg() { + return ConfigurationQueryBackupPolicyResponse_HintMsg_DEFAULT + } + return *p.HintMsg +} +func (p *ConfigurationQueryBackupPolicyResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) IsSetHintMsg() bool { + return p.HintMsg != nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*PolicyEntry, 0, size) + p.Policys = tSlice + for i := 0; i < size; i++ { + _elem6 := &PolicyEntry{} + if err := _elem6.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err) + } + p.Policys = append(p.Policys, _elem6) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([][]*BackupEntry, 0, size) + p.BackupInfos = tSlice + for i := 0; i < size; i++ { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BackupEntry, 0, size) + _elem7 := tSlice + for i := 0; i < size; i++ { + _elem8 := &BackupEntry{} + if err := _elem8.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err) + } + _elem7 = append(_elem7, _elem8) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + p.BackupInfos = append(p.BackupInfos, _elem7) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.HintMsg = &v + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_query_backup_policy_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationQueryBackupPolicyResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationQueryBackupPolicyResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("policys", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:policys: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Policys)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Policys { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:policys: ", p), err) + } + return err +} + +func (p *ConfigurationQueryBackupPolicyResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_infos", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_infos: ", p), err) + } + if err := oprot.WriteListBegin(thrift.LIST, len(p.BackupInfos)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BackupInfos { + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_infos: ", p), err) + } + return err +} + +func (p *ConfigurationQueryBackupPolicyResponse) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetHintMsg() { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(*p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hint_msg: ", p), err) + } + } + return err +} + +func (p *ConfigurationQueryBackupPolicyResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationQueryBackupPolicyResponse(%+v)", *p) +} + +// Attributes: +// - Pid +// - RestoreStatus +// - Progress +// - Reason +type ConfigurationReportRestoreStatusRequest struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + RestoreStatus *base.ErrorCode `thrift:"restore_status,2" db:"restore_status" json:"restore_status"` + Progress int32 `thrift:"progress,3" db:"progress" json:"progress"` + Reason *string `thrift:"reason,4" db:"reason" json:"reason,omitempty"` +} + +func NewConfigurationReportRestoreStatusRequest() *ConfigurationReportRestoreStatusRequest { + return &ConfigurationReportRestoreStatusRequest{} +} + +var ConfigurationReportRestoreStatusRequest_Pid_DEFAULT *base.Gpid + +func (p *ConfigurationReportRestoreStatusRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return ConfigurationReportRestoreStatusRequest_Pid_DEFAULT + } + return p.Pid +} + +var ConfigurationReportRestoreStatusRequest_RestoreStatus_DEFAULT *base.ErrorCode + +func (p *ConfigurationReportRestoreStatusRequest) GetRestoreStatus() *base.ErrorCode { + if !p.IsSetRestoreStatus() { + return ConfigurationReportRestoreStatusRequest_RestoreStatus_DEFAULT + } + return p.RestoreStatus +} + +func (p *ConfigurationReportRestoreStatusRequest) GetProgress() int32 { + return p.Progress +} + +var ConfigurationReportRestoreStatusRequest_Reason_DEFAULT string + +func (p *ConfigurationReportRestoreStatusRequest) GetReason() string { + if !p.IsSetReason() { + return ConfigurationReportRestoreStatusRequest_Reason_DEFAULT + } + return *p.Reason +} +func (p *ConfigurationReportRestoreStatusRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *ConfigurationReportRestoreStatusRequest) IsSetRestoreStatus() bool { + return p.RestoreStatus != nil +} + +func (p *ConfigurationReportRestoreStatusRequest) IsSetReason() bool { + return p.Reason != nil +} + +func (p *ConfigurationReportRestoreStatusRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationReportRestoreStatusRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *ConfigurationReportRestoreStatusRequest) ReadField2(iprot thrift.TProtocol) error { + p.RestoreStatus = &base.ErrorCode{} + if err := p.RestoreStatus.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RestoreStatus), err) + } + return nil +} + +func (p *ConfigurationReportRestoreStatusRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Progress = v + } + return nil +} + +func (p *ConfigurationReportRestoreStatusRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Reason = &v + } + return nil +} + +func (p *ConfigurationReportRestoreStatusRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_report_restore_status_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationReportRestoreStatusRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *ConfigurationReportRestoreStatusRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("restore_status", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:restore_status: ", p), err) + } + if err := p.RestoreStatus.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RestoreStatus), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:restore_status: ", p), err) + } + return err +} + +func (p *ConfigurationReportRestoreStatusRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("progress", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:progress: ", p), err) + } + if err := oprot.WriteI32(int32(p.Progress)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.progress (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:progress: ", p), err) + } + return err +} + +func (p *ConfigurationReportRestoreStatusRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetReason() { + if err := oprot.WriteFieldBegin("reason", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:reason: ", p), err) + } + if err := oprot.WriteString(string(*p.Reason)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.reason (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:reason: ", p), err) + } + } + return err +} + +func (p *ConfigurationReportRestoreStatusRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationReportRestoreStatusRequest(%+v)", *p) +} + +// Attributes: +// - Err +type ConfigurationReportRestoreStatusResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` +} + +func NewConfigurationReportRestoreStatusResponse() *ConfigurationReportRestoreStatusResponse { + return &ConfigurationReportRestoreStatusResponse{} +} + +var ConfigurationReportRestoreStatusResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationReportRestoreStatusResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationReportRestoreStatusResponse_Err_DEFAULT + } + return p.Err +} +func (p *ConfigurationReportRestoreStatusResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationReportRestoreStatusResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationReportRestoreStatusResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationReportRestoreStatusResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_report_restore_status_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationReportRestoreStatusResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationReportRestoreStatusResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationReportRestoreStatusResponse(%+v)", *p) +} + +// Attributes: +// - RestoreAppID +type ConfigurationQueryRestoreRequest struct { + RestoreAppID int32 `thrift:"restore_app_id,1" db:"restore_app_id" json:"restore_app_id"` +} + +func NewConfigurationQueryRestoreRequest() *ConfigurationQueryRestoreRequest { + return &ConfigurationQueryRestoreRequest{} +} + +func (p *ConfigurationQueryRestoreRequest) GetRestoreAppID() int32 { + return p.RestoreAppID +} +func (p *ConfigurationQueryRestoreRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationQueryRestoreRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.RestoreAppID = v + } + return nil +} + +func (p *ConfigurationQueryRestoreRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_query_restore_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationQueryRestoreRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("restore_app_id", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:restore_app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.RestoreAppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.restore_app_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:restore_app_id: ", p), err) + } + return err +} + +func (p *ConfigurationQueryRestoreRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationQueryRestoreRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - RestoreStatus +// - RestoreProgress +type ConfigurationQueryRestoreResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + RestoreStatus []*base.ErrorCode `thrift:"restore_status,2" db:"restore_status" json:"restore_status"` + RestoreProgress []int32 `thrift:"restore_progress,3" db:"restore_progress" json:"restore_progress"` +} + +func NewConfigurationQueryRestoreResponse() *ConfigurationQueryRestoreResponse { + return &ConfigurationQueryRestoreResponse{} +} + +var ConfigurationQueryRestoreResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationQueryRestoreResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationQueryRestoreResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationQueryRestoreResponse) GetRestoreStatus() []*base.ErrorCode { + return p.RestoreStatus +} + +func (p *ConfigurationQueryRestoreResponse) GetRestoreProgress() []int32 { + return p.RestoreProgress +} +func (p *ConfigurationQueryRestoreResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationQueryRestoreResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationQueryRestoreResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationQueryRestoreResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.ErrorCode, 0, size) + p.RestoreStatus = tSlice + for i := 0; i < size; i++ { + _elem9 := &base.ErrorCode{} + if err := _elem9.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err) + } + p.RestoreStatus = append(p.RestoreStatus, _elem9) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryRestoreResponse) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int32, 0, size) + p.RestoreProgress = tSlice + for i := 0; i < size; i++ { + var _elem10 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem10 = v + } + p.RestoreProgress = append(p.RestoreProgress, _elem10) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryRestoreResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_query_restore_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationQueryRestoreResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationQueryRestoreResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("restore_status", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:restore_status: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.RestoreStatus)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.RestoreStatus { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:restore_status: ", p), err) + } + return err +} + +func (p *ConfigurationQueryRestoreResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("restore_progress", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:restore_progress: ", p), err) + } + if err := oprot.WriteListBegin(thrift.I32, len(p.RestoreProgress)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.RestoreProgress { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:restore_progress: ", p), err) + } + return err +} + +func (p *ConfigurationQueryRestoreResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationQueryRestoreResponse(%+v)", *p) +} + +// Attributes: +// - BackupProviderType +// - AppID +// - BackupPath +type StartBackupAppRequest struct { + BackupProviderType string `thrift:"backup_provider_type,1" db:"backup_provider_type" json:"backup_provider_type"` + AppID int32 `thrift:"app_id,2" db:"app_id" json:"app_id"` + BackupPath *string `thrift:"backup_path,3" db:"backup_path" json:"backup_path,omitempty"` +} + +func NewStartBackupAppRequest() *StartBackupAppRequest { + return &StartBackupAppRequest{} +} + +func (p *StartBackupAppRequest) GetBackupProviderType() string { + return p.BackupProviderType +} + +func (p *StartBackupAppRequest) GetAppID() int32 { + return p.AppID +} + +var StartBackupAppRequest_BackupPath_DEFAULT string + +func (p *StartBackupAppRequest) GetBackupPath() string { + if !p.IsSetBackupPath() { + return StartBackupAppRequest_BackupPath_DEFAULT + } + return *p.BackupPath +} +func (p *StartBackupAppRequest) IsSetBackupPath() bool { + return p.BackupPath != nil +} + +func (p *StartBackupAppRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartBackupAppRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.BackupProviderType = v + } + return nil +} + +func (p *StartBackupAppRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *StartBackupAppRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.BackupPath = &v + } + return nil +} + +func (p *StartBackupAppRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_backup_app_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartBackupAppRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_provider_type", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:backup_provider_type: ", p), err) + } + if err := oprot.WriteString(string(p.BackupProviderType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_provider_type (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:backup_provider_type: ", p), err) + } + return err +} + +func (p *StartBackupAppRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_id: ", p), err) + } + return err +} + +func (p *StartBackupAppRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBackupPath() { + if err := oprot.WriteFieldBegin("backup_path", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_path: ", p), err) + } + if err := oprot.WriteString(string(*p.BackupPath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_path (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_path: ", p), err) + } + } + return err +} + +func (p *StartBackupAppRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartBackupAppRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +// - BackupID +type StartBackupAppResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` + BackupID *int64 `thrift:"backup_id,3" db:"backup_id" json:"backup_id,omitempty"` +} + +func NewStartBackupAppResponse() *StartBackupAppResponse { + return &StartBackupAppResponse{} +} + +var StartBackupAppResponse_Err_DEFAULT *base.ErrorCode + +func (p *StartBackupAppResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return StartBackupAppResponse_Err_DEFAULT + } + return p.Err +} + +func (p *StartBackupAppResponse) GetHintMessage() string { + return p.HintMessage +} + +var StartBackupAppResponse_BackupID_DEFAULT int64 + +func (p *StartBackupAppResponse) GetBackupID() int64 { + if !p.IsSetBackupID() { + return StartBackupAppResponse_BackupID_DEFAULT + } + return *p.BackupID +} +func (p *StartBackupAppResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *StartBackupAppResponse) IsSetBackupID() bool { + return p.BackupID != nil +} + +func (p *StartBackupAppResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartBackupAppResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *StartBackupAppResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *StartBackupAppResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.BackupID = &v + } + return nil +} + +func (p *StartBackupAppResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_backup_app_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartBackupAppResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *StartBackupAppResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *StartBackupAppResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBackupID() { + if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_id: ", p), err) + } + if err := oprot.WriteI64(int64(*p.BackupID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_id: ", p), err) + } + } + return err +} + +func (p *StartBackupAppResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartBackupAppResponse(%+v)", *p) +} + +// Attributes: +// - BackupID +// - AppName +// - BackupProviderType +// - BackupPath +// - StartTimeMs +// - EndTimeMs +// - IsBackupFailed +type BackupItem struct { + BackupID int64 `thrift:"backup_id,1" db:"backup_id" json:"backup_id"` + AppName string `thrift:"app_name,2" db:"app_name" json:"app_name"` + BackupProviderType string `thrift:"backup_provider_type,3" db:"backup_provider_type" json:"backup_provider_type"` + BackupPath string `thrift:"backup_path,4" db:"backup_path" json:"backup_path"` + StartTimeMs int64 `thrift:"start_time_ms,5" db:"start_time_ms" json:"start_time_ms"` + EndTimeMs int64 `thrift:"end_time_ms,6" db:"end_time_ms" json:"end_time_ms"` + IsBackupFailed bool `thrift:"is_backup_failed,7" db:"is_backup_failed" json:"is_backup_failed"` +} + +func NewBackupItem() *BackupItem { + return &BackupItem{} +} + +func (p *BackupItem) GetBackupID() int64 { + return p.BackupID +} + +func (p *BackupItem) GetAppName() string { + return p.AppName +} + +func (p *BackupItem) GetBackupProviderType() string { + return p.BackupProviderType +} + +func (p *BackupItem) GetBackupPath() string { + return p.BackupPath +} + +func (p *BackupItem) GetStartTimeMs() int64 { + return p.StartTimeMs +} + +func (p *BackupItem) GetEndTimeMs() int64 { + return p.EndTimeMs +} + +func (p *BackupItem) GetIsBackupFailed() bool { + return p.IsBackupFailed +} +func (p *BackupItem) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BackupItem) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.BackupID = v + } + return nil +} + +func (p *BackupItem) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *BackupItem) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.BackupProviderType = v + } + return nil +} + +func (p *BackupItem) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.BackupPath = v + } + return nil +} + +func (p *BackupItem) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.StartTimeMs = v + } + return nil +} + +func (p *BackupItem) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.EndTimeMs = v + } + return nil +} + +func (p *BackupItem) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.IsBackupFailed = v + } + return nil +} + +func (p *BackupItem) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("backup_item"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BackupItem) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:backup_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.BackupID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:backup_id: ", p), err) + } + return err +} + +func (p *BackupItem) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_name: ", p), err) + } + return err +} + +func (p *BackupItem) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_provider_type", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_provider_type: ", p), err) + } + if err := oprot.WriteString(string(p.BackupProviderType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_provider_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_provider_type: ", p), err) + } + return err +} + +func (p *BackupItem) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("backup_path", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:backup_path: ", p), err) + } + if err := oprot.WriteString(string(p.BackupPath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_path (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:backup_path: ", p), err) + } + return err +} + +func (p *BackupItem) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_time_ms", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:start_time_ms: ", p), err) + } + if err := oprot.WriteI64(int64(p.StartTimeMs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.start_time_ms (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:start_time_ms: ", p), err) + } + return err +} + +func (p *BackupItem) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("end_time_ms", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:end_time_ms: ", p), err) + } + if err := oprot.WriteI64(int64(p.EndTimeMs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.end_time_ms (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:end_time_ms: ", p), err) + } + return err +} + +func (p *BackupItem) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_backup_failed", thrift.BOOL, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:is_backup_failed: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsBackupFailed)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_backup_failed (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:is_backup_failed: ", p), err) + } + return err +} + +func (p *BackupItem) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackupItem(%+v)", *p) +} + +// Attributes: +// - AppID +// - BackupID +type QueryBackupStatusRequest struct { + AppID int32 `thrift:"app_id,1" db:"app_id" json:"app_id"` + BackupID *int64 `thrift:"backup_id,2" db:"backup_id" json:"backup_id,omitempty"` +} + +func NewQueryBackupStatusRequest() *QueryBackupStatusRequest { + return &QueryBackupStatusRequest{} +} + +func (p *QueryBackupStatusRequest) GetAppID() int32 { + return p.AppID +} + +var QueryBackupStatusRequest_BackupID_DEFAULT int64 + +func (p *QueryBackupStatusRequest) GetBackupID() int64 { + if !p.IsSetBackupID() { + return QueryBackupStatusRequest_BackupID_DEFAULT + } + return *p.BackupID +} +func (p *QueryBackupStatusRequest) IsSetBackupID() bool { + return p.BackupID != nil +} + +func (p *QueryBackupStatusRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryBackupStatusRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *QueryBackupStatusRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.BackupID = &v + } + return nil +} + +func (p *QueryBackupStatusRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_backup_status_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryBackupStatusRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_id: ", p), err) + } + return err +} + +func (p *QueryBackupStatusRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetBackupID() { + if err := oprot.WriteFieldBegin("backup_id", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:backup_id: ", p), err) + } + if err := oprot.WriteI64(int64(*p.BackupID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.backup_id (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:backup_id: ", p), err) + } + } + return err +} + +func (p *QueryBackupStatusRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryBackupStatusRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +// - BackupItems +type QueryBackupStatusResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` + BackupItems []*BackupItem `thrift:"backup_items,3" db:"backup_items" json:"backup_items,omitempty"` +} + +func NewQueryBackupStatusResponse() *QueryBackupStatusResponse { + return &QueryBackupStatusResponse{} +} + +var QueryBackupStatusResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryBackupStatusResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryBackupStatusResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryBackupStatusResponse) GetHintMessage() string { + return p.HintMessage +} + +var QueryBackupStatusResponse_BackupItems_DEFAULT []*BackupItem + +func (p *QueryBackupStatusResponse) GetBackupItems() []*BackupItem { + return p.BackupItems +} +func (p *QueryBackupStatusResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryBackupStatusResponse) IsSetBackupItems() bool { + return p.BackupItems != nil +} + +func (p *QueryBackupStatusResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryBackupStatusResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryBackupStatusResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *QueryBackupStatusResponse) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BackupItem, 0, size) + p.BackupItems = tSlice + for i := 0; i < size; i++ { + _elem11 := &BackupItem{} + if err := _elem11.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem11), err) + } + p.BackupItems = append(p.BackupItems, _elem11) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryBackupStatusResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_backup_status_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryBackupStatusResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryBackupStatusResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *QueryBackupStatusResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetBackupItems() { + if err := oprot.WriteFieldBegin("backup_items", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:backup_items: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BackupItems)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BackupItems { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:backup_items: ", p), err) + } + } + return err +} + +func (p *QueryBackupStatusResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryBackupStatusResponse(%+v)", *p) +} diff --git a/go-client/idl/admin/bulk_load-consts.go b/go-client/idl/admin/bulk_load-consts.go new file mode 100644 index 0000000000..757b943ef3 --- /dev/null +++ b/go-client/idl/admin/bulk_load-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/admin/bulk_load.go b/go-client/idl/admin/bulk_load.go new file mode 100644 index 0000000000..f7529538c8 --- /dev/null +++ b/go-client/idl/admin/bulk_load.go @@ -0,0 +1,4536 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +type BulkLoadStatus int64 + +const ( + BulkLoadStatus_BLS_INVALID BulkLoadStatus = 0 + BulkLoadStatus_BLS_DOWNLOADING BulkLoadStatus = 1 + BulkLoadStatus_BLS_DOWNLOADED BulkLoadStatus = 2 + BulkLoadStatus_BLS_INGESTING BulkLoadStatus = 3 + BulkLoadStatus_BLS_SUCCEED BulkLoadStatus = 4 + BulkLoadStatus_BLS_FAILED BulkLoadStatus = 5 + BulkLoadStatus_BLS_PAUSING BulkLoadStatus = 6 + BulkLoadStatus_BLS_PAUSED BulkLoadStatus = 7 + BulkLoadStatus_BLS_CANCELED BulkLoadStatus = 8 +) + +func (p BulkLoadStatus) String() string { + switch p { + case BulkLoadStatus_BLS_INVALID: + return "BLS_INVALID" + case BulkLoadStatus_BLS_DOWNLOADING: + return "BLS_DOWNLOADING" + case BulkLoadStatus_BLS_DOWNLOADED: + return "BLS_DOWNLOADED" + case BulkLoadStatus_BLS_INGESTING: + return "BLS_INGESTING" + case BulkLoadStatus_BLS_SUCCEED: + return "BLS_SUCCEED" + case BulkLoadStatus_BLS_FAILED: + return "BLS_FAILED" + case BulkLoadStatus_BLS_PAUSING: + return "BLS_PAUSING" + case BulkLoadStatus_BLS_PAUSED: + return "BLS_PAUSED" + case BulkLoadStatus_BLS_CANCELED: + return "BLS_CANCELED" + } + return "" +} + +func BulkLoadStatusFromString(s string) (BulkLoadStatus, error) { + switch s { + case "BLS_INVALID": + return BulkLoadStatus_BLS_INVALID, nil + case "BLS_DOWNLOADING": + return BulkLoadStatus_BLS_DOWNLOADING, nil + case "BLS_DOWNLOADED": + return BulkLoadStatus_BLS_DOWNLOADED, nil + case "BLS_INGESTING": + return BulkLoadStatus_BLS_INGESTING, nil + case "BLS_SUCCEED": + return BulkLoadStatus_BLS_SUCCEED, nil + case "BLS_FAILED": + return BulkLoadStatus_BLS_FAILED, nil + case "BLS_PAUSING": + return BulkLoadStatus_BLS_PAUSING, nil + case "BLS_PAUSED": + return BulkLoadStatus_BLS_PAUSED, nil + case "BLS_CANCELED": + return BulkLoadStatus_BLS_CANCELED, nil + } + return BulkLoadStatus(0), fmt.Errorf("not a valid BulkLoadStatus string") +} + +func BulkLoadStatusPtr(v BulkLoadStatus) *BulkLoadStatus { return &v } + +func (p BulkLoadStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *BulkLoadStatus) UnmarshalText(text []byte) error { + q, err := BulkLoadStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *BulkLoadStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = BulkLoadStatus(v) + return nil +} + +func (p *BulkLoadStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type IngestionStatus int64 + +const ( + IngestionStatus_IS_INVALID IngestionStatus = 0 + IngestionStatus_IS_RUNNING IngestionStatus = 1 + IngestionStatus_IS_SUCCEED IngestionStatus = 2 + IngestionStatus_IS_FAILED IngestionStatus = 3 +) + +func (p IngestionStatus) String() string { + switch p { + case IngestionStatus_IS_INVALID: + return "IS_INVALID" + case IngestionStatus_IS_RUNNING: + return "IS_RUNNING" + case IngestionStatus_IS_SUCCEED: + return "IS_SUCCEED" + case IngestionStatus_IS_FAILED: + return "IS_FAILED" + } + return "" +} + +func IngestionStatusFromString(s string) (IngestionStatus, error) { + switch s { + case "IS_INVALID": + return IngestionStatus_IS_INVALID, nil + case "IS_RUNNING": + return IngestionStatus_IS_RUNNING, nil + case "IS_SUCCEED": + return IngestionStatus_IS_SUCCEED, nil + case "IS_FAILED": + return IngestionStatus_IS_FAILED, nil + } + return IngestionStatus(0), fmt.Errorf("not a valid IngestionStatus string") +} + +func IngestionStatusPtr(v IngestionStatus) *IngestionStatus { return &v } + +func (p IngestionStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *IngestionStatus) UnmarshalText(text []byte) error { + q, err := IngestionStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *IngestionStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = IngestionStatus(v) + return nil +} + +func (p *IngestionStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type BulkLoadControlType int64 + +const ( + BulkLoadControlType_BLC_PAUSE BulkLoadControlType = 0 + BulkLoadControlType_BLC_RESTART BulkLoadControlType = 1 + BulkLoadControlType_BLC_CANCEL BulkLoadControlType = 2 + BulkLoadControlType_BLC_FORCE_CANCEL BulkLoadControlType = 3 +) + +func (p BulkLoadControlType) String() string { + switch p { + case BulkLoadControlType_BLC_PAUSE: + return "BLC_PAUSE" + case BulkLoadControlType_BLC_RESTART: + return "BLC_RESTART" + case BulkLoadControlType_BLC_CANCEL: + return "BLC_CANCEL" + case BulkLoadControlType_BLC_FORCE_CANCEL: + return "BLC_FORCE_CANCEL" + } + return "" +} + +func BulkLoadControlTypeFromString(s string) (BulkLoadControlType, error) { + switch s { + case "BLC_PAUSE": + return BulkLoadControlType_BLC_PAUSE, nil + case "BLC_RESTART": + return BulkLoadControlType_BLC_RESTART, nil + case "BLC_CANCEL": + return BulkLoadControlType_BLC_CANCEL, nil + case "BLC_FORCE_CANCEL": + return BulkLoadControlType_BLC_FORCE_CANCEL, nil + } + return BulkLoadControlType(0), fmt.Errorf("not a valid BulkLoadControlType string") +} + +func BulkLoadControlTypePtr(v BulkLoadControlType) *BulkLoadControlType { return &v } + +func (p BulkLoadControlType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *BulkLoadControlType) UnmarshalText(text []byte) error { + q, err := BulkLoadControlTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *BulkLoadControlType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = BulkLoadControlType(v) + return nil +} + +func (p *BulkLoadControlType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Files +// - FileTotalSize +type BulkLoadMetadata struct { + Files []*FileMeta `thrift:"files,1" db:"files" json:"files"` + FileTotalSize int64 `thrift:"file_total_size,2" db:"file_total_size" json:"file_total_size"` +} + +func NewBulkLoadMetadata() *BulkLoadMetadata { + return &BulkLoadMetadata{} +} + +func (p *BulkLoadMetadata) GetFiles() []*FileMeta { + return p.Files +} + +func (p *BulkLoadMetadata) GetFileTotalSize() int64 { + return p.FileTotalSize +} +func (p *BulkLoadMetadata) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BulkLoadMetadata) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*FileMeta, 0, size) + p.Files = tSlice + for i := 0; i < size; i++ { + _elem0 := &FileMeta{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Files = append(p.Files, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *BulkLoadMetadata) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.FileTotalSize = v + } + return nil +} + +func (p *BulkLoadMetadata) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("bulk_load_metadata"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BulkLoadMetadata) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("files", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:files: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Files)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Files { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:files: ", p), err) + } + return err +} + +func (p *BulkLoadMetadata) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("file_total_size", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:file_total_size: ", p), err) + } + if err := oprot.WriteI64(int64(p.FileTotalSize)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.file_total_size (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:file_total_size: ", p), err) + } + return err +} + +func (p *BulkLoadMetadata) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BulkLoadMetadata(%+v)", *p) +} + +// Attributes: +// - AppName +// - ClusterName +// - FileProviderType +// - RemoteRootPath +// - IngestBehind +type StartBulkLoadRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + ClusterName string `thrift:"cluster_name,2" db:"cluster_name" json:"cluster_name"` + FileProviderType string `thrift:"file_provider_type,3" db:"file_provider_type" json:"file_provider_type"` + RemoteRootPath string `thrift:"remote_root_path,4" db:"remote_root_path" json:"remote_root_path"` + IngestBehind bool `thrift:"ingest_behind,5" db:"ingest_behind" json:"ingest_behind"` +} + +func NewStartBulkLoadRequest() *StartBulkLoadRequest { + return &StartBulkLoadRequest{} +} + +func (p *StartBulkLoadRequest) GetAppName() string { + return p.AppName +} + +func (p *StartBulkLoadRequest) GetClusterName() string { + return p.ClusterName +} + +func (p *StartBulkLoadRequest) GetFileProviderType() string { + return p.FileProviderType +} + +func (p *StartBulkLoadRequest) GetRemoteRootPath() string { + return p.RemoteRootPath +} + +func (p *StartBulkLoadRequest) GetIngestBehind() bool { + return p.IngestBehind +} +func (p *StartBulkLoadRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartBulkLoadRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *StartBulkLoadRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.ClusterName = v + } + return nil +} + +func (p *StartBulkLoadRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.FileProviderType = v + } + return nil +} + +func (p *StartBulkLoadRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.RemoteRootPath = v + } + return nil +} + +func (p *StartBulkLoadRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.IngestBehind = v + } + return nil +} + +func (p *StartBulkLoadRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_bulk_load_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartBulkLoadRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *StartBulkLoadRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("cluster_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:cluster_name: ", p), err) + } + if err := oprot.WriteString(string(p.ClusterName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.cluster_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:cluster_name: ", p), err) + } + return err +} + +func (p *StartBulkLoadRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("file_provider_type", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:file_provider_type: ", p), err) + } + if err := oprot.WriteString(string(p.FileProviderType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.file_provider_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:file_provider_type: ", p), err) + } + return err +} + +func (p *StartBulkLoadRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("remote_root_path", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:remote_root_path: ", p), err) + } + if err := oprot.WriteString(string(p.RemoteRootPath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_root_path (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:remote_root_path: ", p), err) + } + return err +} + +func (p *StartBulkLoadRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ingest_behind", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:ingest_behind: ", p), err) + } + if err := oprot.WriteBool(bool(p.IngestBehind)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ingest_behind (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:ingest_behind: ", p), err) + } + return err +} + +func (p *StartBulkLoadRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartBulkLoadRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +type StartBulkLoadResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg"` +} + +func NewStartBulkLoadResponse() *StartBulkLoadResponse { + return &StartBulkLoadResponse{} +} + +var StartBulkLoadResponse_Err_DEFAULT *base.ErrorCode + +func (p *StartBulkLoadResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return StartBulkLoadResponse_Err_DEFAULT + } + return p.Err +} + +func (p *StartBulkLoadResponse) GetHintMsg() string { + return p.HintMsg +} +func (p *StartBulkLoadResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *StartBulkLoadResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartBulkLoadResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *StartBulkLoadResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = v + } + return nil +} + +func (p *StartBulkLoadResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_bulk_load_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartBulkLoadResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *StartBulkLoadResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + return err +} + +func (p *StartBulkLoadResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartBulkLoadResponse(%+v)", *p) +} + +// Attributes: +// - DownloadProgress +// - DownloadStatus +// - IngestStatus +// - IsCleanedUp +// - IsPaused +type PartitionBulkLoadState struct { + DownloadProgress int32 `thrift:"download_progress,1" db:"download_progress" json:"download_progress"` + DownloadStatus *base.ErrorCode `thrift:"download_status,2" db:"download_status" json:"download_status,omitempty"` + IngestStatus IngestionStatus `thrift:"ingest_status,3" db:"ingest_status" json:"ingest_status"` + IsCleanedUp bool `thrift:"is_cleaned_up,4" db:"is_cleaned_up" json:"is_cleaned_up"` + IsPaused bool `thrift:"is_paused,5" db:"is_paused" json:"is_paused"` +} + +func NewPartitionBulkLoadState() *PartitionBulkLoadState { + return &PartitionBulkLoadState{ + IngestStatus: 0, + } +} + +var PartitionBulkLoadState_DownloadProgress_DEFAULT int32 = 0 + +func (p *PartitionBulkLoadState) GetDownloadProgress() int32 { + return p.DownloadProgress +} + +var PartitionBulkLoadState_DownloadStatus_DEFAULT *base.ErrorCode + +func (p *PartitionBulkLoadState) GetDownloadStatus() *base.ErrorCode { + if !p.IsSetDownloadStatus() { + return PartitionBulkLoadState_DownloadStatus_DEFAULT + } + return p.DownloadStatus +} + +var PartitionBulkLoadState_IngestStatus_DEFAULT IngestionStatus = 0 + +func (p *PartitionBulkLoadState) GetIngestStatus() IngestionStatus { + return p.IngestStatus +} + +var PartitionBulkLoadState_IsCleanedUp_DEFAULT bool = false + +func (p *PartitionBulkLoadState) GetIsCleanedUp() bool { + return p.IsCleanedUp +} + +var PartitionBulkLoadState_IsPaused_DEFAULT bool = false + +func (p *PartitionBulkLoadState) GetIsPaused() bool { + return p.IsPaused +} +func (p *PartitionBulkLoadState) IsSetDownloadProgress() bool { + return p.DownloadProgress != PartitionBulkLoadState_DownloadProgress_DEFAULT +} + +func (p *PartitionBulkLoadState) IsSetDownloadStatus() bool { + return p.DownloadStatus != nil +} + +func (p *PartitionBulkLoadState) IsSetIngestStatus() bool { + return p.IngestStatus != PartitionBulkLoadState_IngestStatus_DEFAULT +} + +func (p *PartitionBulkLoadState) IsSetIsCleanedUp() bool { + return p.IsCleanedUp != PartitionBulkLoadState_IsCleanedUp_DEFAULT +} + +func (p *PartitionBulkLoadState) IsSetIsPaused() bool { + return p.IsPaused != PartitionBulkLoadState_IsPaused_DEFAULT +} + +func (p *PartitionBulkLoadState) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *PartitionBulkLoadState) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.DownloadProgress = v + } + return nil +} + +func (p *PartitionBulkLoadState) ReadField2(iprot thrift.TProtocol) error { + p.DownloadStatus = &base.ErrorCode{} + if err := p.DownloadStatus.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DownloadStatus), err) + } + return nil +} + +func (p *PartitionBulkLoadState) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := IngestionStatus(v) + p.IngestStatus = temp + } + return nil +} + +func (p *PartitionBulkLoadState) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.IsCleanedUp = v + } + return nil +} + +func (p *PartitionBulkLoadState) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.IsPaused = v + } + return nil +} + +func (p *PartitionBulkLoadState) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("partition_bulk_load_state"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *PartitionBulkLoadState) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDownloadProgress() { + if err := oprot.WriteFieldBegin("download_progress", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:download_progress: ", p), err) + } + if err := oprot.WriteI32(int32(p.DownloadProgress)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.download_progress (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:download_progress: ", p), err) + } + } + return err +} + +func (p *PartitionBulkLoadState) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDownloadStatus() { + if err := oprot.WriteFieldBegin("download_status", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:download_status: ", p), err) + } + if err := p.DownloadStatus.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DownloadStatus), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:download_status: ", p), err) + } + } + return err +} + +func (p *PartitionBulkLoadState) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetIngestStatus() { + if err := oprot.WriteFieldBegin("ingest_status", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:ingest_status: ", p), err) + } + if err := oprot.WriteI32(int32(p.IngestStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ingest_status (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:ingest_status: ", p), err) + } + } + return err +} + +func (p *PartitionBulkLoadState) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetIsCleanedUp() { + if err := oprot.WriteFieldBegin("is_cleaned_up", thrift.BOOL, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:is_cleaned_up: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsCleanedUp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_cleaned_up (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:is_cleaned_up: ", p), err) + } + } + return err +} + +func (p *PartitionBulkLoadState) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetIsPaused() { + if err := oprot.WriteFieldBegin("is_paused", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:is_paused: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsPaused)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_paused (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:is_paused: ", p), err) + } + } + return err +} + +func (p *PartitionBulkLoadState) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("PartitionBulkLoadState(%+v)", *p) +} + +// Attributes: +// - Pid +// - AppName +// - Primary +// - RemoteProviderName +// - ClusterName +// - Ballot +// - MetaBulkLoadStatus +// - QueryBulkLoadMetadata +// - RemoteRootPath +// - HpPrimary +type BulkLoadRequest struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + AppName string `thrift:"app_name,2" db:"app_name" json:"app_name"` + Primary *base.RPCAddress `thrift:"primary,3" db:"primary" json:"primary"` + RemoteProviderName string `thrift:"remote_provider_name,4" db:"remote_provider_name" json:"remote_provider_name"` + ClusterName string `thrift:"cluster_name,5" db:"cluster_name" json:"cluster_name"` + Ballot int64 `thrift:"ballot,6" db:"ballot" json:"ballot"` + MetaBulkLoadStatus BulkLoadStatus `thrift:"meta_bulk_load_status,7" db:"meta_bulk_load_status" json:"meta_bulk_load_status"` + QueryBulkLoadMetadata bool `thrift:"query_bulk_load_metadata,8" db:"query_bulk_load_metadata" json:"query_bulk_load_metadata"` + RemoteRootPath string `thrift:"remote_root_path,9" db:"remote_root_path" json:"remote_root_path"` + HpPrimary *base.HostPort `thrift:"hp_primary,10" db:"hp_primary" json:"hp_primary,omitempty"` +} + +func NewBulkLoadRequest() *BulkLoadRequest { + return &BulkLoadRequest{} +} + +var BulkLoadRequest_Pid_DEFAULT *base.Gpid + +func (p *BulkLoadRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return BulkLoadRequest_Pid_DEFAULT + } + return p.Pid +} + +func (p *BulkLoadRequest) GetAppName() string { + return p.AppName +} + +var BulkLoadRequest_Primary_DEFAULT *base.RPCAddress + +func (p *BulkLoadRequest) GetPrimary() *base.RPCAddress { + if !p.IsSetPrimary() { + return BulkLoadRequest_Primary_DEFAULT + } + return p.Primary +} + +func (p *BulkLoadRequest) GetRemoteProviderName() string { + return p.RemoteProviderName +} + +func (p *BulkLoadRequest) GetClusterName() string { + return p.ClusterName +} + +func (p *BulkLoadRequest) GetBallot() int64 { + return p.Ballot +} + +func (p *BulkLoadRequest) GetMetaBulkLoadStatus() BulkLoadStatus { + return p.MetaBulkLoadStatus +} + +func (p *BulkLoadRequest) GetQueryBulkLoadMetadata() bool { + return p.QueryBulkLoadMetadata +} + +func (p *BulkLoadRequest) GetRemoteRootPath() string { + return p.RemoteRootPath +} + +var BulkLoadRequest_HpPrimary_DEFAULT *base.HostPort + +func (p *BulkLoadRequest) GetHpPrimary() *base.HostPort { + if !p.IsSetHpPrimary() { + return BulkLoadRequest_HpPrimary_DEFAULT + } + return p.HpPrimary +} +func (p *BulkLoadRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *BulkLoadRequest) IsSetPrimary() bool { + return p.Primary != nil +} + +func (p *BulkLoadRequest) IsSetHpPrimary() bool { + return p.HpPrimary != nil +} + +func (p *BulkLoadRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.STRING { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField10(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BulkLoadRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *BulkLoadRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *BulkLoadRequest) ReadField3(iprot thrift.TProtocol) error { + p.Primary = &base.RPCAddress{} + if err := p.Primary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Primary), err) + } + return nil +} + +func (p *BulkLoadRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.RemoteProviderName = v + } + return nil +} + +func (p *BulkLoadRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ClusterName = v + } + return nil +} + +func (p *BulkLoadRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *BulkLoadRequest) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + temp := BulkLoadStatus(v) + p.MetaBulkLoadStatus = temp + } + return nil +} + +func (p *BulkLoadRequest) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.QueryBulkLoadMetadata = v + } + return nil +} + +func (p *BulkLoadRequest) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.RemoteRootPath = v + } + return nil +} + +func (p *BulkLoadRequest) ReadField10(iprot thrift.TProtocol) error { + p.HpPrimary = &base.HostPort{} + if err := p.HpPrimary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpPrimary), err) + } + return nil +} + +func (p *BulkLoadRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("bulk_load_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BulkLoadRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_name: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("primary", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:primary: ", p), err) + } + if err := p.Primary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Primary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:primary: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("remote_provider_name", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:remote_provider_name: ", p), err) + } + if err := oprot.WriteString(string(p.RemoteProviderName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_provider_name (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:remote_provider_name: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("cluster_name", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:cluster_name: ", p), err) + } + if err := oprot.WriteString(string(p.ClusterName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.cluster_name (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:cluster_name: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:ballot: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("meta_bulk_load_status", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:meta_bulk_load_status: ", p), err) + } + if err := oprot.WriteI32(int32(p.MetaBulkLoadStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.meta_bulk_load_status (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:meta_bulk_load_status: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("query_bulk_load_metadata", thrift.BOOL, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:query_bulk_load_metadata: ", p), err) + } + if err := oprot.WriteBool(bool(p.QueryBulkLoadMetadata)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.query_bulk_load_metadata (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:query_bulk_load_metadata: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField9(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("remote_root_path", thrift.STRING, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:remote_root_path: ", p), err) + } + if err := oprot.WriteString(string(p.RemoteRootPath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_root_path (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:remote_root_path: ", p), err) + } + return err +} + +func (p *BulkLoadRequest) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetHpPrimary() { + if err := oprot.WriteFieldBegin("hp_primary", thrift.STRUCT, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:hp_primary: ", p), err) + } + if err := p.HpPrimary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpPrimary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:hp_primary: ", p), err) + } + } + return err +} + +func (p *BulkLoadRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BulkLoadRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Pid +// - AppName +// - PrimaryBulkLoadStatus +// - GroupBulkLoadState +// - Metadata +// - TotalDownloadProgress +// - IsGroupIngestionFinished +// - IsGroupBulkLoadContextCleanedUp +// - IsGroupBulkLoadPaused +// - HpGroupBulkLoadState +type BulkLoadResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Pid *base.Gpid `thrift:"pid,2" db:"pid" json:"pid"` + AppName string `thrift:"app_name,3" db:"app_name" json:"app_name"` + PrimaryBulkLoadStatus BulkLoadStatus `thrift:"primary_bulk_load_status,4" db:"primary_bulk_load_status" json:"primary_bulk_load_status"` + GroupBulkLoadState map[*base.RPCAddress]*PartitionBulkLoadState `thrift:"group_bulk_load_state,5" db:"group_bulk_load_state" json:"group_bulk_load_state"` + Metadata *BulkLoadMetadata `thrift:"metadata,6" db:"metadata" json:"metadata,omitempty"` + TotalDownloadProgress *int32 `thrift:"total_download_progress,7" db:"total_download_progress" json:"total_download_progress,omitempty"` + IsGroupIngestionFinished *bool `thrift:"is_group_ingestion_finished,8" db:"is_group_ingestion_finished" json:"is_group_ingestion_finished,omitempty"` + IsGroupBulkLoadContextCleanedUp *bool `thrift:"is_group_bulk_load_context_cleaned_up,9" db:"is_group_bulk_load_context_cleaned_up" json:"is_group_bulk_load_context_cleaned_up,omitempty"` + IsGroupBulkLoadPaused *bool `thrift:"is_group_bulk_load_paused,10" db:"is_group_bulk_load_paused" json:"is_group_bulk_load_paused,omitempty"` + HpGroupBulkLoadState map[*base.HostPort]*PartitionBulkLoadState `thrift:"hp_group_bulk_load_state,11" db:"hp_group_bulk_load_state" json:"hp_group_bulk_load_state,omitempty"` +} + +func NewBulkLoadResponse() *BulkLoadResponse { + return &BulkLoadResponse{} +} + +var BulkLoadResponse_Err_DEFAULT *base.ErrorCode + +func (p *BulkLoadResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return BulkLoadResponse_Err_DEFAULT + } + return p.Err +} + +var BulkLoadResponse_Pid_DEFAULT *base.Gpid + +func (p *BulkLoadResponse) GetPid() *base.Gpid { + if !p.IsSetPid() { + return BulkLoadResponse_Pid_DEFAULT + } + return p.Pid +} + +func (p *BulkLoadResponse) GetAppName() string { + return p.AppName +} + +func (p *BulkLoadResponse) GetPrimaryBulkLoadStatus() BulkLoadStatus { + return p.PrimaryBulkLoadStatus +} + +func (p *BulkLoadResponse) GetGroupBulkLoadState() map[*base.RPCAddress]*PartitionBulkLoadState { + return p.GroupBulkLoadState +} + +var BulkLoadResponse_Metadata_DEFAULT *BulkLoadMetadata + +func (p *BulkLoadResponse) GetMetadata() *BulkLoadMetadata { + if !p.IsSetMetadata() { + return BulkLoadResponse_Metadata_DEFAULT + } + return p.Metadata +} + +var BulkLoadResponse_TotalDownloadProgress_DEFAULT int32 + +func (p *BulkLoadResponse) GetTotalDownloadProgress() int32 { + if !p.IsSetTotalDownloadProgress() { + return BulkLoadResponse_TotalDownloadProgress_DEFAULT + } + return *p.TotalDownloadProgress +} + +var BulkLoadResponse_IsGroupIngestionFinished_DEFAULT bool + +func (p *BulkLoadResponse) GetIsGroupIngestionFinished() bool { + if !p.IsSetIsGroupIngestionFinished() { + return BulkLoadResponse_IsGroupIngestionFinished_DEFAULT + } + return *p.IsGroupIngestionFinished +} + +var BulkLoadResponse_IsGroupBulkLoadContextCleanedUp_DEFAULT bool + +func (p *BulkLoadResponse) GetIsGroupBulkLoadContextCleanedUp() bool { + if !p.IsSetIsGroupBulkLoadContextCleanedUp() { + return BulkLoadResponse_IsGroupBulkLoadContextCleanedUp_DEFAULT + } + return *p.IsGroupBulkLoadContextCleanedUp +} + +var BulkLoadResponse_IsGroupBulkLoadPaused_DEFAULT bool + +func (p *BulkLoadResponse) GetIsGroupBulkLoadPaused() bool { + if !p.IsSetIsGroupBulkLoadPaused() { + return BulkLoadResponse_IsGroupBulkLoadPaused_DEFAULT + } + return *p.IsGroupBulkLoadPaused +} + +var BulkLoadResponse_HpGroupBulkLoadState_DEFAULT map[*base.HostPort]*PartitionBulkLoadState + +func (p *BulkLoadResponse) GetHpGroupBulkLoadState() map[*base.HostPort]*PartitionBulkLoadState { + return p.HpGroupBulkLoadState +} +func (p *BulkLoadResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *BulkLoadResponse) IsSetPid() bool { + return p.Pid != nil +} + +func (p *BulkLoadResponse) IsSetMetadata() bool { + return p.Metadata != nil +} + +func (p *BulkLoadResponse) IsSetTotalDownloadProgress() bool { + return p.TotalDownloadProgress != nil +} + +func (p *BulkLoadResponse) IsSetIsGroupIngestionFinished() bool { + return p.IsGroupIngestionFinished != nil +} + +func (p *BulkLoadResponse) IsSetIsGroupBulkLoadContextCleanedUp() bool { + return p.IsGroupBulkLoadContextCleanedUp != nil +} + +func (p *BulkLoadResponse) IsSetIsGroupBulkLoadPaused() bool { + return p.IsGroupBulkLoadPaused != nil +} + +func (p *BulkLoadResponse) IsSetHpGroupBulkLoadState() bool { + return p.HpGroupBulkLoadState != nil +} + +func (p *BulkLoadResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.MAP { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField10(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.MAP { + if err := p.ReadField11(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BulkLoadResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *BulkLoadResponse) ReadField2(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *BulkLoadResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *BulkLoadResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + temp := BulkLoadStatus(v) + p.PrimaryBulkLoadStatus = temp + } + return nil +} + +func (p *BulkLoadResponse) ReadField5(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[*base.RPCAddress]*PartitionBulkLoadState, size) + p.GroupBulkLoadState = tMap + for i := 0; i < size; i++ { + _key1 := &base.RPCAddress{} + if err := _key1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _key1), err) + } + _val2 := &PartitionBulkLoadState{ + IngestStatus: 0, + } + if err := _val2.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _val2), err) + } + p.GroupBulkLoadState[_key1] = _val2 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *BulkLoadResponse) ReadField6(iprot thrift.TProtocol) error { + p.Metadata = &BulkLoadMetadata{} + if err := p.Metadata.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Metadata), err) + } + return nil +} + +func (p *BulkLoadResponse) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.TotalDownloadProgress = &v + } + return nil +} + +func (p *BulkLoadResponse) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.IsGroupIngestionFinished = &v + } + return nil +} + +func (p *BulkLoadResponse) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.IsGroupBulkLoadContextCleanedUp = &v + } + return nil +} + +func (p *BulkLoadResponse) ReadField10(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + p.IsGroupBulkLoadPaused = &v + } + return nil +} + +func (p *BulkLoadResponse) ReadField11(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[*base.HostPort]*PartitionBulkLoadState, size) + p.HpGroupBulkLoadState = tMap + for i := 0; i < size; i++ { + _key3 := &base.HostPort{} + if err := _key3.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _key3), err) + } + _val4 := &PartitionBulkLoadState{ + IngestStatus: 0, + } + if err := _val4.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _val4), err) + } + p.HpGroupBulkLoadState[_key3] = _val4 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *BulkLoadResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("bulk_load_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + if err := p.writeField11(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BulkLoadResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *BulkLoadResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:pid: ", p), err) + } + return err +} + +func (p *BulkLoadResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_name: ", p), err) + } + return err +} + +func (p *BulkLoadResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("primary_bulk_load_status", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:primary_bulk_load_status: ", p), err) + } + if err := oprot.WriteI32(int32(p.PrimaryBulkLoadStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.primary_bulk_load_status (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:primary_bulk_load_status: ", p), err) + } + return err +} + +func (p *BulkLoadResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("group_bulk_load_state", thrift.MAP, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:group_bulk_load_state: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.STRUCT, thrift.STRUCT, len(p.GroupBulkLoadState)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.GroupBulkLoadState { + if err := k.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", k), err) + } + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:group_bulk_load_state: ", p), err) + } + return err +} + +func (p *BulkLoadResponse) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetMetadata() { + if err := oprot.WriteFieldBegin("metadata", thrift.STRUCT, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:metadata: ", p), err) + } + if err := p.Metadata.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Metadata), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:metadata: ", p), err) + } + } + return err +} + +func (p *BulkLoadResponse) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetTotalDownloadProgress() { + if err := oprot.WriteFieldBegin("total_download_progress", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:total_download_progress: ", p), err) + } + if err := oprot.WriteI32(int32(*p.TotalDownloadProgress)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.total_download_progress (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:total_download_progress: ", p), err) + } + } + return err +} + +func (p *BulkLoadResponse) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetIsGroupIngestionFinished() { + if err := oprot.WriteFieldBegin("is_group_ingestion_finished", thrift.BOOL, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:is_group_ingestion_finished: ", p), err) + } + if err := oprot.WriteBool(bool(*p.IsGroupIngestionFinished)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_group_ingestion_finished (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:is_group_ingestion_finished: ", p), err) + } + } + return err +} + +func (p *BulkLoadResponse) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetIsGroupBulkLoadContextCleanedUp() { + if err := oprot.WriteFieldBegin("is_group_bulk_load_context_cleaned_up", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:is_group_bulk_load_context_cleaned_up: ", p), err) + } + if err := oprot.WriteBool(bool(*p.IsGroupBulkLoadContextCleanedUp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_group_bulk_load_context_cleaned_up (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:is_group_bulk_load_context_cleaned_up: ", p), err) + } + } + return err +} + +func (p *BulkLoadResponse) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetIsGroupBulkLoadPaused() { + if err := oprot.WriteFieldBegin("is_group_bulk_load_paused", thrift.BOOL, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:is_group_bulk_load_paused: ", p), err) + } + if err := oprot.WriteBool(bool(*p.IsGroupBulkLoadPaused)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_group_bulk_load_paused (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:is_group_bulk_load_paused: ", p), err) + } + } + return err +} + +func (p *BulkLoadResponse) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetHpGroupBulkLoadState() { + if err := oprot.WriteFieldBegin("hp_group_bulk_load_state", thrift.MAP, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:hp_group_bulk_load_state: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.STRUCT, thrift.STRUCT, len(p.HpGroupBulkLoadState)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.HpGroupBulkLoadState { + if err := k.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", k), err) + } + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:hp_group_bulk_load_state: ", p), err) + } + } + return err +} + +func (p *BulkLoadResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BulkLoadResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - Target +// - Config +// - ProviderName +// - ClusterName +// - MetaBulkLoadStatus +// - RemoteRootPath +// - HpTarget +type GroupBulkLoadRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Target *base.RPCAddress `thrift:"target,2" db:"target" json:"target"` + Config *ReplicaConfiguration `thrift:"config,3" db:"config" json:"config"` + ProviderName string `thrift:"provider_name,4" db:"provider_name" json:"provider_name"` + ClusterName string `thrift:"cluster_name,5" db:"cluster_name" json:"cluster_name"` + MetaBulkLoadStatus BulkLoadStatus `thrift:"meta_bulk_load_status,6" db:"meta_bulk_load_status" json:"meta_bulk_load_status"` + RemoteRootPath string `thrift:"remote_root_path,7" db:"remote_root_path" json:"remote_root_path"` + HpTarget *base.HostPort `thrift:"hp_target,8" db:"hp_target" json:"hp_target,omitempty"` +} + +func NewGroupBulkLoadRequest() *GroupBulkLoadRequest { + return &GroupBulkLoadRequest{} +} + +func (p *GroupBulkLoadRequest) GetAppName() string { + return p.AppName +} + +var GroupBulkLoadRequest_Target_DEFAULT *base.RPCAddress + +func (p *GroupBulkLoadRequest) GetTarget() *base.RPCAddress { + if !p.IsSetTarget() { + return GroupBulkLoadRequest_Target_DEFAULT + } + return p.Target +} + +var GroupBulkLoadRequest_Config_DEFAULT *ReplicaConfiguration + +func (p *GroupBulkLoadRequest) GetConfig() *ReplicaConfiguration { + if !p.IsSetConfig() { + return GroupBulkLoadRequest_Config_DEFAULT + } + return p.Config +} + +func (p *GroupBulkLoadRequest) GetProviderName() string { + return p.ProviderName +} + +func (p *GroupBulkLoadRequest) GetClusterName() string { + return p.ClusterName +} + +func (p *GroupBulkLoadRequest) GetMetaBulkLoadStatus() BulkLoadStatus { + return p.MetaBulkLoadStatus +} + +func (p *GroupBulkLoadRequest) GetRemoteRootPath() string { + return p.RemoteRootPath +} + +var GroupBulkLoadRequest_HpTarget_DEFAULT *base.HostPort + +func (p *GroupBulkLoadRequest) GetHpTarget() *base.HostPort { + if !p.IsSetHpTarget() { + return GroupBulkLoadRequest_HpTarget_DEFAULT + } + return p.HpTarget +} +func (p *GroupBulkLoadRequest) IsSetTarget() bool { + return p.Target != nil +} + +func (p *GroupBulkLoadRequest) IsSetConfig() bool { + return p.Config != nil +} + +func (p *GroupBulkLoadRequest) IsSetHpTarget() bool { + return p.HpTarget != nil +} + +func (p *GroupBulkLoadRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I32 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField2(iprot thrift.TProtocol) error { + p.Target = &base.RPCAddress{} + if err := p.Target.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Target), err) + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField3(iprot thrift.TProtocol) error { + p.Config = &ReplicaConfiguration{ + Status: 0, + } + if err := p.Config.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Config), err) + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ProviderName = v + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ClusterName = v + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + temp := BulkLoadStatus(v) + p.MetaBulkLoadStatus = temp + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.RemoteRootPath = v + } + return nil +} + +func (p *GroupBulkLoadRequest) ReadField8(iprot thrift.TProtocol) error { + p.HpTarget = &base.HostPort{} + if err := p.HpTarget.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpTarget), err) + } + return nil +} + +func (p *GroupBulkLoadRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("group_bulk_load_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *GroupBulkLoadRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("target", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:target: ", p), err) + } + if err := p.Target.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Target), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:target: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("config", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:config: ", p), err) + } + if err := p.Config.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Config), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:config: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("provider_name", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:provider_name: ", p), err) + } + if err := oprot.WriteString(string(p.ProviderName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.provider_name (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:provider_name: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("cluster_name", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:cluster_name: ", p), err) + } + if err := oprot.WriteString(string(p.ClusterName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.cluster_name (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:cluster_name: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("meta_bulk_load_status", thrift.I32, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:meta_bulk_load_status: ", p), err) + } + if err := oprot.WriteI32(int32(p.MetaBulkLoadStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.meta_bulk_load_status (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:meta_bulk_load_status: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("remote_root_path", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:remote_root_path: ", p), err) + } + if err := oprot.WriteString(string(p.RemoteRootPath)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_root_path (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:remote_root_path: ", p), err) + } + return err +} + +func (p *GroupBulkLoadRequest) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetHpTarget() { + if err := oprot.WriteFieldBegin("hp_target", thrift.STRUCT, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:hp_target: ", p), err) + } + if err := p.HpTarget.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpTarget), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:hp_target: ", p), err) + } + } + return err +} + +func (p *GroupBulkLoadRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("GroupBulkLoadRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Status +// - BulkLoadState +type GroupBulkLoadResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Status BulkLoadStatus `thrift:"status,2" db:"status" json:"status"` + BulkLoadState *PartitionBulkLoadState `thrift:"bulk_load_state,3" db:"bulk_load_state" json:"bulk_load_state"` +} + +func NewGroupBulkLoadResponse() *GroupBulkLoadResponse { + return &GroupBulkLoadResponse{} +} + +var GroupBulkLoadResponse_Err_DEFAULT *base.ErrorCode + +func (p *GroupBulkLoadResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return GroupBulkLoadResponse_Err_DEFAULT + } + return p.Err +} + +func (p *GroupBulkLoadResponse) GetStatus() BulkLoadStatus { + return p.Status +} + +var GroupBulkLoadResponse_BulkLoadState_DEFAULT *PartitionBulkLoadState + +func (p *GroupBulkLoadResponse) GetBulkLoadState() *PartitionBulkLoadState { + if !p.IsSetBulkLoadState() { + return GroupBulkLoadResponse_BulkLoadState_DEFAULT + } + return p.BulkLoadState +} +func (p *GroupBulkLoadResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *GroupBulkLoadResponse) IsSetBulkLoadState() bool { + return p.BulkLoadState != nil +} + +func (p *GroupBulkLoadResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *GroupBulkLoadResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *GroupBulkLoadResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := BulkLoadStatus(v) + p.Status = temp + } + return nil +} + +func (p *GroupBulkLoadResponse) ReadField3(iprot thrift.TProtocol) error { + p.BulkLoadState = &PartitionBulkLoadState{ + IngestStatus: 0, + } + if err := p.BulkLoadState.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.BulkLoadState), err) + } + return nil +} + +func (p *GroupBulkLoadResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("group_bulk_load_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *GroupBulkLoadResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *GroupBulkLoadResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:status: ", p), err) + } + return err +} + +func (p *GroupBulkLoadResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("bulk_load_state", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:bulk_load_state: ", p), err) + } + if err := p.BulkLoadState.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.BulkLoadState), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:bulk_load_state: ", p), err) + } + return err +} + +func (p *GroupBulkLoadResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("GroupBulkLoadResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - Metadata +// - IngestBehind +// - Ballot +// - VerifyBeforeIngest +type IngestionRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Metadata *BulkLoadMetadata `thrift:"metadata,2" db:"metadata" json:"metadata"` + IngestBehind bool `thrift:"ingest_behind,3" db:"ingest_behind" json:"ingest_behind"` + Ballot int64 `thrift:"ballot,4" db:"ballot" json:"ballot"` + VerifyBeforeIngest bool `thrift:"verify_before_ingest,5" db:"verify_before_ingest" json:"verify_before_ingest"` +} + +func NewIngestionRequest() *IngestionRequest { + return &IngestionRequest{} +} + +func (p *IngestionRequest) GetAppName() string { + return p.AppName +} + +var IngestionRequest_Metadata_DEFAULT *BulkLoadMetadata + +func (p *IngestionRequest) GetMetadata() *BulkLoadMetadata { + if !p.IsSetMetadata() { + return IngestionRequest_Metadata_DEFAULT + } + return p.Metadata +} + +func (p *IngestionRequest) GetIngestBehind() bool { + return p.IngestBehind +} + +func (p *IngestionRequest) GetBallot() int64 { + return p.Ballot +} + +func (p *IngestionRequest) GetVerifyBeforeIngest() bool { + return p.VerifyBeforeIngest +} +func (p *IngestionRequest) IsSetMetadata() bool { + return p.Metadata != nil +} + +func (p *IngestionRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *IngestionRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *IngestionRequest) ReadField2(iprot thrift.TProtocol) error { + p.Metadata = &BulkLoadMetadata{} + if err := p.Metadata.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Metadata), err) + } + return nil +} + +func (p *IngestionRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.IngestBehind = v + } + return nil +} + +func (p *IngestionRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *IngestionRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.VerifyBeforeIngest = v + } + return nil +} + +func (p *IngestionRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ingestion_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *IngestionRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *IngestionRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("metadata", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:metadata: ", p), err) + } + if err := p.Metadata.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Metadata), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:metadata: ", p), err) + } + return err +} + +func (p *IngestionRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ingest_behind", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:ingest_behind: ", p), err) + } + if err := oprot.WriteBool(bool(p.IngestBehind)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ingest_behind (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:ingest_behind: ", p), err) + } + return err +} + +func (p *IngestionRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ballot: ", p), err) + } + return err +} + +func (p *IngestionRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("verify_before_ingest", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:verify_before_ingest: ", p), err) + } + if err := oprot.WriteBool(bool(p.VerifyBeforeIngest)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.verify_before_ingest (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:verify_before_ingest: ", p), err) + } + return err +} + +func (p *IngestionRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("IngestionRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - RocksdbError +type IngestionResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + RocksdbError int32 `thrift:"rocksdb_error,2" db:"rocksdb_error" json:"rocksdb_error"` +} + +func NewIngestionResponse() *IngestionResponse { + return &IngestionResponse{} +} + +var IngestionResponse_Err_DEFAULT *base.ErrorCode + +func (p *IngestionResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return IngestionResponse_Err_DEFAULT + } + return p.Err +} + +func (p *IngestionResponse) GetRocksdbError() int32 { + return p.RocksdbError +} +func (p *IngestionResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *IngestionResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *IngestionResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *IngestionResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.RocksdbError = v + } + return nil +} + +func (p *IngestionResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ingestion_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *IngestionResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *IngestionResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("rocksdb_error", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:rocksdb_error: ", p), err) + } + if err := oprot.WriteI32(int32(p.RocksdbError)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.rocksdb_error (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:rocksdb_error: ", p), err) + } + return err +} + +func (p *IngestionResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("IngestionResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - Type +type ControlBulkLoadRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Type BulkLoadControlType `thrift:"type,2" db:"type" json:"type"` +} + +func NewControlBulkLoadRequest() *ControlBulkLoadRequest { + return &ControlBulkLoadRequest{} +} + +func (p *ControlBulkLoadRequest) GetAppName() string { + return p.AppName +} + +func (p *ControlBulkLoadRequest) GetType() BulkLoadControlType { + return p.Type +} +func (p *ControlBulkLoadRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ControlBulkLoadRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ControlBulkLoadRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := BulkLoadControlType(v) + p.Type = temp + } + return nil +} + +func (p *ControlBulkLoadRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_bulk_load_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ControlBulkLoadRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ControlBulkLoadRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("type", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:type: ", p), err) + } + if err := oprot.WriteI32(int32(p.Type)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.type (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:type: ", p), err) + } + return err +} + +func (p *ControlBulkLoadRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ControlBulkLoadRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +type ControlBulkLoadResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg *string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg,omitempty"` +} + +func NewControlBulkLoadResponse() *ControlBulkLoadResponse { + return &ControlBulkLoadResponse{} +} + +var ControlBulkLoadResponse_Err_DEFAULT *base.ErrorCode + +func (p *ControlBulkLoadResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ControlBulkLoadResponse_Err_DEFAULT + } + return p.Err +} + +var ControlBulkLoadResponse_HintMsg_DEFAULT string + +func (p *ControlBulkLoadResponse) GetHintMsg() string { + if !p.IsSetHintMsg() { + return ControlBulkLoadResponse_HintMsg_DEFAULT + } + return *p.HintMsg +} +func (p *ControlBulkLoadResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ControlBulkLoadResponse) IsSetHintMsg() bool { + return p.HintMsg != nil +} + +func (p *ControlBulkLoadResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ControlBulkLoadResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ControlBulkLoadResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = &v + } + return nil +} + +func (p *ControlBulkLoadResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_bulk_load_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ControlBulkLoadResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ControlBulkLoadResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetHintMsg() { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(*p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + } + return err +} + +func (p *ControlBulkLoadResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ControlBulkLoadResponse(%+v)", *p) +} + +// Attributes: +// - AppName +type QueryBulkLoadRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` +} + +func NewQueryBulkLoadRequest() *QueryBulkLoadRequest { + return &QueryBulkLoadRequest{} +} + +func (p *QueryBulkLoadRequest) GetAppName() string { + return p.AppName +} +func (p *QueryBulkLoadRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryBulkLoadRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QueryBulkLoadRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_bulk_load_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryBulkLoadRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *QueryBulkLoadRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryBulkLoadRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - AppName +// - AppStatus +// - PartitionsStatus +// - MaxReplicaCount +// - BulkLoadStates +// - HintMsg +// - IsBulkLoading +// - HpBulkLoadStates +type QueryBulkLoadResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + AppName string `thrift:"app_name,2" db:"app_name" json:"app_name"` + AppStatus BulkLoadStatus `thrift:"app_status,3" db:"app_status" json:"app_status"` + PartitionsStatus []BulkLoadStatus `thrift:"partitions_status,4" db:"partitions_status" json:"partitions_status"` + MaxReplicaCount int32 `thrift:"max_replica_count,5" db:"max_replica_count" json:"max_replica_count"` + BulkLoadStates []map[*base.RPCAddress]*PartitionBulkLoadState `thrift:"bulk_load_states,6" db:"bulk_load_states" json:"bulk_load_states"` + HintMsg *string `thrift:"hint_msg,7" db:"hint_msg" json:"hint_msg,omitempty"` + IsBulkLoading *bool `thrift:"is_bulk_loading,8" db:"is_bulk_loading" json:"is_bulk_loading,omitempty"` + HpBulkLoadStates []map[*base.HostPort]*PartitionBulkLoadState `thrift:"hp_bulk_load_states,9" db:"hp_bulk_load_states" json:"hp_bulk_load_states,omitempty"` +} + +func NewQueryBulkLoadResponse() *QueryBulkLoadResponse { + return &QueryBulkLoadResponse{} +} + +var QueryBulkLoadResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryBulkLoadResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryBulkLoadResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryBulkLoadResponse) GetAppName() string { + return p.AppName +} + +func (p *QueryBulkLoadResponse) GetAppStatus() BulkLoadStatus { + return p.AppStatus +} + +func (p *QueryBulkLoadResponse) GetPartitionsStatus() []BulkLoadStatus { + return p.PartitionsStatus +} + +func (p *QueryBulkLoadResponse) GetMaxReplicaCount() int32 { + return p.MaxReplicaCount +} + +func (p *QueryBulkLoadResponse) GetBulkLoadStates() []map[*base.RPCAddress]*PartitionBulkLoadState { + return p.BulkLoadStates +} + +var QueryBulkLoadResponse_HintMsg_DEFAULT string + +func (p *QueryBulkLoadResponse) GetHintMsg() string { + if !p.IsSetHintMsg() { + return QueryBulkLoadResponse_HintMsg_DEFAULT + } + return *p.HintMsg +} + +var QueryBulkLoadResponse_IsBulkLoading_DEFAULT bool + +func (p *QueryBulkLoadResponse) GetIsBulkLoading() bool { + if !p.IsSetIsBulkLoading() { + return QueryBulkLoadResponse_IsBulkLoading_DEFAULT + } + return *p.IsBulkLoading +} + +var QueryBulkLoadResponse_HpBulkLoadStates_DEFAULT []map[*base.HostPort]*PartitionBulkLoadState + +func (p *QueryBulkLoadResponse) GetHpBulkLoadStates() []map[*base.HostPort]*PartitionBulkLoadState { + return p.HpBulkLoadStates +} +func (p *QueryBulkLoadResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryBulkLoadResponse) IsSetHintMsg() bool { + return p.HintMsg != nil +} + +func (p *QueryBulkLoadResponse) IsSetIsBulkLoading() bool { + return p.IsBulkLoading != nil +} + +func (p *QueryBulkLoadResponse) IsSetHpBulkLoadStates() bool { + return p.HpBulkLoadStates != nil +} + +func (p *QueryBulkLoadResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.LIST { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.LIST { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := BulkLoadStatus(v) + p.AppStatus = temp + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]BulkLoadStatus, 0, size) + p.PartitionsStatus = tSlice + for i := 0; i < size; i++ { + var _elem5 BulkLoadStatus + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + temp := BulkLoadStatus(v) + _elem5 = temp + } + p.PartitionsStatus = append(p.PartitionsStatus, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.MaxReplicaCount = v + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]map[*base.RPCAddress]*PartitionBulkLoadState, 0, size) + p.BulkLoadStates = tSlice + for i := 0; i < size; i++ { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[*base.RPCAddress]*PartitionBulkLoadState, size) + _elem6 := tMap + for i := 0; i < size; i++ { + _key7 := &base.RPCAddress{} + if err := _key7.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _key7), err) + } + _val8 := &PartitionBulkLoadState{ + IngestStatus: 0, + } + if err := _val8.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _val8), err) + } + _elem6[_key7] = _val8 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + p.BulkLoadStates = append(p.BulkLoadStates, _elem6) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.HintMsg = &v + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.IsBulkLoading = &v + } + return nil +} + +func (p *QueryBulkLoadResponse) ReadField9(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]map[*base.HostPort]*PartitionBulkLoadState, 0, size) + p.HpBulkLoadStates = tSlice + for i := 0; i < size; i++ { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[*base.HostPort]*PartitionBulkLoadState, size) + _elem9 := tMap + for i := 0; i < size; i++ { + _key10 := &base.HostPort{} + if err := _key10.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _key10), err) + } + _val11 := &PartitionBulkLoadState{ + IngestStatus: 0, + } + if err := _val11.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _val11), err) + } + _elem9[_key10] = _val11 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + p.HpBulkLoadStates = append(p.HpBulkLoadStates, _elem9) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryBulkLoadResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_bulk_load_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryBulkLoadResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryBulkLoadResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_name: ", p), err) + } + return err +} + +func (p *QueryBulkLoadResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_status", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_status: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_status (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_status: ", p), err) + } + return err +} + +func (p *QueryBulkLoadResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partitions_status", thrift.LIST, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partitions_status: ", p), err) + } + if err := oprot.WriteListBegin(thrift.I32, len(p.PartitionsStatus)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.PartitionsStatus { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partitions_status: ", p), err) + } + return err +} + +func (p *QueryBulkLoadResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_replica_count", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:max_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_replica_count (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:max_replica_count: ", p), err) + } + return err +} + +func (p *QueryBulkLoadResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("bulk_load_states", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:bulk_load_states: ", p), err) + } + if err := oprot.WriteListBegin(thrift.MAP, len(p.BulkLoadStates)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BulkLoadStates { + if err := oprot.WriteMapBegin(thrift.STRUCT, thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range v { + if err := k.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", k), err) + } + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:bulk_load_states: ", p), err) + } + return err +} + +func (p *QueryBulkLoadResponse) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetHintMsg() { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(*p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:hint_msg: ", p), err) + } + } + return err +} + +func (p *QueryBulkLoadResponse) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetIsBulkLoading() { + if err := oprot.WriteFieldBegin("is_bulk_loading", thrift.BOOL, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:is_bulk_loading: ", p), err) + } + if err := oprot.WriteBool(bool(*p.IsBulkLoading)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_bulk_loading (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:is_bulk_loading: ", p), err) + } + } + return err +} + +func (p *QueryBulkLoadResponse) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetHpBulkLoadStates() { + if err := oprot.WriteFieldBegin("hp_bulk_load_states", thrift.LIST, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:hp_bulk_load_states: ", p), err) + } + if err := oprot.WriteListBegin(thrift.MAP, len(p.HpBulkLoadStates)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.HpBulkLoadStates { + if err := oprot.WriteMapBegin(thrift.STRUCT, thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range v { + if err := k.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", k), err) + } + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:hp_bulk_load_states: ", p), err) + } + } + return err +} + +func (p *QueryBulkLoadResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryBulkLoadResponse(%+v)", *p) +} + +// Attributes: +// - AppName +type ClearBulkLoadStateRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` +} + +func NewClearBulkLoadStateRequest() *ClearBulkLoadStateRequest { + return &ClearBulkLoadStateRequest{} +} + +func (p *ClearBulkLoadStateRequest) GetAppName() string { + return p.AppName +} +func (p *ClearBulkLoadStateRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ClearBulkLoadStateRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ClearBulkLoadStateRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("clear_bulk_load_state_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ClearBulkLoadStateRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ClearBulkLoadStateRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ClearBulkLoadStateRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +type ClearBulkLoadStateResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg"` +} + +func NewClearBulkLoadStateResponse() *ClearBulkLoadStateResponse { + return &ClearBulkLoadStateResponse{} +} + +var ClearBulkLoadStateResponse_Err_DEFAULT *base.ErrorCode + +func (p *ClearBulkLoadStateResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ClearBulkLoadStateResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ClearBulkLoadStateResponse) GetHintMsg() string { + return p.HintMsg +} +func (p *ClearBulkLoadStateResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ClearBulkLoadStateResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ClearBulkLoadStateResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ClearBulkLoadStateResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = v + } + return nil +} + +func (p *ClearBulkLoadStateResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("clear_bulk_load_state_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ClearBulkLoadStateResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ClearBulkLoadStateResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + return err +} + +func (p *ClearBulkLoadStateResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ClearBulkLoadStateResponse(%+v)", *p) +} diff --git a/go-client/idl/admin/duplication-consts.go b/go-client/idl/admin/duplication-consts.go new file mode 100644 index 0000000000..757b943ef3 --- /dev/null +++ b/go-client/idl/admin/duplication-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/admin/duplication.go b/go-client/idl/admin/duplication.go new file mode 100644 index 0000000000..fa55de71f6 --- /dev/null +++ b/go-client/idl/admin/duplication.go @@ -0,0 +1,2606 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +type DuplicationStatus int64 + +const ( + DuplicationStatus_DS_INIT DuplicationStatus = 0 + DuplicationStatus_DS_PREPARE DuplicationStatus = 1 + DuplicationStatus_DS_APP DuplicationStatus = 2 + DuplicationStatus_DS_LOG DuplicationStatus = 3 + DuplicationStatus_DS_PAUSE DuplicationStatus = 4 + DuplicationStatus_DS_REMOVED DuplicationStatus = 5 +) + +func (p DuplicationStatus) String() string { + switch p { + case DuplicationStatus_DS_INIT: + return "DS_INIT" + case DuplicationStatus_DS_PREPARE: + return "DS_PREPARE" + case DuplicationStatus_DS_APP: + return "DS_APP" + case DuplicationStatus_DS_LOG: + return "DS_LOG" + case DuplicationStatus_DS_PAUSE: + return "DS_PAUSE" + case DuplicationStatus_DS_REMOVED: + return "DS_REMOVED" + } + return "" +} + +func DuplicationStatusFromString(s string) (DuplicationStatus, error) { + switch s { + case "DS_INIT": + return DuplicationStatus_DS_INIT, nil + case "DS_PREPARE": + return DuplicationStatus_DS_PREPARE, nil + case "DS_APP": + return DuplicationStatus_DS_APP, nil + case "DS_LOG": + return DuplicationStatus_DS_LOG, nil + case "DS_PAUSE": + return DuplicationStatus_DS_PAUSE, nil + case "DS_REMOVED": + return DuplicationStatus_DS_REMOVED, nil + } + return DuplicationStatus(0), fmt.Errorf("not a valid DuplicationStatus string") +} + +func DuplicationStatusPtr(v DuplicationStatus) *DuplicationStatus { return &v } + +func (p DuplicationStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *DuplicationStatus) UnmarshalText(text []byte) error { + q, err := DuplicationStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *DuplicationStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = DuplicationStatus(v) + return nil +} + +func (p *DuplicationStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type DuplicationFailMode int64 + +const ( + DuplicationFailMode_FAIL_SLOW DuplicationFailMode = 0 + DuplicationFailMode_FAIL_SKIP DuplicationFailMode = 1 + DuplicationFailMode_FAIL_FAST DuplicationFailMode = 2 +) + +func (p DuplicationFailMode) String() string { + switch p { + case DuplicationFailMode_FAIL_SLOW: + return "FAIL_SLOW" + case DuplicationFailMode_FAIL_SKIP: + return "FAIL_SKIP" + case DuplicationFailMode_FAIL_FAST: + return "FAIL_FAST" + } + return "" +} + +func DuplicationFailModeFromString(s string) (DuplicationFailMode, error) { + switch s { + case "FAIL_SLOW": + return DuplicationFailMode_FAIL_SLOW, nil + case "FAIL_SKIP": + return DuplicationFailMode_FAIL_SKIP, nil + case "FAIL_FAST": + return DuplicationFailMode_FAIL_FAST, nil + } + return DuplicationFailMode(0), fmt.Errorf("not a valid DuplicationFailMode string") +} + +func DuplicationFailModePtr(v DuplicationFailMode) *DuplicationFailMode { return &v } + +func (p DuplicationFailMode) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *DuplicationFailMode) UnmarshalText(text []byte) error { + q, err := DuplicationFailModeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *DuplicationFailMode) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = DuplicationFailMode(v) + return nil +} + +func (p *DuplicationFailMode) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - AppName +// - RemoteClusterName +// - IsDuplicatingCheckpoint +// - RemoteAppName +// - RemoteReplicaCount +type DuplicationAddRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + RemoteClusterName string `thrift:"remote_cluster_name,2" db:"remote_cluster_name" json:"remote_cluster_name"` + IsDuplicatingCheckpoint bool `thrift:"is_duplicating_checkpoint,3" db:"is_duplicating_checkpoint" json:"is_duplicating_checkpoint"` + RemoteAppName *string `thrift:"remote_app_name,4" db:"remote_app_name" json:"remote_app_name,omitempty"` + RemoteReplicaCount *int32 `thrift:"remote_replica_count,5" db:"remote_replica_count" json:"remote_replica_count,omitempty"` +} + +func NewDuplicationAddRequest() *DuplicationAddRequest { + return &DuplicationAddRequest{ + IsDuplicatingCheckpoint: true, + } +} + +func (p *DuplicationAddRequest) GetAppName() string { + return p.AppName +} + +func (p *DuplicationAddRequest) GetRemoteClusterName() string { + return p.RemoteClusterName +} + +var DuplicationAddRequest_IsDuplicatingCheckpoint_DEFAULT bool = true + +func (p *DuplicationAddRequest) GetIsDuplicatingCheckpoint() bool { + return p.IsDuplicatingCheckpoint +} + +var DuplicationAddRequest_RemoteAppName_DEFAULT string + +func (p *DuplicationAddRequest) GetRemoteAppName() string { + if !p.IsSetRemoteAppName() { + return DuplicationAddRequest_RemoteAppName_DEFAULT + } + return *p.RemoteAppName +} + +var DuplicationAddRequest_RemoteReplicaCount_DEFAULT int32 + +func (p *DuplicationAddRequest) GetRemoteReplicaCount() int32 { + if !p.IsSetRemoteReplicaCount() { + return DuplicationAddRequest_RemoteReplicaCount_DEFAULT + } + return *p.RemoteReplicaCount +} +func (p *DuplicationAddRequest) IsSetIsDuplicatingCheckpoint() bool { + return p.IsDuplicatingCheckpoint != DuplicationAddRequest_IsDuplicatingCheckpoint_DEFAULT +} + +func (p *DuplicationAddRequest) IsSetRemoteAppName() bool { + return p.RemoteAppName != nil +} + +func (p *DuplicationAddRequest) IsSetRemoteReplicaCount() bool { + return p.RemoteReplicaCount != nil +} + +func (p *DuplicationAddRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationAddRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *DuplicationAddRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.RemoteClusterName = v + } + return nil +} + +func (p *DuplicationAddRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.IsDuplicatingCheckpoint = v + } + return nil +} + +func (p *DuplicationAddRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.RemoteAppName = &v + } + return nil +} + +func (p *DuplicationAddRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.RemoteReplicaCount = &v + } + return nil +} + +func (p *DuplicationAddRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_add_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationAddRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *DuplicationAddRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("remote_cluster_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:remote_cluster_name: ", p), err) + } + if err := oprot.WriteString(string(p.RemoteClusterName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_cluster_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:remote_cluster_name: ", p), err) + } + return err +} + +func (p *DuplicationAddRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetIsDuplicatingCheckpoint() { + if err := oprot.WriteFieldBegin("is_duplicating_checkpoint", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:is_duplicating_checkpoint: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsDuplicatingCheckpoint)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_duplicating_checkpoint (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:is_duplicating_checkpoint: ", p), err) + } + } + return err +} + +func (p *DuplicationAddRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteAppName() { + if err := oprot.WriteFieldBegin("remote_app_name", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:remote_app_name: ", p), err) + } + if err := oprot.WriteString(string(*p.RemoteAppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_app_name (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:remote_app_name: ", p), err) + } + } + return err +} + +func (p *DuplicationAddRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteReplicaCount() { + if err := oprot.WriteFieldBegin("remote_replica_count", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:remote_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.RemoteReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_replica_count (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:remote_replica_count: ", p), err) + } + } + return err +} + +func (p *DuplicationAddRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationAddRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Appid +// - Dupid +// - Hint +// - RemoteAppName +// - RemoteReplicaCount +type DuplicationAddResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Appid int32 `thrift:"appid,2" db:"appid" json:"appid"` + Dupid int32 `thrift:"dupid,3" db:"dupid" json:"dupid"` + Hint *string `thrift:"hint,4" db:"hint" json:"hint,omitempty"` + RemoteAppName *string `thrift:"remote_app_name,5" db:"remote_app_name" json:"remote_app_name,omitempty"` + RemoteReplicaCount *int32 `thrift:"remote_replica_count,6" db:"remote_replica_count" json:"remote_replica_count,omitempty"` +} + +func NewDuplicationAddResponse() *DuplicationAddResponse { + return &DuplicationAddResponse{} +} + +var DuplicationAddResponse_Err_DEFAULT *base.ErrorCode + +func (p *DuplicationAddResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return DuplicationAddResponse_Err_DEFAULT + } + return p.Err +} + +func (p *DuplicationAddResponse) GetAppid() int32 { + return p.Appid +} + +func (p *DuplicationAddResponse) GetDupid() int32 { + return p.Dupid +} + +var DuplicationAddResponse_Hint_DEFAULT string + +func (p *DuplicationAddResponse) GetHint() string { + if !p.IsSetHint() { + return DuplicationAddResponse_Hint_DEFAULT + } + return *p.Hint +} + +var DuplicationAddResponse_RemoteAppName_DEFAULT string + +func (p *DuplicationAddResponse) GetRemoteAppName() string { + if !p.IsSetRemoteAppName() { + return DuplicationAddResponse_RemoteAppName_DEFAULT + } + return *p.RemoteAppName +} + +var DuplicationAddResponse_RemoteReplicaCount_DEFAULT int32 + +func (p *DuplicationAddResponse) GetRemoteReplicaCount() int32 { + if !p.IsSetRemoteReplicaCount() { + return DuplicationAddResponse_RemoteReplicaCount_DEFAULT + } + return *p.RemoteReplicaCount +} +func (p *DuplicationAddResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *DuplicationAddResponse) IsSetHint() bool { + return p.Hint != nil +} + +func (p *DuplicationAddResponse) IsSetRemoteAppName() bool { + return p.RemoteAppName != nil +} + +func (p *DuplicationAddResponse) IsSetRemoteReplicaCount() bool { + return p.RemoteReplicaCount != nil +} + +func (p *DuplicationAddResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I32 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationAddResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *DuplicationAddResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Appid = v + } + return nil +} + +func (p *DuplicationAddResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Dupid = v + } + return nil +} + +func (p *DuplicationAddResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Hint = &v + } + return nil +} + +func (p *DuplicationAddResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.RemoteAppName = &v + } + return nil +} + +func (p *DuplicationAddResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.RemoteReplicaCount = &v + } + return nil +} + +func (p *DuplicationAddResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_add_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationAddResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *DuplicationAddResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("appid", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:appid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Appid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.appid (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:appid: ", p), err) + } + return err +} + +func (p *DuplicationAddResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("dupid", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:dupid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Dupid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.dupid (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:dupid: ", p), err) + } + return err +} + +func (p *DuplicationAddResponse) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetHint() { + if err := oprot.WriteFieldBegin("hint", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hint: ", p), err) + } + if err := oprot.WriteString(string(*p.Hint)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hint: ", p), err) + } + } + return err +} + +func (p *DuplicationAddResponse) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteAppName() { + if err := oprot.WriteFieldBegin("remote_app_name", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:remote_app_name: ", p), err) + } + if err := oprot.WriteString(string(*p.RemoteAppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_app_name (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:remote_app_name: ", p), err) + } + } + return err +} + +func (p *DuplicationAddResponse) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteReplicaCount() { + if err := oprot.WriteFieldBegin("remote_replica_count", thrift.I32, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:remote_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.RemoteReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_replica_count (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:remote_replica_count: ", p), err) + } + } + return err +} + +func (p *DuplicationAddResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationAddResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - Dupid +// - Status +// - FailMode +type DuplicationModifyRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Dupid int32 `thrift:"dupid,2" db:"dupid" json:"dupid"` + Status *DuplicationStatus `thrift:"status,3" db:"status" json:"status,omitempty"` + FailMode *DuplicationFailMode `thrift:"fail_mode,4" db:"fail_mode" json:"fail_mode,omitempty"` +} + +func NewDuplicationModifyRequest() *DuplicationModifyRequest { + return &DuplicationModifyRequest{} +} + +func (p *DuplicationModifyRequest) GetAppName() string { + return p.AppName +} + +func (p *DuplicationModifyRequest) GetDupid() int32 { + return p.Dupid +} + +var DuplicationModifyRequest_Status_DEFAULT DuplicationStatus + +func (p *DuplicationModifyRequest) GetStatus() DuplicationStatus { + if !p.IsSetStatus() { + return DuplicationModifyRequest_Status_DEFAULT + } + return *p.Status +} + +var DuplicationModifyRequest_FailMode_DEFAULT DuplicationFailMode + +func (p *DuplicationModifyRequest) GetFailMode() DuplicationFailMode { + if !p.IsSetFailMode() { + return DuplicationModifyRequest_FailMode_DEFAULT + } + return *p.FailMode +} +func (p *DuplicationModifyRequest) IsSetStatus() bool { + return p.Status != nil +} + +func (p *DuplicationModifyRequest) IsSetFailMode() bool { + return p.FailMode != nil +} + +func (p *DuplicationModifyRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationModifyRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *DuplicationModifyRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Dupid = v + } + return nil +} + +func (p *DuplicationModifyRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := DuplicationStatus(v) + p.Status = &temp + } + return nil +} + +func (p *DuplicationModifyRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + temp := DuplicationFailMode(v) + p.FailMode = &temp + } + return nil +} + +func (p *DuplicationModifyRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_modify_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationModifyRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *DuplicationModifyRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("dupid", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:dupid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Dupid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.dupid (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:dupid: ", p), err) + } + return err +} + +func (p *DuplicationModifyRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetStatus() { + if err := oprot.WriteFieldBegin("status", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:status: ", p), err) + } + if err := oprot.WriteI32(int32(*p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:status: ", p), err) + } + } + return err +} + +func (p *DuplicationModifyRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetFailMode() { + if err := oprot.WriteFieldBegin("fail_mode", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:fail_mode: ", p), err) + } + if err := oprot.WriteI32(int32(*p.FailMode)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.fail_mode (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:fail_mode: ", p), err) + } + } + return err +} + +func (p *DuplicationModifyRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationModifyRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Appid +type DuplicationModifyResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Appid int32 `thrift:"appid,2" db:"appid" json:"appid"` +} + +func NewDuplicationModifyResponse() *DuplicationModifyResponse { + return &DuplicationModifyResponse{} +} + +var DuplicationModifyResponse_Err_DEFAULT *base.ErrorCode + +func (p *DuplicationModifyResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return DuplicationModifyResponse_Err_DEFAULT + } + return p.Err +} + +func (p *DuplicationModifyResponse) GetAppid() int32 { + return p.Appid +} +func (p *DuplicationModifyResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *DuplicationModifyResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationModifyResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *DuplicationModifyResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Appid = v + } + return nil +} + +func (p *DuplicationModifyResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_modify_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationModifyResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *DuplicationModifyResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("appid", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:appid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Appid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.appid (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:appid: ", p), err) + } + return err +} + +func (p *DuplicationModifyResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationModifyResponse(%+v)", *p) +} + +// Attributes: +// - Dupid +// - Status +// - Remote +// - CreateTs +// - Progress +// - FailMode +// - RemoteAppName +// - RemoteReplicaCount +type DuplicationEntry struct { + Dupid int32 `thrift:"dupid,1" db:"dupid" json:"dupid"` + Status DuplicationStatus `thrift:"status,2" db:"status" json:"status"` + Remote string `thrift:"remote,3" db:"remote" json:"remote"` + CreateTs int64 `thrift:"create_ts,4" db:"create_ts" json:"create_ts"` + Progress map[int32]int64 `thrift:"progress,5" db:"progress" json:"progress,omitempty"` + // unused field # 6 + FailMode *DuplicationFailMode `thrift:"fail_mode,7" db:"fail_mode" json:"fail_mode,omitempty"` + RemoteAppName *string `thrift:"remote_app_name,8" db:"remote_app_name" json:"remote_app_name,omitempty"` + RemoteReplicaCount *int32 `thrift:"remote_replica_count,9" db:"remote_replica_count" json:"remote_replica_count,omitempty"` +} + +func NewDuplicationEntry() *DuplicationEntry { + return &DuplicationEntry{} +} + +func (p *DuplicationEntry) GetDupid() int32 { + return p.Dupid +} + +func (p *DuplicationEntry) GetStatus() DuplicationStatus { + return p.Status +} + +func (p *DuplicationEntry) GetRemote() string { + return p.Remote +} + +func (p *DuplicationEntry) GetCreateTs() int64 { + return p.CreateTs +} + +var DuplicationEntry_Progress_DEFAULT map[int32]int64 + +func (p *DuplicationEntry) GetProgress() map[int32]int64 { + return p.Progress +} + +var DuplicationEntry_FailMode_DEFAULT DuplicationFailMode + +func (p *DuplicationEntry) GetFailMode() DuplicationFailMode { + if !p.IsSetFailMode() { + return DuplicationEntry_FailMode_DEFAULT + } + return *p.FailMode +} + +var DuplicationEntry_RemoteAppName_DEFAULT string + +func (p *DuplicationEntry) GetRemoteAppName() string { + if !p.IsSetRemoteAppName() { + return DuplicationEntry_RemoteAppName_DEFAULT + } + return *p.RemoteAppName +} + +var DuplicationEntry_RemoteReplicaCount_DEFAULT int32 + +func (p *DuplicationEntry) GetRemoteReplicaCount() int32 { + if !p.IsSetRemoteReplicaCount() { + return DuplicationEntry_RemoteReplicaCount_DEFAULT + } + return *p.RemoteReplicaCount +} +func (p *DuplicationEntry) IsSetProgress() bool { + return p.Progress != nil +} + +func (p *DuplicationEntry) IsSetFailMode() bool { + return p.FailMode != nil +} + +func (p *DuplicationEntry) IsSetRemoteAppName() bool { + return p.RemoteAppName != nil +} + +func (p *DuplicationEntry) IsSetRemoteReplicaCount() bool { + return p.RemoteReplicaCount != nil +} + +func (p *DuplicationEntry) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.MAP { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRING { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.I32 { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationEntry) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Dupid = v + } + return nil +} + +func (p *DuplicationEntry) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := DuplicationStatus(v) + p.Status = temp + } + return nil +} + +func (p *DuplicationEntry) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Remote = v + } + return nil +} + +func (p *DuplicationEntry) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.CreateTs = v + } + return nil +} + +func (p *DuplicationEntry) ReadField5(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[int32]int64, size) + p.Progress = tMap + for i := 0; i < size; i++ { + var _key0 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key0 = v + } + var _val1 int64 + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _val1 = v + } + p.Progress[_key0] = _val1 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *DuplicationEntry) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + temp := DuplicationFailMode(v) + p.FailMode = &temp + } + return nil +} + +func (p *DuplicationEntry) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.RemoteAppName = &v + } + return nil +} + +func (p *DuplicationEntry) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.RemoteReplicaCount = &v + } + return nil +} + +func (p *DuplicationEntry) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_entry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationEntry) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("dupid", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:dupid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Dupid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.dupid (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:dupid: ", p), err) + } + return err +} + +func (p *DuplicationEntry) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:status: ", p), err) + } + return err +} + +func (p *DuplicationEntry) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("remote", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:remote: ", p), err) + } + if err := oprot.WriteString(string(p.Remote)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:remote: ", p), err) + } + return err +} + +func (p *DuplicationEntry) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("create_ts", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:create_ts: ", p), err) + } + if err := oprot.WriteI64(int64(p.CreateTs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.create_ts (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:create_ts: ", p), err) + } + return err +} + +func (p *DuplicationEntry) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetProgress() { + if err := oprot.WriteFieldBegin("progress", thrift.MAP, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:progress: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.I64, len(p.Progress)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.Progress { + if err := oprot.WriteI32(int32(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteI64(int64(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:progress: ", p), err) + } + } + return err +} + +func (p *DuplicationEntry) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetFailMode() { + if err := oprot.WriteFieldBegin("fail_mode", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:fail_mode: ", p), err) + } + if err := oprot.WriteI32(int32(*p.FailMode)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.fail_mode (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:fail_mode: ", p), err) + } + } + return err +} + +func (p *DuplicationEntry) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteAppName() { + if err := oprot.WriteFieldBegin("remote_app_name", thrift.STRING, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:remote_app_name: ", p), err) + } + if err := oprot.WriteString(string(*p.RemoteAppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_app_name (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:remote_app_name: ", p), err) + } + } + return err +} + +func (p *DuplicationEntry) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetRemoteReplicaCount() { + if err := oprot.WriteFieldBegin("remote_replica_count", thrift.I32, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:remote_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.RemoteReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.remote_replica_count (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:remote_replica_count: ", p), err) + } + } + return err +} + +func (p *DuplicationEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationEntry(%+v)", *p) +} + +// Attributes: +// - AppName +type DuplicationQueryRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` +} + +func NewDuplicationQueryRequest() *DuplicationQueryRequest { + return &DuplicationQueryRequest{} +} + +func (p *DuplicationQueryRequest) GetAppName() string { + return p.AppName +} +func (p *DuplicationQueryRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationQueryRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *DuplicationQueryRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_query_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationQueryRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *DuplicationQueryRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationQueryRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Appid +// - EntryList +type DuplicationQueryResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + // unused field # 2 + Appid int32 `thrift:"appid,3" db:"appid" json:"appid"` + EntryList []*DuplicationEntry `thrift:"entry_list,4" db:"entry_list" json:"entry_list"` +} + +func NewDuplicationQueryResponse() *DuplicationQueryResponse { + return &DuplicationQueryResponse{} +} + +var DuplicationQueryResponse_Err_DEFAULT *base.ErrorCode + +func (p *DuplicationQueryResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return DuplicationQueryResponse_Err_DEFAULT + } + return p.Err +} + +func (p *DuplicationQueryResponse) GetAppid() int32 { + return p.Appid +} + +func (p *DuplicationQueryResponse) GetEntryList() []*DuplicationEntry { + return p.EntryList +} +func (p *DuplicationQueryResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *DuplicationQueryResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.LIST { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationQueryResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *DuplicationQueryResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Appid = v + } + return nil +} + +func (p *DuplicationQueryResponse) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*DuplicationEntry, 0, size) + p.EntryList = tSlice + for i := 0; i < size; i++ { + _elem2 := &DuplicationEntry{} + if err := _elem2.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } + p.EntryList = append(p.EntryList, _elem2) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *DuplicationQueryResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_query_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationQueryResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *DuplicationQueryResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("appid", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:appid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Appid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.appid (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:appid: ", p), err) + } + return err +} + +func (p *DuplicationQueryResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("entry_list", thrift.LIST, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:entry_list: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.EntryList)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.EntryList { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:entry_list: ", p), err) + } + return err +} + +func (p *DuplicationQueryResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationQueryResponse(%+v)", *p) +} + +// Attributes: +// - Dupid +// - ConfirmedDecree +// - CheckpointPrepared +type DuplicationConfirmEntry struct { + Dupid int32 `thrift:"dupid,1" db:"dupid" json:"dupid"` + ConfirmedDecree int64 `thrift:"confirmed_decree,2" db:"confirmed_decree" json:"confirmed_decree"` + CheckpointPrepared bool `thrift:"checkpoint_prepared,3" db:"checkpoint_prepared" json:"checkpoint_prepared"` +} + +func NewDuplicationConfirmEntry() *DuplicationConfirmEntry { + return &DuplicationConfirmEntry{} +} + +func (p *DuplicationConfirmEntry) GetDupid() int32 { + return p.Dupid +} + +func (p *DuplicationConfirmEntry) GetConfirmedDecree() int64 { + return p.ConfirmedDecree +} + +var DuplicationConfirmEntry_CheckpointPrepared_DEFAULT bool = false + +func (p *DuplicationConfirmEntry) GetCheckpointPrepared() bool { + return p.CheckpointPrepared +} +func (p *DuplicationConfirmEntry) IsSetCheckpointPrepared() bool { + return p.CheckpointPrepared != DuplicationConfirmEntry_CheckpointPrepared_DEFAULT +} + +func (p *DuplicationConfirmEntry) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationConfirmEntry) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Dupid = v + } + return nil +} + +func (p *DuplicationConfirmEntry) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.ConfirmedDecree = v + } + return nil +} + +func (p *DuplicationConfirmEntry) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.CheckpointPrepared = v + } + return nil +} + +func (p *DuplicationConfirmEntry) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_confirm_entry"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationConfirmEntry) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("dupid", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:dupid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Dupid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.dupid (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:dupid: ", p), err) + } + return err +} + +func (p *DuplicationConfirmEntry) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("confirmed_decree", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:confirmed_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.ConfirmedDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.confirmed_decree (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:confirmed_decree: ", p), err) + } + return err +} + +func (p *DuplicationConfirmEntry) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetCheckpointPrepared() { + if err := oprot.WriteFieldBegin("checkpoint_prepared", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:checkpoint_prepared: ", p), err) + } + if err := oprot.WriteBool(bool(p.CheckpointPrepared)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.checkpoint_prepared (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:checkpoint_prepared: ", p), err) + } + } + return err +} + +func (p *DuplicationConfirmEntry) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationConfirmEntry(%+v)", *p) +} + +// Attributes: +// - Node +// - ConfirmList +// - HpNode +type DuplicationSyncRequest struct { + Node *base.RPCAddress `thrift:"node,1" db:"node" json:"node"` + ConfirmList map[*base.Gpid][]*DuplicationConfirmEntry `thrift:"confirm_list,2" db:"confirm_list" json:"confirm_list"` + HpNode *base.HostPort `thrift:"hp_node,3" db:"hp_node" json:"hp_node"` +} + +func NewDuplicationSyncRequest() *DuplicationSyncRequest { + return &DuplicationSyncRequest{} +} + +var DuplicationSyncRequest_Node_DEFAULT *base.RPCAddress + +func (p *DuplicationSyncRequest) GetNode() *base.RPCAddress { + if !p.IsSetNode() { + return DuplicationSyncRequest_Node_DEFAULT + } + return p.Node +} + +func (p *DuplicationSyncRequest) GetConfirmList() map[*base.Gpid][]*DuplicationConfirmEntry { + return p.ConfirmList +} + +var DuplicationSyncRequest_HpNode_DEFAULT *base.HostPort + +func (p *DuplicationSyncRequest) GetHpNode() *base.HostPort { + if !p.IsSetHpNode() { + return DuplicationSyncRequest_HpNode_DEFAULT + } + return p.HpNode +} +func (p *DuplicationSyncRequest) IsSetNode() bool { + return p.Node != nil +} + +func (p *DuplicationSyncRequest) IsSetHpNode() bool { + return p.HpNode != nil +} + +func (p *DuplicationSyncRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.MAP { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationSyncRequest) ReadField1(iprot thrift.TProtocol) error { + p.Node = &base.RPCAddress{} + if err := p.Node.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) + } + return nil +} + +func (p *DuplicationSyncRequest) ReadField2(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[*base.Gpid][]*DuplicationConfirmEntry, size) + p.ConfirmList = tMap + for i := 0; i < size; i++ { + _key3 := &base.Gpid{} + if err := _key3.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _key3), err) + } + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*DuplicationConfirmEntry, 0, size) + _val4 := tSlice + for i := 0; i < size; i++ { + _elem5 := &DuplicationConfirmEntry{} + if err := _elem5.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) + } + _val4 = append(_val4, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + p.ConfirmList[_key3] = _val4 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *DuplicationSyncRequest) ReadField3(iprot thrift.TProtocol) error { + p.HpNode = &base.HostPort{} + if err := p.HpNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode), err) + } + return nil +} + +func (p *DuplicationSyncRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_sync_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationSyncRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:node: ", p), err) + } + if err := p.Node.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:node: ", p), err) + } + return err +} + +func (p *DuplicationSyncRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("confirm_list", thrift.MAP, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:confirm_list: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.STRUCT, thrift.LIST, len(p.ConfirmList)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.ConfirmList { + if err := k.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", k), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:confirm_list: ", p), err) + } + return err +} + +func (p *DuplicationSyncRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hp_node", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hp_node: ", p), err) + } + if err := p.HpNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hp_node: ", p), err) + } + return err +} + +func (p *DuplicationSyncRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationSyncRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - DupMap +type DuplicationSyncResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + DupMap map[int32]map[int32]*DuplicationEntry `thrift:"dup_map,2" db:"dup_map" json:"dup_map"` +} + +func NewDuplicationSyncResponse() *DuplicationSyncResponse { + return &DuplicationSyncResponse{} +} + +var DuplicationSyncResponse_Err_DEFAULT *base.ErrorCode + +func (p *DuplicationSyncResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return DuplicationSyncResponse_Err_DEFAULT + } + return p.Err +} + +func (p *DuplicationSyncResponse) GetDupMap() map[int32]map[int32]*DuplicationEntry { + return p.DupMap +} +func (p *DuplicationSyncResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *DuplicationSyncResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.MAP { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DuplicationSyncResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *DuplicationSyncResponse) ReadField2(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[int32]map[int32]*DuplicationEntry, size) + p.DupMap = tMap + for i := 0; i < size; i++ { + var _key6 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key6 = v + } + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[int32]*DuplicationEntry, size) + _val7 := tMap + for i := 0; i < size; i++ { + var _key8 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key8 = v + } + _val9 := &DuplicationEntry{} + if err := _val9.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _val9), err) + } + _val7[_key8] = _val9 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + p.DupMap[_key6] = _val7 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *DuplicationSyncResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("duplication_sync_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DuplicationSyncResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *DuplicationSyncResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("dup_map", thrift.MAP, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:dup_map: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.MAP, len(p.DupMap)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.DupMap { + if err := oprot.WriteI32(int32(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range v { + if err := oprot.WriteI32(int32(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:dup_map: ", p), err) + } + return err +} + +func (p *DuplicationSyncResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DuplicationSyncResponse(%+v)", *p) +} diff --git a/go-client/idl/admin/meta_admin-consts.go b/go-client/idl/admin/meta_admin-consts.go new file mode 100644 index 0000000000..757b943ef3 --- /dev/null +++ b/go-client/idl/admin/meta_admin-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/admin/meta_admin.go b/go-client/idl/admin/meta_admin.go new file mode 100644 index 0000000000..4a335e282d --- /dev/null +++ b/go-client/idl/admin/meta_admin.go @@ -0,0 +1,16081 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +type ConfigType int64 + +const ( + ConfigType_CT_INVALID ConfigType = 0 + ConfigType_CT_ASSIGN_PRIMARY ConfigType = 1 + ConfigType_CT_UPGRADE_TO_PRIMARY ConfigType = 2 + ConfigType_CT_ADD_SECONDARY ConfigType = 3 + ConfigType_CT_UPGRADE_TO_SECONDARY ConfigType = 4 + ConfigType_CT_DOWNGRADE_TO_SECONDARY ConfigType = 5 + ConfigType_CT_DOWNGRADE_TO_INACTIVE ConfigType = 6 + ConfigType_CT_REMOVE ConfigType = 7 + ConfigType_CT_ADD_SECONDARY_FOR_LB ConfigType = 8 + ConfigType_CT_PRIMARY_FORCE_UPDATE_BALLOT ConfigType = 9 + ConfigType_CT_DROP_PARTITION ConfigType = 10 + ConfigType_CT_REGISTER_CHILD ConfigType = 11 +) + +func (p ConfigType) String() string { + switch p { + case ConfigType_CT_INVALID: + return "CT_INVALID" + case ConfigType_CT_ASSIGN_PRIMARY: + return "CT_ASSIGN_PRIMARY" + case ConfigType_CT_UPGRADE_TO_PRIMARY: + return "CT_UPGRADE_TO_PRIMARY" + case ConfigType_CT_ADD_SECONDARY: + return "CT_ADD_SECONDARY" + case ConfigType_CT_UPGRADE_TO_SECONDARY: + return "CT_UPGRADE_TO_SECONDARY" + case ConfigType_CT_DOWNGRADE_TO_SECONDARY: + return "CT_DOWNGRADE_TO_SECONDARY" + case ConfigType_CT_DOWNGRADE_TO_INACTIVE: + return "CT_DOWNGRADE_TO_INACTIVE" + case ConfigType_CT_REMOVE: + return "CT_REMOVE" + case ConfigType_CT_ADD_SECONDARY_FOR_LB: + return "CT_ADD_SECONDARY_FOR_LB" + case ConfigType_CT_PRIMARY_FORCE_UPDATE_BALLOT: + return "CT_PRIMARY_FORCE_UPDATE_BALLOT" + case ConfigType_CT_DROP_PARTITION: + return "CT_DROP_PARTITION" + case ConfigType_CT_REGISTER_CHILD: + return "CT_REGISTER_CHILD" + } + return "" +} + +func ConfigTypeFromString(s string) (ConfigType, error) { + switch s { + case "CT_INVALID": + return ConfigType_CT_INVALID, nil + case "CT_ASSIGN_PRIMARY": + return ConfigType_CT_ASSIGN_PRIMARY, nil + case "CT_UPGRADE_TO_PRIMARY": + return ConfigType_CT_UPGRADE_TO_PRIMARY, nil + case "CT_ADD_SECONDARY": + return ConfigType_CT_ADD_SECONDARY, nil + case "CT_UPGRADE_TO_SECONDARY": + return ConfigType_CT_UPGRADE_TO_SECONDARY, nil + case "CT_DOWNGRADE_TO_SECONDARY": + return ConfigType_CT_DOWNGRADE_TO_SECONDARY, nil + case "CT_DOWNGRADE_TO_INACTIVE": + return ConfigType_CT_DOWNGRADE_TO_INACTIVE, nil + case "CT_REMOVE": + return ConfigType_CT_REMOVE, nil + case "CT_ADD_SECONDARY_FOR_LB": + return ConfigType_CT_ADD_SECONDARY_FOR_LB, nil + case "CT_PRIMARY_FORCE_UPDATE_BALLOT": + return ConfigType_CT_PRIMARY_FORCE_UPDATE_BALLOT, nil + case "CT_DROP_PARTITION": + return ConfigType_CT_DROP_PARTITION, nil + case "CT_REGISTER_CHILD": + return ConfigType_CT_REGISTER_CHILD, nil + } + return ConfigType(0), fmt.Errorf("not a valid ConfigType string") +} + +func ConfigTypePtr(v ConfigType) *ConfigType { return &v } + +func (p ConfigType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *ConfigType) UnmarshalText(text []byte) error { + q, err := ConfigTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *ConfigType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = ConfigType(v) + return nil +} + +func (p *ConfigType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type NodeStatus int64 + +const ( + NodeStatus_NS_INVALID NodeStatus = 0 + NodeStatus_NS_ALIVE NodeStatus = 1 + NodeStatus_NS_UNALIVE NodeStatus = 2 +) + +func (p NodeStatus) String() string { + switch p { + case NodeStatus_NS_INVALID: + return "NS_INVALID" + case NodeStatus_NS_ALIVE: + return "NS_ALIVE" + case NodeStatus_NS_UNALIVE: + return "NS_UNALIVE" + } + return "" +} + +func NodeStatusFromString(s string) (NodeStatus, error) { + switch s { + case "NS_INVALID": + return NodeStatus_NS_INVALID, nil + case "NS_ALIVE": + return NodeStatus_NS_ALIVE, nil + case "NS_UNALIVE": + return NodeStatus_NS_UNALIVE, nil + } + return NodeStatus(0), fmt.Errorf("not a valid NodeStatus string") +} + +func NodeStatusPtr(v NodeStatus) *NodeStatus { return &v } + +func (p NodeStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *NodeStatus) UnmarshalText(text []byte) error { + q, err := NodeStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *NodeStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = NodeStatus(v) + return nil +} + +func (p *NodeStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type AppEnvOperation int64 + +const ( + AppEnvOperation_APP_ENV_OP_INVALID AppEnvOperation = 0 + AppEnvOperation_APP_ENV_OP_SET AppEnvOperation = 1 + AppEnvOperation_APP_ENV_OP_DEL AppEnvOperation = 2 + AppEnvOperation_APP_ENV_OP_CLEAR AppEnvOperation = 3 +) + +func (p AppEnvOperation) String() string { + switch p { + case AppEnvOperation_APP_ENV_OP_INVALID: + return "APP_ENV_OP_INVALID" + case AppEnvOperation_APP_ENV_OP_SET: + return "APP_ENV_OP_SET" + case AppEnvOperation_APP_ENV_OP_DEL: + return "APP_ENV_OP_DEL" + case AppEnvOperation_APP_ENV_OP_CLEAR: + return "APP_ENV_OP_CLEAR" + } + return "" +} + +func AppEnvOperationFromString(s string) (AppEnvOperation, error) { + switch s { + case "APP_ENV_OP_INVALID": + return AppEnvOperation_APP_ENV_OP_INVALID, nil + case "APP_ENV_OP_SET": + return AppEnvOperation_APP_ENV_OP_SET, nil + case "APP_ENV_OP_DEL": + return AppEnvOperation_APP_ENV_OP_DEL, nil + case "APP_ENV_OP_CLEAR": + return AppEnvOperation_APP_ENV_OP_CLEAR, nil + } + return AppEnvOperation(0), fmt.Errorf("not a valid AppEnvOperation string") +} + +func AppEnvOperationPtr(v AppEnvOperation) *AppEnvOperation { return &v } + +func (p AppEnvOperation) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *AppEnvOperation) UnmarshalText(text []byte) error { + q, err := AppEnvOperationFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *AppEnvOperation) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = AppEnvOperation(v) + return nil +} + +func (p *AppEnvOperation) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type MetaFunctionLevel int64 + +const ( + MetaFunctionLevel_fl_stopped MetaFunctionLevel = 100 + MetaFunctionLevel_fl_blind MetaFunctionLevel = 200 + MetaFunctionLevel_fl_freezed MetaFunctionLevel = 300 + MetaFunctionLevel_fl_steady MetaFunctionLevel = 400 + MetaFunctionLevel_fl_lively MetaFunctionLevel = 500 + MetaFunctionLevel_fl_invalid MetaFunctionLevel = 10000 +) + +func (p MetaFunctionLevel) String() string { + switch p { + case MetaFunctionLevel_fl_stopped: + return "fl_stopped" + case MetaFunctionLevel_fl_blind: + return "fl_blind" + case MetaFunctionLevel_fl_freezed: + return "fl_freezed" + case MetaFunctionLevel_fl_steady: + return "fl_steady" + case MetaFunctionLevel_fl_lively: + return "fl_lively" + case MetaFunctionLevel_fl_invalid: + return "fl_invalid" + } + return "" +} + +func MetaFunctionLevelFromString(s string) (MetaFunctionLevel, error) { + switch s { + case "fl_stopped": + return MetaFunctionLevel_fl_stopped, nil + case "fl_blind": + return MetaFunctionLevel_fl_blind, nil + case "fl_freezed": + return MetaFunctionLevel_fl_freezed, nil + case "fl_steady": + return MetaFunctionLevel_fl_steady, nil + case "fl_lively": + return MetaFunctionLevel_fl_lively, nil + case "fl_invalid": + return MetaFunctionLevel_fl_invalid, nil + } + return MetaFunctionLevel(0), fmt.Errorf("not a valid MetaFunctionLevel string") +} + +func MetaFunctionLevelPtr(v MetaFunctionLevel) *MetaFunctionLevel { return &v } + +func (p MetaFunctionLevel) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *MetaFunctionLevel) UnmarshalText(text []byte) error { + q, err := MetaFunctionLevelFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *MetaFunctionLevel) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = MetaFunctionLevel(v) + return nil +} + +func (p *MetaFunctionLevel) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type BalancerRequestType int64 + +const ( + BalancerRequestType_move_primary BalancerRequestType = 0 + BalancerRequestType_copy_primary BalancerRequestType = 1 + BalancerRequestType_copy_secondary BalancerRequestType = 2 +) + +func (p BalancerRequestType) String() string { + switch p { + case BalancerRequestType_move_primary: + return "move_primary" + case BalancerRequestType_copy_primary: + return "copy_primary" + case BalancerRequestType_copy_secondary: + return "copy_secondary" + } + return "" +} + +func BalancerRequestTypeFromString(s string) (BalancerRequestType, error) { + switch s { + case "move_primary": + return BalancerRequestType_move_primary, nil + case "copy_primary": + return BalancerRequestType_copy_primary, nil + case "copy_secondary": + return BalancerRequestType_copy_secondary, nil + } + return BalancerRequestType(0), fmt.Errorf("not a valid BalancerRequestType string") +} + +func BalancerRequestTypePtr(v BalancerRequestType) *BalancerRequestType { return &v } + +func (p BalancerRequestType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *BalancerRequestType) UnmarshalText(text []byte) error { + q, err := BalancerRequestTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *BalancerRequestType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = BalancerRequestType(v) + return nil +} + +func (p *BalancerRequestType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Info +// - Config +// - Type +// - Node +// - HostNode +// - MetaSplitStatus +// - HpNode +type ConfigurationUpdateRequest struct { + Info *replication.AppInfo `thrift:"info,1" db:"info" json:"info"` + Config *replication.PartitionConfiguration `thrift:"config,2" db:"config" json:"config"` + Type ConfigType `thrift:"type,3" db:"type" json:"type"` + Node *base.RPCAddress `thrift:"node,4" db:"node" json:"node"` + HostNode *base.RPCAddress `thrift:"host_node,5" db:"host_node" json:"host_node"` + MetaSplitStatus *SplitStatus `thrift:"meta_split_status,6" db:"meta_split_status" json:"meta_split_status,omitempty"` + HpNode *base.HostPort `thrift:"hp_node,7" db:"hp_node" json:"hp_node,omitempty"` +} + +func NewConfigurationUpdateRequest() *ConfigurationUpdateRequest { + return &ConfigurationUpdateRequest{ + Type: 0, + } +} + +var ConfigurationUpdateRequest_Info_DEFAULT *replication.AppInfo + +func (p *ConfigurationUpdateRequest) GetInfo() *replication.AppInfo { + if !p.IsSetInfo() { + return ConfigurationUpdateRequest_Info_DEFAULT + } + return p.Info +} + +var ConfigurationUpdateRequest_Config_DEFAULT *replication.PartitionConfiguration + +func (p *ConfigurationUpdateRequest) GetConfig() *replication.PartitionConfiguration { + if !p.IsSetConfig() { + return ConfigurationUpdateRequest_Config_DEFAULT + } + return p.Config +} + +func (p *ConfigurationUpdateRequest) GetType() ConfigType { + return p.Type +} + +var ConfigurationUpdateRequest_Node_DEFAULT *base.RPCAddress + +func (p *ConfigurationUpdateRequest) GetNode() *base.RPCAddress { + if !p.IsSetNode() { + return ConfigurationUpdateRequest_Node_DEFAULT + } + return p.Node +} + +var ConfigurationUpdateRequest_HostNode_DEFAULT *base.RPCAddress + +func (p *ConfigurationUpdateRequest) GetHostNode() *base.RPCAddress { + if !p.IsSetHostNode() { + return ConfigurationUpdateRequest_HostNode_DEFAULT + } + return p.HostNode +} + +var ConfigurationUpdateRequest_MetaSplitStatus_DEFAULT SplitStatus + +func (p *ConfigurationUpdateRequest) GetMetaSplitStatus() SplitStatus { + if !p.IsSetMetaSplitStatus() { + return ConfigurationUpdateRequest_MetaSplitStatus_DEFAULT + } + return *p.MetaSplitStatus +} + +var ConfigurationUpdateRequest_HpNode_DEFAULT *base.HostPort + +func (p *ConfigurationUpdateRequest) GetHpNode() *base.HostPort { + if !p.IsSetHpNode() { + return ConfigurationUpdateRequest_HpNode_DEFAULT + } + return p.HpNode +} +func (p *ConfigurationUpdateRequest) IsSetInfo() bool { + return p.Info != nil +} + +func (p *ConfigurationUpdateRequest) IsSetConfig() bool { + return p.Config != nil +} + +func (p *ConfigurationUpdateRequest) IsSetNode() bool { + return p.Node != nil +} + +func (p *ConfigurationUpdateRequest) IsSetHostNode() bool { + return p.HostNode != nil +} + +func (p *ConfigurationUpdateRequest) IsSetMetaSplitStatus() bool { + return p.MetaSplitStatus != nil +} + +func (p *ConfigurationUpdateRequest) IsSetHpNode() bool { + return p.HpNode != nil +} + +func (p *ConfigurationUpdateRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I32 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField1(iprot thrift.TProtocol) error { + p.Info = &replication.AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } + if err := p.Info.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Info), err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField2(iprot thrift.TProtocol) error { + p.Config = &replication.PartitionConfiguration{} + if err := p.Config.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Config), err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := ConfigType(v) + p.Type = temp + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField4(iprot thrift.TProtocol) error { + p.Node = &base.RPCAddress{} + if err := p.Node.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField5(iprot thrift.TProtocol) error { + p.HostNode = &base.RPCAddress{} + if err := p.HostNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HostNode), err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + temp := SplitStatus(v) + p.MetaSplitStatus = &temp + } + return nil +} + +func (p *ConfigurationUpdateRequest) ReadField7(iprot thrift.TProtocol) error { + p.HpNode = &base.HostPort{} + if err := p.HpNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode), err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_update_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationUpdateRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("info", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:info: ", p), err) + } + if err := p.Info.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Info), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:info: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("config", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:config: ", p), err) + } + if err := p.Config.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Config), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:config: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:type: ", p), err) + } + if err := oprot.WriteI32(int32(p.Type)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:type: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:node: ", p), err) + } + if err := p.Node.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:node: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("host_node", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:host_node: ", p), err) + } + if err := p.HostNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HostNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:host_node: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetMetaSplitStatus() { + if err := oprot.WriteFieldBegin("meta_split_status", thrift.I32, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:meta_split_status: ", p), err) + } + if err := oprot.WriteI32(int32(*p.MetaSplitStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.meta_split_status (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:meta_split_status: ", p), err) + } + } + return err +} + +func (p *ConfigurationUpdateRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode() { + if err := oprot.WriteFieldBegin("hp_node", thrift.STRUCT, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:hp_node: ", p), err) + } + if err := p.HpNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:hp_node: ", p), err) + } + } + return err +} + +func (p *ConfigurationUpdateRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationUpdateRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Config +type ConfigurationUpdateResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Config *replication.PartitionConfiguration `thrift:"config,2" db:"config" json:"config"` +} + +func NewConfigurationUpdateResponse() *ConfigurationUpdateResponse { + return &ConfigurationUpdateResponse{} +} + +var ConfigurationUpdateResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationUpdateResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationUpdateResponse_Err_DEFAULT + } + return p.Err +} + +var ConfigurationUpdateResponse_Config_DEFAULT *replication.PartitionConfiguration + +func (p *ConfigurationUpdateResponse) GetConfig() *replication.PartitionConfiguration { + if !p.IsSetConfig() { + return ConfigurationUpdateResponse_Config_DEFAULT + } + return p.Config +} +func (p *ConfigurationUpdateResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationUpdateResponse) IsSetConfig() bool { + return p.Config != nil +} + +func (p *ConfigurationUpdateResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationUpdateResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationUpdateResponse) ReadField2(iprot thrift.TProtocol) error { + p.Config = &replication.PartitionConfiguration{} + if err := p.Config.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Config), err) + } + return nil +} + +func (p *ConfigurationUpdateResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_update_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationUpdateResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("config", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:config: ", p), err) + } + if err := p.Config.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Config), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:config: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationUpdateResponse(%+v)", *p) +} + +// Attributes: +// - GeoTags +// - TotalCapacityMb +type ReplicaServerInfo struct { + GeoTags map[string]string `thrift:"geo_tags,1" db:"geo_tags" json:"geo_tags"` + TotalCapacityMb int64 `thrift:"total_capacity_mb,2" db:"total_capacity_mb" json:"total_capacity_mb"` +} + +func NewReplicaServerInfo() *ReplicaServerInfo { + return &ReplicaServerInfo{} +} + +func (p *ReplicaServerInfo) GetGeoTags() map[string]string { + return p.GeoTags +} + +func (p *ReplicaServerInfo) GetTotalCapacityMb() int64 { + return p.TotalCapacityMb +} +func (p *ReplicaServerInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.MAP { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaServerInfo) ReadField1(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]string, size) + p.GeoTags = tMap + for i := 0; i < size; i++ { + var _key0 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key0 = v + } + var _val1 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _val1 = v + } + p.GeoTags[_key0] = _val1 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *ReplicaServerInfo) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TotalCapacityMb = v + } + return nil +} + +func (p *ReplicaServerInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("replica_server_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaServerInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("geo_tags", thrift.MAP, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:geo_tags: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.GeoTags)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.GeoTags { + if err := oprot.WriteString(string(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:geo_tags: ", p), err) + } + return err +} + +func (p *ReplicaServerInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("total_capacity_mb", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:total_capacity_mb: ", p), err) + } + if err := oprot.WriteI64(int64(p.TotalCapacityMb)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.total_capacity_mb (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:total_capacity_mb: ", p), err) + } + return err +} + +func (p *ReplicaServerInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaServerInfo(%+v)", *p) +} + +// Attributes: +// - Node +// - StoredReplicas +// - Info +// - HpNode +type ConfigurationQueryByNodeRequest struct { + Node *base.RPCAddress `thrift:"node,1" db:"node" json:"node"` + StoredReplicas []*ReplicaInfo `thrift:"stored_replicas,2" db:"stored_replicas" json:"stored_replicas,omitempty"` + Info *ReplicaServerInfo `thrift:"info,3" db:"info" json:"info,omitempty"` + HpNode *base.HostPort `thrift:"hp_node,4" db:"hp_node" json:"hp_node,omitempty"` +} + +func NewConfigurationQueryByNodeRequest() *ConfigurationQueryByNodeRequest { + return &ConfigurationQueryByNodeRequest{} +} + +var ConfigurationQueryByNodeRequest_Node_DEFAULT *base.RPCAddress + +func (p *ConfigurationQueryByNodeRequest) GetNode() *base.RPCAddress { + if !p.IsSetNode() { + return ConfigurationQueryByNodeRequest_Node_DEFAULT + } + return p.Node +} + +var ConfigurationQueryByNodeRequest_StoredReplicas_DEFAULT []*ReplicaInfo + +func (p *ConfigurationQueryByNodeRequest) GetStoredReplicas() []*ReplicaInfo { + return p.StoredReplicas +} + +var ConfigurationQueryByNodeRequest_Info_DEFAULT *ReplicaServerInfo + +func (p *ConfigurationQueryByNodeRequest) GetInfo() *ReplicaServerInfo { + if !p.IsSetInfo() { + return ConfigurationQueryByNodeRequest_Info_DEFAULT + } + return p.Info +} + +var ConfigurationQueryByNodeRequest_HpNode_DEFAULT *base.HostPort + +func (p *ConfigurationQueryByNodeRequest) GetHpNode() *base.HostPort { + if !p.IsSetHpNode() { + return ConfigurationQueryByNodeRequest_HpNode_DEFAULT + } + return p.HpNode +} +func (p *ConfigurationQueryByNodeRequest) IsSetNode() bool { + return p.Node != nil +} + +func (p *ConfigurationQueryByNodeRequest) IsSetStoredReplicas() bool { + return p.StoredReplicas != nil +} + +func (p *ConfigurationQueryByNodeRequest) IsSetInfo() bool { + return p.Info != nil +} + +func (p *ConfigurationQueryByNodeRequest) IsSetHpNode() bool { + return p.HpNode != nil +} + +func (p *ConfigurationQueryByNodeRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationQueryByNodeRequest) ReadField1(iprot thrift.TProtocol) error { + p.Node = &base.RPCAddress{} + if err := p.Node.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) + } + return nil +} + +func (p *ConfigurationQueryByNodeRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*ReplicaInfo, 0, size) + p.StoredReplicas = tSlice + for i := 0; i < size; i++ { + _elem2 := &ReplicaInfo{} + if err := _elem2.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } + p.StoredReplicas = append(p.StoredReplicas, _elem2) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryByNodeRequest) ReadField3(iprot thrift.TProtocol) error { + p.Info = &ReplicaServerInfo{} + if err := p.Info.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Info), err) + } + return nil +} + +func (p *ConfigurationQueryByNodeRequest) ReadField4(iprot thrift.TProtocol) error { + p.HpNode = &base.HostPort{} + if err := p.HpNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode), err) + } + return nil +} + +func (p *ConfigurationQueryByNodeRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_query_by_node_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationQueryByNodeRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:node: ", p), err) + } + if err := p.Node.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:node: ", p), err) + } + return err +} + +func (p *ConfigurationQueryByNodeRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetStoredReplicas() { + if err := oprot.WriteFieldBegin("stored_replicas", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:stored_replicas: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.StoredReplicas)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.StoredReplicas { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:stored_replicas: ", p), err) + } + } + return err +} + +func (p *ConfigurationQueryByNodeRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetInfo() { + if err := oprot.WriteFieldBegin("info", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:info: ", p), err) + } + if err := p.Info.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Info), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:info: ", p), err) + } + } + return err +} + +func (p *ConfigurationQueryByNodeRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode() { + if err := oprot.WriteFieldBegin("hp_node", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hp_node: ", p), err) + } + if err := p.HpNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hp_node: ", p), err) + } + } + return err +} + +func (p *ConfigurationQueryByNodeRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationQueryByNodeRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Partitions +// - GcReplicas +type ConfigurationQueryByNodeResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Partitions []*ConfigurationUpdateRequest `thrift:"partitions,2" db:"partitions" json:"partitions"` + GcReplicas []*ReplicaInfo `thrift:"gc_replicas,3" db:"gc_replicas" json:"gc_replicas,omitempty"` +} + +func NewConfigurationQueryByNodeResponse() *ConfigurationQueryByNodeResponse { + return &ConfigurationQueryByNodeResponse{} +} + +var ConfigurationQueryByNodeResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationQueryByNodeResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationQueryByNodeResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationQueryByNodeResponse) GetPartitions() []*ConfigurationUpdateRequest { + return p.Partitions +} + +var ConfigurationQueryByNodeResponse_GcReplicas_DEFAULT []*ReplicaInfo + +func (p *ConfigurationQueryByNodeResponse) GetGcReplicas() []*ReplicaInfo { + return p.GcReplicas +} +func (p *ConfigurationQueryByNodeResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationQueryByNodeResponse) IsSetGcReplicas() bool { + return p.GcReplicas != nil +} + +func (p *ConfigurationQueryByNodeResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationQueryByNodeResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationQueryByNodeResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*ConfigurationUpdateRequest, 0, size) + p.Partitions = tSlice + for i := 0; i < size; i++ { + _elem3 := &ConfigurationUpdateRequest{ + Type: 0, + } + if err := _elem3.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) + } + p.Partitions = append(p.Partitions, _elem3) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryByNodeResponse) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*ReplicaInfo, 0, size) + p.GcReplicas = tSlice + for i := 0; i < size; i++ { + _elem4 := &ReplicaInfo{} + if err := _elem4.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.GcReplicas = append(p.GcReplicas, _elem4) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationQueryByNodeResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_query_by_node_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationQueryByNodeResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationQueryByNodeResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partitions", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:partitions: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Partitions { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:partitions: ", p), err) + } + return err +} + +func (p *ConfigurationQueryByNodeResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetGcReplicas() { + if err := oprot.WriteFieldBegin("gc_replicas", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:gc_replicas: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.GcReplicas)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.GcReplicas { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:gc_replicas: ", p), err) + } + } + return err +} + +func (p *ConfigurationQueryByNodeResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationQueryByNodeResponse(%+v)", *p) +} + +// Attributes: +// - RecoveryNodes +// - SkipBadNodes +// - SkipLostPartitions +// - HpRecoveryNodes +type ConfigurationRecoveryRequest struct { + RecoveryNodes []*base.RPCAddress `thrift:"recovery_nodes,1" db:"recovery_nodes" json:"recovery_nodes"` + SkipBadNodes bool `thrift:"skip_bad_nodes,2" db:"skip_bad_nodes" json:"skip_bad_nodes"` + SkipLostPartitions bool `thrift:"skip_lost_partitions,3" db:"skip_lost_partitions" json:"skip_lost_partitions"` + HpRecoveryNodes []*base.HostPort `thrift:"hp_recovery_nodes,4" db:"hp_recovery_nodes" json:"hp_recovery_nodes,omitempty"` +} + +func NewConfigurationRecoveryRequest() *ConfigurationRecoveryRequest { + return &ConfigurationRecoveryRequest{} +} + +func (p *ConfigurationRecoveryRequest) GetRecoveryNodes() []*base.RPCAddress { + return p.RecoveryNodes +} + +func (p *ConfigurationRecoveryRequest) GetSkipBadNodes() bool { + return p.SkipBadNodes +} + +func (p *ConfigurationRecoveryRequest) GetSkipLostPartitions() bool { + return p.SkipLostPartitions +} + +var ConfigurationRecoveryRequest_HpRecoveryNodes_DEFAULT []*base.HostPort + +func (p *ConfigurationRecoveryRequest) GetHpRecoveryNodes() []*base.HostPort { + return p.HpRecoveryNodes +} +func (p *ConfigurationRecoveryRequest) IsSetHpRecoveryNodes() bool { + return p.HpRecoveryNodes != nil +} + +func (p *ConfigurationRecoveryRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.LIST { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRecoveryRequest) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.RPCAddress, 0, size) + p.RecoveryNodes = tSlice + for i := 0; i < size; i++ { + _elem5 := &base.RPCAddress{} + if err := _elem5.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) + } + p.RecoveryNodes = append(p.RecoveryNodes, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationRecoveryRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.SkipBadNodes = v + } + return nil +} + +func (p *ConfigurationRecoveryRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.SkipLostPartitions = v + } + return nil +} + +func (p *ConfigurationRecoveryRequest) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.HostPort, 0, size) + p.HpRecoveryNodes = tSlice + for i := 0; i < size; i++ { + _elem6 := &base.HostPort{} + if err := _elem6.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err) + } + p.HpRecoveryNodes = append(p.HpRecoveryNodes, _elem6) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationRecoveryRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_recovery_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRecoveryRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("recovery_nodes", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:recovery_nodes: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.RecoveryNodes)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.RecoveryNodes { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:recovery_nodes: ", p), err) + } + return err +} + +func (p *ConfigurationRecoveryRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("skip_bad_nodes", thrift.BOOL, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:skip_bad_nodes: ", p), err) + } + if err := oprot.WriteBool(bool(p.SkipBadNodes)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.skip_bad_nodes (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:skip_bad_nodes: ", p), err) + } + return err +} + +func (p *ConfigurationRecoveryRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("skip_lost_partitions", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:skip_lost_partitions: ", p), err) + } + if err := oprot.WriteBool(bool(p.SkipLostPartitions)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.skip_lost_partitions (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:skip_lost_partitions: ", p), err) + } + return err +} + +func (p *ConfigurationRecoveryRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetHpRecoveryNodes() { + if err := oprot.WriteFieldBegin("hp_recovery_nodes", thrift.LIST, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hp_recovery_nodes: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.HpRecoveryNodes)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.HpRecoveryNodes { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hp_recovery_nodes: ", p), err) + } + } + return err +} + +func (p *ConfigurationRecoveryRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRecoveryRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +type ConfigurationRecoveryResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationRecoveryResponse() *ConfigurationRecoveryResponse { + return &ConfigurationRecoveryResponse{} +} + +var ConfigurationRecoveryResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationRecoveryResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationRecoveryResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationRecoveryResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationRecoveryResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationRecoveryResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRecoveryResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationRecoveryResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationRecoveryResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_recovery_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRecoveryResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationRecoveryResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationRecoveryResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRecoveryResponse(%+v)", *p) +} + +// Attributes: +// - PartitionCount +// - ReplicaCount +// - SuccessIfExist +// - AppType +// - IsStateful +// - Envs +type CreateAppOptions struct { + PartitionCount int32 `thrift:"partition_count,1" db:"partition_count" json:"partition_count"` + ReplicaCount int32 `thrift:"replica_count,2" db:"replica_count" json:"replica_count"` + SuccessIfExist bool `thrift:"success_if_exist,3" db:"success_if_exist" json:"success_if_exist"` + AppType string `thrift:"app_type,4" db:"app_type" json:"app_type"` + IsStateful bool `thrift:"is_stateful,5" db:"is_stateful" json:"is_stateful"` + Envs map[string]string `thrift:"envs,6" db:"envs" json:"envs"` +} + +func NewCreateAppOptions() *CreateAppOptions { + return &CreateAppOptions{} +} + +func (p *CreateAppOptions) GetPartitionCount() int32 { + return p.PartitionCount +} + +func (p *CreateAppOptions) GetReplicaCount() int32 { + return p.ReplicaCount +} + +func (p *CreateAppOptions) GetSuccessIfExist() bool { + return p.SuccessIfExist +} + +func (p *CreateAppOptions) GetAppType() string { + return p.AppType +} + +func (p *CreateAppOptions) GetIsStateful() bool { + return p.IsStateful +} + +func (p *CreateAppOptions) GetEnvs() map[string]string { + return p.Envs +} +func (p *CreateAppOptions) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.MAP { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CreateAppOptions) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.PartitionCount = v + } + return nil +} + +func (p *CreateAppOptions) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.ReplicaCount = v + } + return nil +} + +func (p *CreateAppOptions) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.SuccessIfExist = v + } + return nil +} + +func (p *CreateAppOptions) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.AppType = v + } + return nil +} + +func (p *CreateAppOptions) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.IsStateful = v + } + return nil +} + +func (p *CreateAppOptions) ReadField6(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]string, size) + p.Envs = tMap + for i := 0; i < size; i++ { + var _key7 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key7 = v + } + var _val8 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _val8 = v + } + p.Envs[_key7] = _val8 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *CreateAppOptions) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("create_app_options"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CreateAppOptions) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_count (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:partition_count: ", p), err) + } + return err +} + +func (p *CreateAppOptions) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("replica_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.ReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.replica_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:replica_count: ", p), err) + } + return err +} + +func (p *CreateAppOptions) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("success_if_exist", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:success_if_exist: ", p), err) + } + if err := oprot.WriteBool(bool(p.SuccessIfExist)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.success_if_exist (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:success_if_exist: ", p), err) + } + return err +} + +func (p *CreateAppOptions) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_type", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_type: ", p), err) + } + if err := oprot.WriteString(string(p.AppType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_type (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_type: ", p), err) + } + return err +} + +func (p *CreateAppOptions) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_stateful", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:is_stateful: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsStateful)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_stateful (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:is_stateful: ", p), err) + } + return err +} + +func (p *CreateAppOptions) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("envs", thrift.MAP, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:envs: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Envs)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.Envs { + if err := oprot.WriteString(string(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:envs: ", p), err) + } + return err +} + +func (p *CreateAppOptions) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CreateAppOptions(%+v)", *p) +} + +// Attributes: +// - AppName +// - Options +type ConfigurationCreateAppRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Options *CreateAppOptions `thrift:"options,2" db:"options" json:"options"` +} + +func NewConfigurationCreateAppRequest() *ConfigurationCreateAppRequest { + return &ConfigurationCreateAppRequest{} +} + +func (p *ConfigurationCreateAppRequest) GetAppName() string { + return p.AppName +} + +var ConfigurationCreateAppRequest_Options_DEFAULT *CreateAppOptions + +func (p *ConfigurationCreateAppRequest) GetOptions() *CreateAppOptions { + if !p.IsSetOptions() { + return ConfigurationCreateAppRequest_Options_DEFAULT + } + return p.Options +} +func (p *ConfigurationCreateAppRequest) IsSetOptions() bool { + return p.Options != nil +} + +func (p *ConfigurationCreateAppRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationCreateAppRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ConfigurationCreateAppRequest) ReadField2(iprot thrift.TProtocol) error { + p.Options = &CreateAppOptions{} + if err := p.Options.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Options), err) + } + return nil +} + +func (p *ConfigurationCreateAppRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_create_app_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationCreateAppRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ConfigurationCreateAppRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("options", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:options: ", p), err) + } + if err := p.Options.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Options), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:options: ", p), err) + } + return err +} + +func (p *ConfigurationCreateAppRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationCreateAppRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Appid +type ConfigurationCreateAppResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Appid int32 `thrift:"appid,2" db:"appid" json:"appid"` +} + +func NewConfigurationCreateAppResponse() *ConfigurationCreateAppResponse { + return &ConfigurationCreateAppResponse{} +} + +var ConfigurationCreateAppResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationCreateAppResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationCreateAppResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationCreateAppResponse) GetAppid() int32 { + return p.Appid +} +func (p *ConfigurationCreateAppResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationCreateAppResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationCreateAppResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationCreateAppResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Appid = v + } + return nil +} + +func (p *ConfigurationCreateAppResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_create_app_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationCreateAppResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationCreateAppResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("appid", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:appid: ", p), err) + } + if err := oprot.WriteI32(int32(p.Appid)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.appid (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:appid: ", p), err) + } + return err +} + +func (p *ConfigurationCreateAppResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationCreateAppResponse(%+v)", *p) +} + +// Attributes: +// - SuccessIfNotExist +// - ReserveSeconds +type DropAppOptions struct { + SuccessIfNotExist bool `thrift:"success_if_not_exist,1" db:"success_if_not_exist" json:"success_if_not_exist"` + ReserveSeconds *int64 `thrift:"reserve_seconds,2" db:"reserve_seconds" json:"reserve_seconds,omitempty"` +} + +func NewDropAppOptions() *DropAppOptions { + return &DropAppOptions{} +} + +func (p *DropAppOptions) GetSuccessIfNotExist() bool { + return p.SuccessIfNotExist +} + +var DropAppOptions_ReserveSeconds_DEFAULT int64 + +func (p *DropAppOptions) GetReserveSeconds() int64 { + if !p.IsSetReserveSeconds() { + return DropAppOptions_ReserveSeconds_DEFAULT + } + return *p.ReserveSeconds +} +func (p *DropAppOptions) IsSetReserveSeconds() bool { + return p.ReserveSeconds != nil +} + +func (p *DropAppOptions) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DropAppOptions) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.SuccessIfNotExist = v + } + return nil +} + +func (p *DropAppOptions) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.ReserveSeconds = &v + } + return nil +} + +func (p *DropAppOptions) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("drop_app_options"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DropAppOptions) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("success_if_not_exist", thrift.BOOL, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:success_if_not_exist: ", p), err) + } + if err := oprot.WriteBool(bool(p.SuccessIfNotExist)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.success_if_not_exist (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:success_if_not_exist: ", p), err) + } + return err +} + +func (p *DropAppOptions) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetReserveSeconds() { + if err := oprot.WriteFieldBegin("reserve_seconds", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:reserve_seconds: ", p), err) + } + if err := oprot.WriteI64(int64(*p.ReserveSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.reserve_seconds (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:reserve_seconds: ", p), err) + } + } + return err +} + +func (p *DropAppOptions) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DropAppOptions(%+v)", *p) +} + +// Attributes: +// - AppName +// - Options +type ConfigurationDropAppRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Options *DropAppOptions `thrift:"options,2" db:"options" json:"options"` +} + +func NewConfigurationDropAppRequest() *ConfigurationDropAppRequest { + return &ConfigurationDropAppRequest{} +} + +func (p *ConfigurationDropAppRequest) GetAppName() string { + return p.AppName +} + +var ConfigurationDropAppRequest_Options_DEFAULT *DropAppOptions + +func (p *ConfigurationDropAppRequest) GetOptions() *DropAppOptions { + if !p.IsSetOptions() { + return ConfigurationDropAppRequest_Options_DEFAULT + } + return p.Options +} +func (p *ConfigurationDropAppRequest) IsSetOptions() bool { + return p.Options != nil +} + +func (p *ConfigurationDropAppRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationDropAppRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ConfigurationDropAppRequest) ReadField2(iprot thrift.TProtocol) error { + p.Options = &DropAppOptions{} + if err := p.Options.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Options), err) + } + return nil +} + +func (p *ConfigurationDropAppRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_drop_app_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationDropAppRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ConfigurationDropAppRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("options", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:options: ", p), err) + } + if err := p.Options.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Options), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:options: ", p), err) + } + return err +} + +func (p *ConfigurationDropAppRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationDropAppRequest(%+v)", *p) +} + +// Attributes: +// - Err +type ConfigurationDropAppResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` +} + +func NewConfigurationDropAppResponse() *ConfigurationDropAppResponse { + return &ConfigurationDropAppResponse{} +} + +var ConfigurationDropAppResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationDropAppResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationDropAppResponse_Err_DEFAULT + } + return p.Err +} +func (p *ConfigurationDropAppResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationDropAppResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationDropAppResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationDropAppResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_drop_app_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationDropAppResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationDropAppResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationDropAppResponse(%+v)", *p) +} + +// Attributes: +// - OldAppName +// - NewAppName_ +type ConfigurationRenameAppRequest struct { + OldAppName string `thrift:"old_app_name,1" db:"old_app_name" json:"old_app_name"` + NewAppName_ string `thrift:"new_app_name,2" db:"new_app_name" json:"new_app_name"` +} + +func NewConfigurationRenameAppRequest() *ConfigurationRenameAppRequest { + return &ConfigurationRenameAppRequest{} +} + +func (p *ConfigurationRenameAppRequest) GetOldAppName() string { + return p.OldAppName +} + +func (p *ConfigurationRenameAppRequest) GetNewAppName_() string { + return p.NewAppName_ +} +func (p *ConfigurationRenameAppRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRenameAppRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.OldAppName = v + } + return nil +} + +func (p *ConfigurationRenameAppRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.NewAppName_ = v + } + return nil +} + +func (p *ConfigurationRenameAppRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_rename_app_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRenameAppRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("old_app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:old_app_name: ", p), err) + } + if err := oprot.WriteString(string(p.OldAppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.old_app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:old_app_name: ", p), err) + } + return err +} + +func (p *ConfigurationRenameAppRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_app_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_app_name: ", p), err) + } + if err := oprot.WriteString(string(p.NewAppName_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_app_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_app_name: ", p), err) + } + return err +} + +func (p *ConfigurationRenameAppRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRenameAppRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +type ConfigurationRenameAppResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationRenameAppResponse() *ConfigurationRenameAppResponse { + return &ConfigurationRenameAppResponse{} +} + +var ConfigurationRenameAppResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationRenameAppResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationRenameAppResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationRenameAppResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationRenameAppResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationRenameAppResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRenameAppResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationRenameAppResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationRenameAppResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_rename_app_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRenameAppResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationRenameAppResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationRenameAppResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRenameAppResponse(%+v)", *p) +} + +// Attributes: +// - AppID +// - NewAppName_ +type ConfigurationRecallAppRequest struct { + AppID int32 `thrift:"app_id,1" db:"app_id" json:"app_id"` + NewAppName_ string `thrift:"new_app_name,2" db:"new_app_name" json:"new_app_name"` +} + +func NewConfigurationRecallAppRequest() *ConfigurationRecallAppRequest { + return &ConfigurationRecallAppRequest{} +} + +func (p *ConfigurationRecallAppRequest) GetAppID() int32 { + return p.AppID +} + +func (p *ConfigurationRecallAppRequest) GetNewAppName_() string { + return p.NewAppName_ +} +func (p *ConfigurationRecallAppRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRecallAppRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *ConfigurationRecallAppRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.NewAppName_ = v + } + return nil +} + +func (p *ConfigurationRecallAppRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_recall_app_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRecallAppRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_id: ", p), err) + } + return err +} + +func (p *ConfigurationRecallAppRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_app_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_app_name: ", p), err) + } + if err := oprot.WriteString(string(p.NewAppName_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_app_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_app_name: ", p), err) + } + return err +} + +func (p *ConfigurationRecallAppRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRecallAppRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Info +type ConfigurationRecallAppResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Info *replication.AppInfo `thrift:"info,2" db:"info" json:"info"` +} + +func NewConfigurationRecallAppResponse() *ConfigurationRecallAppResponse { + return &ConfigurationRecallAppResponse{} +} + +var ConfigurationRecallAppResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationRecallAppResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationRecallAppResponse_Err_DEFAULT + } + return p.Err +} + +var ConfigurationRecallAppResponse_Info_DEFAULT *replication.AppInfo + +func (p *ConfigurationRecallAppResponse) GetInfo() *replication.AppInfo { + if !p.IsSetInfo() { + return ConfigurationRecallAppResponse_Info_DEFAULT + } + return p.Info +} +func (p *ConfigurationRecallAppResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationRecallAppResponse) IsSetInfo() bool { + return p.Info != nil +} + +func (p *ConfigurationRecallAppResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationRecallAppResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationRecallAppResponse) ReadField2(iprot thrift.TProtocol) error { + p.Info = &replication.AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } + if err := p.Info.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Info), err) + } + return nil +} + +func (p *ConfigurationRecallAppResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_recall_app_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationRecallAppResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationRecallAppResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("info", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:info: ", p), err) + } + if err := p.Info.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Info), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:info: ", p), err) + } + return err +} + +func (p *ConfigurationRecallAppResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationRecallAppResponse(%+v)", *p) +} + +// Attributes: +// - Status +type ConfigurationListAppsRequest struct { + Status replication.AppStatus `thrift:"status,1" db:"status" json:"status"` +} + +func NewConfigurationListAppsRequest() *ConfigurationListAppsRequest { + return &ConfigurationListAppsRequest{ + Status: 0, + } +} + +func (p *ConfigurationListAppsRequest) GetStatus() replication.AppStatus { + return p.Status +} +func (p *ConfigurationListAppsRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationListAppsRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := replication.AppStatus(v) + p.Status = temp + } + return nil +} + +func (p *ConfigurationListAppsRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_list_apps_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationListAppsRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) + } + return err +} + +func (p *ConfigurationListAppsRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationListAppsRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Infos +type ConfigurationListAppsResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Infos []*replication.AppInfo `thrift:"infos,2" db:"infos" json:"infos"` +} + +func NewConfigurationListAppsResponse() *ConfigurationListAppsResponse { + return &ConfigurationListAppsResponse{} +} + +var ConfigurationListAppsResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationListAppsResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationListAppsResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationListAppsResponse) GetInfos() []*replication.AppInfo { + return p.Infos +} +func (p *ConfigurationListAppsResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationListAppsResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationListAppsResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationListAppsResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*replication.AppInfo, 0, size) + p.Infos = tSlice + for i := 0; i < size; i++ { + _elem9 := &replication.AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } + if err := _elem9.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err) + } + p.Infos = append(p.Infos, _elem9) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationListAppsResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_list_apps_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationListAppsResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationListAppsResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("infos", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:infos: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Infos)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Infos { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:infos: ", p), err) + } + return err +} + +func (p *ConfigurationListAppsResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationListAppsResponse(%+v)", *p) +} + +// Attributes: +// - MetaServer +// - HpMetaServer +type QueryAppInfoRequest struct { + MetaServer *base.RPCAddress `thrift:"meta_server,1" db:"meta_server" json:"meta_server"` + HpMetaServer *base.HostPort `thrift:"hp_meta_server,2" db:"hp_meta_server" json:"hp_meta_server,omitempty"` +} + +func NewQueryAppInfoRequest() *QueryAppInfoRequest { + return &QueryAppInfoRequest{} +} + +var QueryAppInfoRequest_MetaServer_DEFAULT *base.RPCAddress + +func (p *QueryAppInfoRequest) GetMetaServer() *base.RPCAddress { + if !p.IsSetMetaServer() { + return QueryAppInfoRequest_MetaServer_DEFAULT + } + return p.MetaServer +} + +var QueryAppInfoRequest_HpMetaServer_DEFAULT *base.HostPort + +func (p *QueryAppInfoRequest) GetHpMetaServer() *base.HostPort { + if !p.IsSetHpMetaServer() { + return QueryAppInfoRequest_HpMetaServer_DEFAULT + } + return p.HpMetaServer +} +func (p *QueryAppInfoRequest) IsSetMetaServer() bool { + return p.MetaServer != nil +} + +func (p *QueryAppInfoRequest) IsSetHpMetaServer() bool { + return p.HpMetaServer != nil +} + +func (p *QueryAppInfoRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryAppInfoRequest) ReadField1(iprot thrift.TProtocol) error { + p.MetaServer = &base.RPCAddress{} + if err := p.MetaServer.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.MetaServer), err) + } + return nil +} + +func (p *QueryAppInfoRequest) ReadField2(iprot thrift.TProtocol) error { + p.HpMetaServer = &base.HostPort{} + if err := p.HpMetaServer.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpMetaServer), err) + } + return nil +} + +func (p *QueryAppInfoRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_app_info_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryAppInfoRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("meta_server", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:meta_server: ", p), err) + } + if err := p.MetaServer.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.MetaServer), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:meta_server: ", p), err) + } + return err +} + +func (p *QueryAppInfoRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetHpMetaServer() { + if err := oprot.WriteFieldBegin("hp_meta_server", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hp_meta_server: ", p), err) + } + if err := p.HpMetaServer.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpMetaServer), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hp_meta_server: ", p), err) + } + } + return err +} + +func (p *QueryAppInfoRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryAppInfoRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Apps +type QueryAppInfoResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Apps []*replication.AppInfo `thrift:"apps,2" db:"apps" json:"apps"` +} + +func NewQueryAppInfoResponse() *QueryAppInfoResponse { + return &QueryAppInfoResponse{} +} + +var QueryAppInfoResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryAppInfoResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryAppInfoResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryAppInfoResponse) GetApps() []*replication.AppInfo { + return p.Apps +} +func (p *QueryAppInfoResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryAppInfoResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryAppInfoResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryAppInfoResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*replication.AppInfo, 0, size) + p.Apps = tSlice + for i := 0; i < size; i++ { + _elem10 := &replication.AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } + if err := _elem10.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) + } + p.Apps = append(p.Apps, _elem10) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryAppInfoResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_app_info_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryAppInfoResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryAppInfoResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("apps", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:apps: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Apps)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Apps { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:apps: ", p), err) + } + return err +} + +func (p *QueryAppInfoResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryAppInfoResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - Op +// - Keys +// - Values +// - ClearPrefix +type ConfigurationUpdateAppEnvRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Op AppEnvOperation `thrift:"op,2" db:"op" json:"op"` + Keys []string `thrift:"keys,3" db:"keys" json:"keys,omitempty"` + Values []string `thrift:"values,4" db:"values" json:"values,omitempty"` + ClearPrefix *string `thrift:"clear_prefix,5" db:"clear_prefix" json:"clear_prefix,omitempty"` +} + +func NewConfigurationUpdateAppEnvRequest() *ConfigurationUpdateAppEnvRequest { + return &ConfigurationUpdateAppEnvRequest{ + Op: 0, + } +} + +func (p *ConfigurationUpdateAppEnvRequest) GetAppName() string { + return p.AppName +} + +func (p *ConfigurationUpdateAppEnvRequest) GetOp() AppEnvOperation { + return p.Op +} + +var ConfigurationUpdateAppEnvRequest_Keys_DEFAULT []string + +func (p *ConfigurationUpdateAppEnvRequest) GetKeys() []string { + return p.Keys +} + +var ConfigurationUpdateAppEnvRequest_Values_DEFAULT []string + +func (p *ConfigurationUpdateAppEnvRequest) GetValues() []string { + return p.Values +} + +var ConfigurationUpdateAppEnvRequest_ClearPrefix_DEFAULT string + +func (p *ConfigurationUpdateAppEnvRequest) GetClearPrefix() string { + if !p.IsSetClearPrefix() { + return ConfigurationUpdateAppEnvRequest_ClearPrefix_DEFAULT + } + return *p.ClearPrefix +} +func (p *ConfigurationUpdateAppEnvRequest) IsSetKeys() bool { + return p.Keys != nil +} + +func (p *ConfigurationUpdateAppEnvRequest) IsSetValues() bool { + return p.Values != nil +} + +func (p *ConfigurationUpdateAppEnvRequest) IsSetClearPrefix() bool { + return p.ClearPrefix != nil +} + +func (p *ConfigurationUpdateAppEnvRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.LIST { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := AppEnvOperation(v) + p.Op = temp + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.Keys = tSlice + for i := 0; i < size; i++ { + var _elem11 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem11 = v + } + p.Keys = append(p.Keys, _elem11) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.Values = tSlice + for i := 0; i < size; i++ { + var _elem12 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem12 = v + } + p.Values = append(p.Values, _elem12) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ClearPrefix = &v + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_update_app_env_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateAppEnvRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("op", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:op: ", p), err) + } + if err := oprot.WriteI32(int32(p.Op)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.op (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:op: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateAppEnvRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetKeys() { + if err := oprot.WriteFieldBegin("keys", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:keys: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Keys)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Keys { + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:keys: ", p), err) + } + } + return err +} + +func (p *ConfigurationUpdateAppEnvRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetValues() { + if err := oprot.WriteFieldBegin("values", thrift.LIST, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:values: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Values)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Values { + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:values: ", p), err) + } + } + return err +} + +func (p *ConfigurationUpdateAppEnvRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetClearPrefix() { + if err := oprot.WriteFieldBegin("clear_prefix", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:clear_prefix: ", p), err) + } + if err := oprot.WriteString(string(*p.ClearPrefix)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.clear_prefix (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:clear_prefix: ", p), err) + } + } + return err +} + +func (p *ConfigurationUpdateAppEnvRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationUpdateAppEnvRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMessage +type ConfigurationUpdateAppEnvResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMessage string `thrift:"hint_message,2" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationUpdateAppEnvResponse() *ConfigurationUpdateAppEnvResponse { + return &ConfigurationUpdateAppEnvResponse{} +} + +var ConfigurationUpdateAppEnvResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationUpdateAppEnvResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationUpdateAppEnvResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationUpdateAppEnvResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationUpdateAppEnvResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationUpdateAppEnvResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationUpdateAppEnvResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_update_app_env_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationUpdateAppEnvResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateAppEnvResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationUpdateAppEnvResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationUpdateAppEnvResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - TriggerTime +// - TargetLevel +// - Bottommost +// - MaxRunningCount +type StartAppManualCompactRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + TriggerTime *int64 `thrift:"trigger_time,2" db:"trigger_time" json:"trigger_time,omitempty"` + TargetLevel *int32 `thrift:"target_level,3" db:"target_level" json:"target_level,omitempty"` + Bottommost *bool `thrift:"bottommost,4" db:"bottommost" json:"bottommost,omitempty"` + MaxRunningCount *int32 `thrift:"max_running_count,5" db:"max_running_count" json:"max_running_count,omitempty"` +} + +func NewStartAppManualCompactRequest() *StartAppManualCompactRequest { + return &StartAppManualCompactRequest{} +} + +func (p *StartAppManualCompactRequest) GetAppName() string { + return p.AppName +} + +var StartAppManualCompactRequest_TriggerTime_DEFAULT int64 + +func (p *StartAppManualCompactRequest) GetTriggerTime() int64 { + if !p.IsSetTriggerTime() { + return StartAppManualCompactRequest_TriggerTime_DEFAULT + } + return *p.TriggerTime +} + +var StartAppManualCompactRequest_TargetLevel_DEFAULT int32 + +func (p *StartAppManualCompactRequest) GetTargetLevel() int32 { + if !p.IsSetTargetLevel() { + return StartAppManualCompactRequest_TargetLevel_DEFAULT + } + return *p.TargetLevel +} + +var StartAppManualCompactRequest_Bottommost_DEFAULT bool + +func (p *StartAppManualCompactRequest) GetBottommost() bool { + if !p.IsSetBottommost() { + return StartAppManualCompactRequest_Bottommost_DEFAULT + } + return *p.Bottommost +} + +var StartAppManualCompactRequest_MaxRunningCount_DEFAULT int32 + +func (p *StartAppManualCompactRequest) GetMaxRunningCount() int32 { + if !p.IsSetMaxRunningCount() { + return StartAppManualCompactRequest_MaxRunningCount_DEFAULT + } + return *p.MaxRunningCount +} +func (p *StartAppManualCompactRequest) IsSetTriggerTime() bool { + return p.TriggerTime != nil +} + +func (p *StartAppManualCompactRequest) IsSetTargetLevel() bool { + return p.TargetLevel != nil +} + +func (p *StartAppManualCompactRequest) IsSetBottommost() bool { + return p.Bottommost != nil +} + +func (p *StartAppManualCompactRequest) IsSetMaxRunningCount() bool { + return p.MaxRunningCount != nil +} + +func (p *StartAppManualCompactRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartAppManualCompactRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *StartAppManualCompactRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TriggerTime = &v + } + return nil +} + +func (p *StartAppManualCompactRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.TargetLevel = &v + } + return nil +} + +func (p *StartAppManualCompactRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Bottommost = &v + } + return nil +} + +func (p *StartAppManualCompactRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.MaxRunningCount = &v + } + return nil +} + +func (p *StartAppManualCompactRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_app_manual_compact_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartAppManualCompactRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *StartAppManualCompactRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTriggerTime() { + if err := oprot.WriteFieldBegin("trigger_time", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:trigger_time: ", p), err) + } + if err := oprot.WriteI64(int64(*p.TriggerTime)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trigger_time (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:trigger_time: ", p), err) + } + } + return err +} + +func (p *StartAppManualCompactRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTargetLevel() { + if err := oprot.WriteFieldBegin("target_level", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:target_level: ", p), err) + } + if err := oprot.WriteI32(int32(*p.TargetLevel)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.target_level (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:target_level: ", p), err) + } + } + return err +} + +func (p *StartAppManualCompactRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBottommost() { + if err := oprot.WriteFieldBegin("bottommost", thrift.BOOL, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:bottommost: ", p), err) + } + if err := oprot.WriteBool(bool(*p.Bottommost)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.bottommost (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:bottommost: ", p), err) + } + } + return err +} + +func (p *StartAppManualCompactRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetMaxRunningCount() { + if err := oprot.WriteFieldBegin("max_running_count", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:max_running_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.MaxRunningCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_running_count (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:max_running_count: ", p), err) + } + } + return err +} + +func (p *StartAppManualCompactRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartAppManualCompactRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +type StartAppManualCompactResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg"` +} + +func NewStartAppManualCompactResponse() *StartAppManualCompactResponse { + return &StartAppManualCompactResponse{} +} + +var StartAppManualCompactResponse_Err_DEFAULT *base.ErrorCode + +func (p *StartAppManualCompactResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return StartAppManualCompactResponse_Err_DEFAULT + } + return p.Err +} + +func (p *StartAppManualCompactResponse) GetHintMsg() string { + return p.HintMsg +} +func (p *StartAppManualCompactResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *StartAppManualCompactResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartAppManualCompactResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *StartAppManualCompactResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = v + } + return nil +} + +func (p *StartAppManualCompactResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_app_manual_compact_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartAppManualCompactResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *StartAppManualCompactResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + return err +} + +func (p *StartAppManualCompactResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartAppManualCompactResponse(%+v)", *p) +} + +// Attributes: +// - AppName +type QueryAppManualCompactRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` +} + +func NewQueryAppManualCompactRequest() *QueryAppManualCompactRequest { + return &QueryAppManualCompactRequest{} +} + +func (p *QueryAppManualCompactRequest) GetAppName() string { + return p.AppName +} +func (p *QueryAppManualCompactRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryAppManualCompactRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QueryAppManualCompactRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_app_manual_compact_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryAppManualCompactRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *QueryAppManualCompactRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryAppManualCompactRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +// - Progress +type QueryAppManualCompactResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg"` + Progress *int32 `thrift:"progress,3" db:"progress" json:"progress,omitempty"` +} + +func NewQueryAppManualCompactResponse() *QueryAppManualCompactResponse { + return &QueryAppManualCompactResponse{} +} + +var QueryAppManualCompactResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryAppManualCompactResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryAppManualCompactResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryAppManualCompactResponse) GetHintMsg() string { + return p.HintMsg +} + +var QueryAppManualCompactResponse_Progress_DEFAULT int32 + +func (p *QueryAppManualCompactResponse) GetProgress() int32 { + if !p.IsSetProgress() { + return QueryAppManualCompactResponse_Progress_DEFAULT + } + return *p.Progress +} +func (p *QueryAppManualCompactResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryAppManualCompactResponse) IsSetProgress() bool { + return p.Progress != nil +} + +func (p *QueryAppManualCompactResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryAppManualCompactResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryAppManualCompactResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = v + } + return nil +} + +func (p *QueryAppManualCompactResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Progress = &v + } + return nil +} + +func (p *QueryAppManualCompactResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_app_manual_compact_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryAppManualCompactResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryAppManualCompactResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + return err +} + +func (p *QueryAppManualCompactResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetProgress() { + if err := oprot.WriteFieldBegin("progress", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:progress: ", p), err) + } + if err := oprot.WriteI32(int32(*p.Progress)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.progress (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:progress: ", p), err) + } + } + return err +} + +func (p *QueryAppManualCompactResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryAppManualCompactResponse(%+v)", *p) +} + +// Attributes: +// - Status +// - Node +// - HpNode +type NodeInfo struct { + Status NodeStatus `thrift:"status,1" db:"status" json:"status"` + Node *base.RPCAddress `thrift:"node,2" db:"node" json:"node"` + HpNode *base.HostPort `thrift:"hp_node,3" db:"hp_node" json:"hp_node,omitempty"` +} + +func NewNodeInfo() *NodeInfo { + return &NodeInfo{ + Status: 0, + } +} + +func (p *NodeInfo) GetStatus() NodeStatus { + return p.Status +} + +var NodeInfo_Node_DEFAULT *base.RPCAddress + +func (p *NodeInfo) GetNode() *base.RPCAddress { + if !p.IsSetNode() { + return NodeInfo_Node_DEFAULT + } + return p.Node +} + +var NodeInfo_HpNode_DEFAULT *base.HostPort + +func (p *NodeInfo) GetHpNode() *base.HostPort { + if !p.IsSetHpNode() { + return NodeInfo_HpNode_DEFAULT + } + return p.HpNode +} +func (p *NodeInfo) IsSetNode() bool { + return p.Node != nil +} + +func (p *NodeInfo) IsSetHpNode() bool { + return p.HpNode != nil +} + +func (p *NodeInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NodeInfo) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := NodeStatus(v) + p.Status = temp + } + return nil +} + +func (p *NodeInfo) ReadField2(iprot thrift.TProtocol) error { + p.Node = &base.RPCAddress{} + if err := p.Node.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) + } + return nil +} + +func (p *NodeInfo) ReadField3(iprot thrift.TProtocol) error { + p.HpNode = &base.HostPort{} + if err := p.HpNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode), err) + } + return nil +} + +func (p *NodeInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("node_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NodeInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) + } + return err +} + +func (p *NodeInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:node: ", p), err) + } + if err := p.Node.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:node: ", p), err) + } + return err +} + +func (p *NodeInfo) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode() { + if err := oprot.WriteFieldBegin("hp_node", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hp_node: ", p), err) + } + if err := p.HpNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hp_node: ", p), err) + } + } + return err +} + +func (p *NodeInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NodeInfo(%+v)", *p) +} + +// Attributes: +// - Status +type ConfigurationListNodesRequest struct { + Status NodeStatus `thrift:"status,1" db:"status" json:"status"` +} + +func NewConfigurationListNodesRequest() *ConfigurationListNodesRequest { + return &ConfigurationListNodesRequest{ + Status: 0, + } +} + +func (p *ConfigurationListNodesRequest) GetStatus() NodeStatus { + return p.Status +} +func (p *ConfigurationListNodesRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationListNodesRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := NodeStatus(v) + p.Status = temp + } + return nil +} + +func (p *ConfigurationListNodesRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_list_nodes_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationListNodesRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) + } + return err +} + +func (p *ConfigurationListNodesRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationListNodesRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Infos +type ConfigurationListNodesResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Infos []*NodeInfo `thrift:"infos,2" db:"infos" json:"infos"` +} + +func NewConfigurationListNodesResponse() *ConfigurationListNodesResponse { + return &ConfigurationListNodesResponse{} +} + +var ConfigurationListNodesResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationListNodesResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationListNodesResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationListNodesResponse) GetInfos() []*NodeInfo { + return p.Infos +} +func (p *ConfigurationListNodesResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationListNodesResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationListNodesResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationListNodesResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*NodeInfo, 0, size) + p.Infos = tSlice + for i := 0; i < size; i++ { + _elem13 := &NodeInfo{ + Status: 0, + } + if err := _elem13.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem13), err) + } + p.Infos = append(p.Infos, _elem13) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationListNodesResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_list_nodes_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationListNodesResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationListNodesResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("infos", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:infos: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Infos)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Infos { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:infos: ", p), err) + } + return err +} + +func (p *ConfigurationListNodesResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationListNodesResponse(%+v)", *p) +} + +type ConfigurationClusterInfoRequest struct { +} + +func NewConfigurationClusterInfoRequest() *ConfigurationClusterInfoRequest { + return &ConfigurationClusterInfoRequest{} +} + +func (p *ConfigurationClusterInfoRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationClusterInfoRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_cluster_info_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationClusterInfoRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationClusterInfoRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Keys +// - Values +type ConfigurationClusterInfoResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Keys []string `thrift:"keys,2" db:"keys" json:"keys"` + Values []string `thrift:"values,3" db:"values" json:"values"` +} + +func NewConfigurationClusterInfoResponse() *ConfigurationClusterInfoResponse { + return &ConfigurationClusterInfoResponse{} +} + +var ConfigurationClusterInfoResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationClusterInfoResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationClusterInfoResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationClusterInfoResponse) GetKeys() []string { + return p.Keys +} + +func (p *ConfigurationClusterInfoResponse) GetValues() []string { + return p.Values +} +func (p *ConfigurationClusterInfoResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationClusterInfoResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.LIST { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationClusterInfoResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationClusterInfoResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.Keys = tSlice + for i := 0; i < size; i++ { + var _elem14 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem14 = v + } + p.Keys = append(p.Keys, _elem14) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationClusterInfoResponse) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.Values = tSlice + for i := 0; i < size; i++ { + var _elem15 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem15 = v + } + p.Values = append(p.Values, _elem15) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationClusterInfoResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_cluster_info_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationClusterInfoResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationClusterInfoResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("keys", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:keys: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Keys)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Keys { + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:keys: ", p), err) + } + return err +} + +func (p *ConfigurationClusterInfoResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("values", thrift.LIST, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:values: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Values)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Values { + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:values: ", p), err) + } + return err +} + +func (p *ConfigurationClusterInfoResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationClusterInfoResponse(%+v)", *p) +} + +// Attributes: +// - Level +type ConfigurationMetaControlRequest struct { + Level MetaFunctionLevel `thrift:"level,1" db:"level" json:"level"` +} + +func NewConfigurationMetaControlRequest() *ConfigurationMetaControlRequest { + return &ConfigurationMetaControlRequest{} +} + +func (p *ConfigurationMetaControlRequest) GetLevel() MetaFunctionLevel { + return p.Level +} +func (p *ConfigurationMetaControlRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationMetaControlRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := MetaFunctionLevel(v) + p.Level = temp + } + return nil +} + +func (p *ConfigurationMetaControlRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_meta_control_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationMetaControlRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("level", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:level: ", p), err) + } + if err := oprot.WriteI32(int32(p.Level)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.level (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:level: ", p), err) + } + return err +} + +func (p *ConfigurationMetaControlRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationMetaControlRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - OldLevel +type ConfigurationMetaControlResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + OldLevel MetaFunctionLevel `thrift:"old_level,2" db:"old_level" json:"old_level"` +} + +func NewConfigurationMetaControlResponse() *ConfigurationMetaControlResponse { + return &ConfigurationMetaControlResponse{} +} + +var ConfigurationMetaControlResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationMetaControlResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationMetaControlResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationMetaControlResponse) GetOldLevel() MetaFunctionLevel { + return p.OldLevel +} +func (p *ConfigurationMetaControlResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationMetaControlResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationMetaControlResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationMetaControlResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := MetaFunctionLevel(v) + p.OldLevel = temp + } + return nil +} + +func (p *ConfigurationMetaControlResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_meta_control_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationMetaControlResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationMetaControlResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("old_level", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:old_level: ", p), err) + } + if err := oprot.WriteI32(int32(p.OldLevel)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.old_level (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:old_level: ", p), err) + } + return err +} + +func (p *ConfigurationMetaControlResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationMetaControlResponse(%+v)", *p) +} + +// Attributes: +// - Target +// - Node +// - Type +// - HpTarget +// - HpNode +type ConfigurationProposalAction struct { + Target *base.RPCAddress `thrift:"target,1" db:"target" json:"target"` + Node *base.RPCAddress `thrift:"node,2" db:"node" json:"node"` + Type ConfigType `thrift:"type,3" db:"type" json:"type"` + // unused field # 4 + HpTarget *base.HostPort `thrift:"hp_target,5" db:"hp_target" json:"hp_target,omitempty"` + HpNode *base.HostPort `thrift:"hp_node,6" db:"hp_node" json:"hp_node,omitempty"` +} + +func NewConfigurationProposalAction() *ConfigurationProposalAction { + return &ConfigurationProposalAction{} +} + +var ConfigurationProposalAction_Target_DEFAULT *base.RPCAddress + +func (p *ConfigurationProposalAction) GetTarget() *base.RPCAddress { + if !p.IsSetTarget() { + return ConfigurationProposalAction_Target_DEFAULT + } + return p.Target +} + +var ConfigurationProposalAction_Node_DEFAULT *base.RPCAddress + +func (p *ConfigurationProposalAction) GetNode() *base.RPCAddress { + if !p.IsSetNode() { + return ConfigurationProposalAction_Node_DEFAULT + } + return p.Node +} + +func (p *ConfigurationProposalAction) GetType() ConfigType { + return p.Type +} + +var ConfigurationProposalAction_HpTarget_DEFAULT *base.HostPort + +func (p *ConfigurationProposalAction) GetHpTarget() *base.HostPort { + if !p.IsSetHpTarget() { + return ConfigurationProposalAction_HpTarget_DEFAULT + } + return p.HpTarget +} + +var ConfigurationProposalAction_HpNode_DEFAULT *base.HostPort + +func (p *ConfigurationProposalAction) GetHpNode() *base.HostPort { + if !p.IsSetHpNode() { + return ConfigurationProposalAction_HpNode_DEFAULT + } + return p.HpNode +} +func (p *ConfigurationProposalAction) IsSetTarget() bool { + return p.Target != nil +} + +func (p *ConfigurationProposalAction) IsSetNode() bool { + return p.Node != nil +} + +func (p *ConfigurationProposalAction) IsSetHpTarget() bool { + return p.HpTarget != nil +} + +func (p *ConfigurationProposalAction) IsSetHpNode() bool { + return p.HpNode != nil +} + +func (p *ConfigurationProposalAction) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationProposalAction) ReadField1(iprot thrift.TProtocol) error { + p.Target = &base.RPCAddress{} + if err := p.Target.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Target), err) + } + return nil +} + +func (p *ConfigurationProposalAction) ReadField2(iprot thrift.TProtocol) error { + p.Node = &base.RPCAddress{} + if err := p.Node.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) + } + return nil +} + +func (p *ConfigurationProposalAction) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := ConfigType(v) + p.Type = temp + } + return nil +} + +func (p *ConfigurationProposalAction) ReadField5(iprot thrift.TProtocol) error { + p.HpTarget = &base.HostPort{} + if err := p.HpTarget.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpTarget), err) + } + return nil +} + +func (p *ConfigurationProposalAction) ReadField6(iprot thrift.TProtocol) error { + p.HpNode = &base.HostPort{} + if err := p.HpNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode), err) + } + return nil +} + +func (p *ConfigurationProposalAction) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_proposal_action"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationProposalAction) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("target", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:target: ", p), err) + } + if err := p.Target.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Target), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:target: ", p), err) + } + return err +} + +func (p *ConfigurationProposalAction) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:node: ", p), err) + } + if err := p.Node.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:node: ", p), err) + } + return err +} + +func (p *ConfigurationProposalAction) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:type: ", p), err) + } + if err := oprot.WriteI32(int32(p.Type)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:type: ", p), err) + } + return err +} + +func (p *ConfigurationProposalAction) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetHpTarget() { + if err := oprot.WriteFieldBegin("hp_target", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:hp_target: ", p), err) + } + if err := p.HpTarget.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpTarget), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:hp_target: ", p), err) + } + } + return err +} + +func (p *ConfigurationProposalAction) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode() { + if err := oprot.WriteFieldBegin("hp_node", thrift.STRUCT, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:hp_node: ", p), err) + } + if err := p.HpNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:hp_node: ", p), err) + } + } + return err +} + +func (p *ConfigurationProposalAction) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationProposalAction(%+v)", *p) +} + +// Attributes: +// - Gpid +// - ActionList +// - Force +// - BalanceType +type ConfigurationBalancerRequest struct { + Gpid *base.Gpid `thrift:"gpid,1" db:"gpid" json:"gpid"` + ActionList []*ConfigurationProposalAction `thrift:"action_list,2" db:"action_list" json:"action_list"` + Force bool `thrift:"force,3" db:"force" json:"force"` + BalanceType *BalancerRequestType `thrift:"balance_type,4" db:"balance_type" json:"balance_type,omitempty"` +} + +func NewConfigurationBalancerRequest() *ConfigurationBalancerRequest { + return &ConfigurationBalancerRequest{} +} + +var ConfigurationBalancerRequest_Gpid_DEFAULT *base.Gpid + +func (p *ConfigurationBalancerRequest) GetGpid() *base.Gpid { + if !p.IsSetGpid() { + return ConfigurationBalancerRequest_Gpid_DEFAULT + } + return p.Gpid +} + +func (p *ConfigurationBalancerRequest) GetActionList() []*ConfigurationProposalAction { + return p.ActionList +} + +var ConfigurationBalancerRequest_Force_DEFAULT bool = false + +func (p *ConfigurationBalancerRequest) GetForce() bool { + return p.Force +} + +var ConfigurationBalancerRequest_BalanceType_DEFAULT BalancerRequestType + +func (p *ConfigurationBalancerRequest) GetBalanceType() BalancerRequestType { + if !p.IsSetBalanceType() { + return ConfigurationBalancerRequest_BalanceType_DEFAULT + } + return *p.BalanceType +} +func (p *ConfigurationBalancerRequest) IsSetGpid() bool { + return p.Gpid != nil +} + +func (p *ConfigurationBalancerRequest) IsSetForce() bool { + return p.Force != ConfigurationBalancerRequest_Force_DEFAULT +} + +func (p *ConfigurationBalancerRequest) IsSetBalanceType() bool { + return p.BalanceType != nil +} + +func (p *ConfigurationBalancerRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationBalancerRequest) ReadField1(iprot thrift.TProtocol) error { + p.Gpid = &base.Gpid{} + if err := p.Gpid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Gpid), err) + } + return nil +} + +func (p *ConfigurationBalancerRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*ConfigurationProposalAction, 0, size) + p.ActionList = tSlice + for i := 0; i < size; i++ { + _elem16 := &ConfigurationProposalAction{} + if err := _elem16.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem16), err) + } + p.ActionList = append(p.ActionList, _elem16) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ConfigurationBalancerRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Force = v + } + return nil +} + +func (p *ConfigurationBalancerRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + temp := BalancerRequestType(v) + p.BalanceType = &temp + } + return nil +} + +func (p *ConfigurationBalancerRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_balancer_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationBalancerRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("gpid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:gpid: ", p), err) + } + if err := p.Gpid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Gpid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:gpid: ", p), err) + } + return err +} + +func (p *ConfigurationBalancerRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("action_list", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:action_list: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ActionList)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.ActionList { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:action_list: ", p), err) + } + return err +} + +func (p *ConfigurationBalancerRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetForce() { + if err := oprot.WriteFieldBegin("force", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:force: ", p), err) + } + if err := oprot.WriteBool(bool(p.Force)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.force (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:force: ", p), err) + } + } + return err +} + +func (p *ConfigurationBalancerRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBalanceType() { + if err := oprot.WriteFieldBegin("balance_type", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:balance_type: ", p), err) + } + if err := oprot.WriteI32(int32(*p.BalanceType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.balance_type (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:balance_type: ", p), err) + } + } + return err +} + +func (p *ConfigurationBalancerRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationBalancerRequest(%+v)", *p) +} + +// Attributes: +// - Err +type ConfigurationBalancerResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` +} + +func NewConfigurationBalancerResponse() *ConfigurationBalancerResponse { + return &ConfigurationBalancerResponse{} +} + +var ConfigurationBalancerResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationBalancerResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationBalancerResponse_Err_DEFAULT + } + return p.Err +} +func (p *ConfigurationBalancerResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationBalancerResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationBalancerResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationBalancerResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_balancer_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationBalancerResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationBalancerResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationBalancerResponse(%+v)", *p) +} + +// Attributes: +// - Pid +type DddDiagnoseRequest struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` +} + +func NewDddDiagnoseRequest() *DddDiagnoseRequest { + return &DddDiagnoseRequest{} +} + +var DddDiagnoseRequest_Pid_DEFAULT *base.Gpid + +func (p *DddDiagnoseRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return DddDiagnoseRequest_Pid_DEFAULT + } + return p.Pid +} +func (p *DddDiagnoseRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *DddDiagnoseRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DddDiagnoseRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *DddDiagnoseRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ddd_diagnose_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DddDiagnoseRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *DddDiagnoseRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DddDiagnoseRequest(%+v)", *p) +} + +// Attributes: +// - Node +// - DropTimeMs +// - IsAlive +// - IsCollected +// - Ballot +// - LastCommittedDecree +// - LastPreparedDecree +// - HpNode +type DddNodeInfo struct { + Node *base.RPCAddress `thrift:"node,1" db:"node" json:"node"` + DropTimeMs int64 `thrift:"drop_time_ms,2" db:"drop_time_ms" json:"drop_time_ms"` + IsAlive bool `thrift:"is_alive,3" db:"is_alive" json:"is_alive"` + IsCollected bool `thrift:"is_collected,4" db:"is_collected" json:"is_collected"` + Ballot int64 `thrift:"ballot,5" db:"ballot" json:"ballot"` + LastCommittedDecree int64 `thrift:"last_committed_decree,6" db:"last_committed_decree" json:"last_committed_decree"` + LastPreparedDecree int64 `thrift:"last_prepared_decree,7" db:"last_prepared_decree" json:"last_prepared_decree"` + HpNode *base.HostPort `thrift:"hp_node,8" db:"hp_node" json:"hp_node,omitempty"` +} + +func NewDddNodeInfo() *DddNodeInfo { + return &DddNodeInfo{} +} + +var DddNodeInfo_Node_DEFAULT *base.RPCAddress + +func (p *DddNodeInfo) GetNode() *base.RPCAddress { + if !p.IsSetNode() { + return DddNodeInfo_Node_DEFAULT + } + return p.Node +} + +func (p *DddNodeInfo) GetDropTimeMs() int64 { + return p.DropTimeMs +} + +func (p *DddNodeInfo) GetIsAlive() bool { + return p.IsAlive +} + +func (p *DddNodeInfo) GetIsCollected() bool { + return p.IsCollected +} + +func (p *DddNodeInfo) GetBallot() int64 { + return p.Ballot +} + +func (p *DddNodeInfo) GetLastCommittedDecree() int64 { + return p.LastCommittedDecree +} + +func (p *DddNodeInfo) GetLastPreparedDecree() int64 { + return p.LastPreparedDecree +} + +var DddNodeInfo_HpNode_DEFAULT *base.HostPort + +func (p *DddNodeInfo) GetHpNode() *base.HostPort { + if !p.IsSetHpNode() { + return DddNodeInfo_HpNode_DEFAULT + } + return p.HpNode +} +func (p *DddNodeInfo) IsSetNode() bool { + return p.Node != nil +} + +func (p *DddNodeInfo) IsSetHpNode() bool { + return p.HpNode != nil +} + +func (p *DddNodeInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I64 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DddNodeInfo) ReadField1(iprot thrift.TProtocol) error { + p.Node = &base.RPCAddress{} + if err := p.Node.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node), err) + } + return nil +} + +func (p *DddNodeInfo) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.DropTimeMs = v + } + return nil +} + +func (p *DddNodeInfo) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.IsAlive = v + } + return nil +} + +func (p *DddNodeInfo) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.IsCollected = v + } + return nil +} + +func (p *DddNodeInfo) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *DddNodeInfo) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.LastCommittedDecree = v + } + return nil +} + +func (p *DddNodeInfo) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.LastPreparedDecree = v + } + return nil +} + +func (p *DddNodeInfo) ReadField8(iprot thrift.TProtocol) error { + p.HpNode = &base.HostPort{} + if err := p.HpNode.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode), err) + } + return nil +} + +func (p *DddNodeInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ddd_node_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DddNodeInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:node: ", p), err) + } + if err := p.Node.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:node: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("drop_time_ms", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:drop_time_ms: ", p), err) + } + if err := oprot.WriteI64(int64(p.DropTimeMs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.drop_time_ms (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:drop_time_ms: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_alive", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:is_alive: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsAlive)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_alive (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:is_alive: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_collected", thrift.BOOL, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:is_collected: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsCollected)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_collected (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:is_collected: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:ballot: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_committed_decree", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:last_committed_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastCommittedDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_committed_decree (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:last_committed_decree: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_prepared_decree", thrift.I64, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:last_prepared_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastPreparedDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_prepared_decree (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:last_prepared_decree: ", p), err) + } + return err +} + +func (p *DddNodeInfo) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode() { + if err := oprot.WriteFieldBegin("hp_node", thrift.STRUCT, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:hp_node: ", p), err) + } + if err := p.HpNode.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:hp_node: ", p), err) + } + } + return err +} + +func (p *DddNodeInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DddNodeInfo(%+v)", *p) +} + +// Attributes: +// - Config +// - Dropped +// - Reason +type DddPartitionInfo struct { + Config *replication.PartitionConfiguration `thrift:"config,1" db:"config" json:"config"` + Dropped []*DddNodeInfo `thrift:"dropped,2" db:"dropped" json:"dropped"` + Reason string `thrift:"reason,3" db:"reason" json:"reason"` +} + +func NewDddPartitionInfo() *DddPartitionInfo { + return &DddPartitionInfo{} +} + +var DddPartitionInfo_Config_DEFAULT *replication.PartitionConfiguration + +func (p *DddPartitionInfo) GetConfig() *replication.PartitionConfiguration { + if !p.IsSetConfig() { + return DddPartitionInfo_Config_DEFAULT + } + return p.Config +} + +func (p *DddPartitionInfo) GetDropped() []*DddNodeInfo { + return p.Dropped +} + +func (p *DddPartitionInfo) GetReason() string { + return p.Reason +} +func (p *DddPartitionInfo) IsSetConfig() bool { + return p.Config != nil +} + +func (p *DddPartitionInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DddPartitionInfo) ReadField1(iprot thrift.TProtocol) error { + p.Config = &replication.PartitionConfiguration{} + if err := p.Config.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Config), err) + } + return nil +} + +func (p *DddPartitionInfo) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*DddNodeInfo, 0, size) + p.Dropped = tSlice + for i := 0; i < size; i++ { + _elem17 := &DddNodeInfo{} + if err := _elem17.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err) + } + p.Dropped = append(p.Dropped, _elem17) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *DddPartitionInfo) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Reason = v + } + return nil +} + +func (p *DddPartitionInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ddd_partition_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DddPartitionInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("config", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:config: ", p), err) + } + if err := p.Config.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Config), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:config: ", p), err) + } + return err +} + +func (p *DddPartitionInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("dropped", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:dropped: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Dropped)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Dropped { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:dropped: ", p), err) + } + return err +} + +func (p *DddPartitionInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("reason", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:reason: ", p), err) + } + if err := oprot.WriteString(string(p.Reason)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.reason (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:reason: ", p), err) + } + return err +} + +func (p *DddPartitionInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DddPartitionInfo(%+v)", *p) +} + +// Attributes: +// - Err +// - Partitions +type DddDiagnoseResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Partitions []*DddPartitionInfo `thrift:"partitions,2" db:"partitions" json:"partitions"` +} + +func NewDddDiagnoseResponse() *DddDiagnoseResponse { + return &DddDiagnoseResponse{} +} + +var DddDiagnoseResponse_Err_DEFAULT *base.ErrorCode + +func (p *DddDiagnoseResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return DddDiagnoseResponse_Err_DEFAULT + } + return p.Err +} + +func (p *DddDiagnoseResponse) GetPartitions() []*DddPartitionInfo { + return p.Partitions +} +func (p *DddDiagnoseResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *DddDiagnoseResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DddDiagnoseResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *DddDiagnoseResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*DddPartitionInfo, 0, size) + p.Partitions = tSlice + for i := 0; i < size; i++ { + _elem18 := &DddPartitionInfo{} + if err := _elem18.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err) + } + p.Partitions = append(p.Partitions, _elem18) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *DddDiagnoseResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ddd_diagnose_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DddDiagnoseResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *DddDiagnoseResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partitions", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:partitions: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Partitions { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:partitions: ", p), err) + } + return err +} + +func (p *DddDiagnoseResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DddDiagnoseResponse(%+v)", *p) +} + +// Attributes: +// - AppName +type ConfigurationGetMaxReplicaCountRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` +} + +func NewConfigurationGetMaxReplicaCountRequest() *ConfigurationGetMaxReplicaCountRequest { + return &ConfigurationGetMaxReplicaCountRequest{} +} + +func (p *ConfigurationGetMaxReplicaCountRequest) GetAppName() string { + return p.AppName +} +func (p *ConfigurationGetMaxReplicaCountRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_get_max_replica_count_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ConfigurationGetMaxReplicaCountRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationGetMaxReplicaCountRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - MaxReplicaCount +// - HintMessage +type ConfigurationGetMaxReplicaCountResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + MaxReplicaCount int32 `thrift:"max_replica_count,2" db:"max_replica_count" json:"max_replica_count"` + HintMessage string `thrift:"hint_message,3" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationGetMaxReplicaCountResponse() *ConfigurationGetMaxReplicaCountResponse { + return &ConfigurationGetMaxReplicaCountResponse{} +} + +var ConfigurationGetMaxReplicaCountResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationGetMaxReplicaCountResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationGetMaxReplicaCountResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationGetMaxReplicaCountResponse) GetMaxReplicaCount() int32 { + return p.MaxReplicaCount +} + +func (p *ConfigurationGetMaxReplicaCountResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationGetMaxReplicaCountResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationGetMaxReplicaCountResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.MaxReplicaCount = v + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_get_max_replica_count_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationGetMaxReplicaCountResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationGetMaxReplicaCountResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_replica_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:max_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_replica_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:max_replica_count: ", p), err) + } + return err +} + +func (p *ConfigurationGetMaxReplicaCountResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationGetMaxReplicaCountResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationGetMaxReplicaCountResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - MaxReplicaCount +type ConfigurationSetMaxReplicaCountRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + MaxReplicaCount int32 `thrift:"max_replica_count,2" db:"max_replica_count" json:"max_replica_count"` +} + +func NewConfigurationSetMaxReplicaCountRequest() *ConfigurationSetMaxReplicaCountRequest { + return &ConfigurationSetMaxReplicaCountRequest{} +} + +func (p *ConfigurationSetMaxReplicaCountRequest) GetAppName() string { + return p.AppName +} + +func (p *ConfigurationSetMaxReplicaCountRequest) GetMaxReplicaCount() int32 { + return p.MaxReplicaCount +} +func (p *ConfigurationSetMaxReplicaCountRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.MaxReplicaCount = v + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_set_max_replica_count_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ConfigurationSetMaxReplicaCountRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_replica_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:max_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_replica_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:max_replica_count: ", p), err) + } + return err +} + +func (p *ConfigurationSetMaxReplicaCountRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationSetMaxReplicaCountRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - OldMaxReplicaCount +// - HintMessage +type ConfigurationSetMaxReplicaCountResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + OldMaxReplicaCount int32 `thrift:"old_max_replica_count,2" db:"old_max_replica_count" json:"old_max_replica_count"` + HintMessage string `thrift:"hint_message,3" db:"hint_message" json:"hint_message"` +} + +func NewConfigurationSetMaxReplicaCountResponse() *ConfigurationSetMaxReplicaCountResponse { + return &ConfigurationSetMaxReplicaCountResponse{} +} + +var ConfigurationSetMaxReplicaCountResponse_Err_DEFAULT *base.ErrorCode + +func (p *ConfigurationSetMaxReplicaCountResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ConfigurationSetMaxReplicaCountResponse_Err_DEFAULT + } + return p.Err +} + +func (p *ConfigurationSetMaxReplicaCountResponse) GetOldMaxReplicaCount() int32 { + return p.OldMaxReplicaCount +} + +func (p *ConfigurationSetMaxReplicaCountResponse) GetHintMessage() string { + return p.HintMessage +} +func (p *ConfigurationSetMaxReplicaCountResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ConfigurationSetMaxReplicaCountResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.OldMaxReplicaCount = v + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.HintMessage = v + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("configuration_set_max_replica_count_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ConfigurationSetMaxReplicaCountResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ConfigurationSetMaxReplicaCountResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("old_max_replica_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:old_max_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.OldMaxReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.old_max_replica_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:old_max_replica_count: ", p), err) + } + return err +} + +func (p *ConfigurationSetMaxReplicaCountResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_message", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hint_message: ", p), err) + } + if err := oprot.WriteString(string(p.HintMessage)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_message (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hint_message: ", p), err) + } + return err +} + +func (p *ConfigurationSetMaxReplicaCountResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ConfigurationSetMaxReplicaCountResponse(%+v)", *p) +} + +type AdminClient interface { + // Parameters: + // - Req + CreateApp(ctx context.Context, req *ConfigurationCreateAppRequest) (r *ConfigurationCreateAppResponse, err error) + // Parameters: + // - Req + DropApp(ctx context.Context, req *ConfigurationDropAppRequest) (r *ConfigurationDropAppResponse, err error) + // Parameters: + // - Req + RecallApp(ctx context.Context, req *ConfigurationRecallAppRequest) (r *ConfigurationRecallAppResponse, err error) + // Parameters: + // - Req + ListApps(ctx context.Context, req *ConfigurationListAppsRequest) (r *ConfigurationListAppsResponse, err error) + // Parameters: + // - Req + AddDuplication(ctx context.Context, req *DuplicationAddRequest) (r *DuplicationAddResponse, err error) + // Parameters: + // - Req + QueryDuplication(ctx context.Context, req *DuplicationQueryRequest) (r *DuplicationQueryResponse, err error) + // Parameters: + // - Req + ModifyDuplication(ctx context.Context, req *DuplicationModifyRequest) (r *DuplicationModifyResponse, err error) + // Parameters: + // - Req + QueryAppInfo(ctx context.Context, req *QueryAppInfoRequest) (r *QueryAppInfoResponse, err error) + // Parameters: + // - Req + UpdateAppEnv(ctx context.Context, req *ConfigurationUpdateAppEnvRequest) (r *ConfigurationUpdateAppEnvResponse, err error) + // Parameters: + // - Req + ListNodes(ctx context.Context, req *ConfigurationListNodesRequest) (r *ConfigurationListNodesResponse, err error) + // Parameters: + // - Req + QueryClusterInfo(ctx context.Context, req *ConfigurationClusterInfoRequest) (r *ConfigurationClusterInfoResponse, err error) + // Parameters: + // - Req + MetaControl(ctx context.Context, req *ConfigurationMetaControlRequest) (r *ConfigurationMetaControlResponse, err error) + // Parameters: + // - Req + QueryBackupPolicy(ctx context.Context, req *ConfigurationQueryBackupPolicyRequest) (r *ConfigurationQueryBackupPolicyResponse, err error) + // Parameters: + // - Req + Balance(ctx context.Context, req *ConfigurationBalancerRequest) (r *ConfigurationBalancerResponse, err error) + // Parameters: + // - Req + StartBackupApp(ctx context.Context, req *StartBackupAppRequest) (r *StartBackupAppResponse, err error) + // Parameters: + // - Req + QueryBackupStatus(ctx context.Context, req *QueryBackupStatusRequest) (r *QueryBackupStatusResponse, err error) + // Parameters: + // - Req + RestoreApp(ctx context.Context, req *ConfigurationRestoreRequest) (r *ConfigurationCreateAppResponse, err error) + // Parameters: + // - Req + StartPartitionSplit(ctx context.Context, req *StartPartitionSplitRequest) (r *StartPartitionSplitResponse, err error) + // Parameters: + // - Req + QuerySplitStatus(ctx context.Context, req *QuerySplitRequest) (r *QuerySplitResponse, err error) + // Parameters: + // - Req + ControlPartitionSplit(ctx context.Context, req *ControlSplitRequest) (r *ControlSplitResponse, err error) + // Parameters: + // - Req + StartBulkLoad(ctx context.Context, req *StartBulkLoadRequest) (r *StartBulkLoadResponse, err error) + // Parameters: + // - Req + QueryBulkLoadStatus(ctx context.Context, req *QueryBulkLoadRequest) (r *QueryBulkLoadResponse, err error) + // Parameters: + // - Req + ControlBulkLoad(ctx context.Context, req *ControlBulkLoadRequest) (r *ControlBulkLoadResponse, err error) + // Parameters: + // - Req + ClearBulkLoad(ctx context.Context, req *ClearBulkLoadStateRequest) (r *ClearBulkLoadStateResponse, err error) + // Parameters: + // - Req + StartManualCompact(ctx context.Context, req *StartAppManualCompactRequest) (r *StartAppManualCompactResponse, err error) + // Parameters: + // - Req + QueryManualCompact(ctx context.Context, req *QueryAppManualCompactRequest) (r *QueryAppManualCompactResponse, err error) +} + +type AdminClientClient struct { + c thrift.TClient +} + +func NewAdminClientClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AdminClientClient { + return &AdminClientClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewAdminClientClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AdminClientClient { + return &AdminClientClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewAdminClientClient(c thrift.TClient) *AdminClientClient { + return &AdminClientClient{ + c: c, + } +} + +func (p *AdminClientClient) Client_() thrift.TClient { + return p.c +} + +// Parameters: +// - Req +func (p *AdminClientClient) CreateApp(ctx context.Context, req *ConfigurationCreateAppRequest) (r *ConfigurationCreateAppResponse, err error) { + var _args19 AdminClientCreateAppArgs + _args19.Req = req + var _result20 AdminClientCreateAppResult + if err = p.Client_().Call(ctx, "create_app", &_args19, &_result20); err != nil { + return + } + return _result20.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) DropApp(ctx context.Context, req *ConfigurationDropAppRequest) (r *ConfigurationDropAppResponse, err error) { + var _args21 AdminClientDropAppArgs + _args21.Req = req + var _result22 AdminClientDropAppResult + if err = p.Client_().Call(ctx, "drop_app", &_args21, &_result22); err != nil { + return + } + return _result22.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) RecallApp(ctx context.Context, req *ConfigurationRecallAppRequest) (r *ConfigurationRecallAppResponse, err error) { + var _args23 AdminClientRecallAppArgs + _args23.Req = req + var _result24 AdminClientRecallAppResult + if err = p.Client_().Call(ctx, "recall_app", &_args23, &_result24); err != nil { + return + } + return _result24.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) ListApps(ctx context.Context, req *ConfigurationListAppsRequest) (r *ConfigurationListAppsResponse, err error) { + var _args25 AdminClientListAppsArgs + _args25.Req = req + var _result26 AdminClientListAppsResult + if err = p.Client_().Call(ctx, "list_apps", &_args25, &_result26); err != nil { + return + } + return _result26.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) AddDuplication(ctx context.Context, req *DuplicationAddRequest) (r *DuplicationAddResponse, err error) { + var _args27 AdminClientAddDuplicationArgs + _args27.Req = req + var _result28 AdminClientAddDuplicationResult + if err = p.Client_().Call(ctx, "add_duplication", &_args27, &_result28); err != nil { + return + } + return _result28.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryDuplication(ctx context.Context, req *DuplicationQueryRequest) (r *DuplicationQueryResponse, err error) { + var _args29 AdminClientQueryDuplicationArgs + _args29.Req = req + var _result30 AdminClientQueryDuplicationResult + if err = p.Client_().Call(ctx, "query_duplication", &_args29, &_result30); err != nil { + return + } + return _result30.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) ModifyDuplication(ctx context.Context, req *DuplicationModifyRequest) (r *DuplicationModifyResponse, err error) { + var _args31 AdminClientModifyDuplicationArgs + _args31.Req = req + var _result32 AdminClientModifyDuplicationResult + if err = p.Client_().Call(ctx, "modify_duplication", &_args31, &_result32); err != nil { + return + } + return _result32.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryAppInfo(ctx context.Context, req *QueryAppInfoRequest) (r *QueryAppInfoResponse, err error) { + var _args33 AdminClientQueryAppInfoArgs + _args33.Req = req + var _result34 AdminClientQueryAppInfoResult + if err = p.Client_().Call(ctx, "query_app_info", &_args33, &_result34); err != nil { + return + } + return _result34.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) UpdateAppEnv(ctx context.Context, req *ConfigurationUpdateAppEnvRequest) (r *ConfigurationUpdateAppEnvResponse, err error) { + var _args35 AdminClientUpdateAppEnvArgs + _args35.Req = req + var _result36 AdminClientUpdateAppEnvResult + if err = p.Client_().Call(ctx, "update_app_env", &_args35, &_result36); err != nil { + return + } + return _result36.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) ListNodes(ctx context.Context, req *ConfigurationListNodesRequest) (r *ConfigurationListNodesResponse, err error) { + var _args37 AdminClientListNodesArgs + _args37.Req = req + var _result38 AdminClientListNodesResult + if err = p.Client_().Call(ctx, "list_nodes", &_args37, &_result38); err != nil { + return + } + return _result38.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryClusterInfo(ctx context.Context, req *ConfigurationClusterInfoRequest) (r *ConfigurationClusterInfoResponse, err error) { + var _args39 AdminClientQueryClusterInfoArgs + _args39.Req = req + var _result40 AdminClientQueryClusterInfoResult + if err = p.Client_().Call(ctx, "query_cluster_info", &_args39, &_result40); err != nil { + return + } + return _result40.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) MetaControl(ctx context.Context, req *ConfigurationMetaControlRequest) (r *ConfigurationMetaControlResponse, err error) { + var _args41 AdminClientMetaControlArgs + _args41.Req = req + var _result42 AdminClientMetaControlResult + if err = p.Client_().Call(ctx, "meta_control", &_args41, &_result42); err != nil { + return + } + return _result42.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryBackupPolicy(ctx context.Context, req *ConfigurationQueryBackupPolicyRequest) (r *ConfigurationQueryBackupPolicyResponse, err error) { + var _args43 AdminClientQueryBackupPolicyArgs + _args43.Req = req + var _result44 AdminClientQueryBackupPolicyResult + if err = p.Client_().Call(ctx, "query_backup_policy", &_args43, &_result44); err != nil { + return + } + return _result44.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) Balance(ctx context.Context, req *ConfigurationBalancerRequest) (r *ConfigurationBalancerResponse, err error) { + var _args45 AdminClientBalanceArgs + _args45.Req = req + var _result46 AdminClientBalanceResult + if err = p.Client_().Call(ctx, "balance", &_args45, &_result46); err != nil { + return + } + return _result46.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) StartBackupApp(ctx context.Context, req *StartBackupAppRequest) (r *StartBackupAppResponse, err error) { + var _args47 AdminClientStartBackupAppArgs + _args47.Req = req + var _result48 AdminClientStartBackupAppResult + if err = p.Client_().Call(ctx, "start_backup_app", &_args47, &_result48); err != nil { + return + } + return _result48.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryBackupStatus(ctx context.Context, req *QueryBackupStatusRequest) (r *QueryBackupStatusResponse, err error) { + var _args49 AdminClientQueryBackupStatusArgs + _args49.Req = req + var _result50 AdminClientQueryBackupStatusResult + if err = p.Client_().Call(ctx, "query_backup_status", &_args49, &_result50); err != nil { + return + } + return _result50.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) RestoreApp(ctx context.Context, req *ConfigurationRestoreRequest) (r *ConfigurationCreateAppResponse, err error) { + var _args51 AdminClientRestoreAppArgs + _args51.Req = req + var _result52 AdminClientRestoreAppResult + if err = p.Client_().Call(ctx, "restore_app", &_args51, &_result52); err != nil { + return + } + return _result52.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) StartPartitionSplit(ctx context.Context, req *StartPartitionSplitRequest) (r *StartPartitionSplitResponse, err error) { + var _args53 AdminClientStartPartitionSplitArgs + _args53.Req = req + var _result54 AdminClientStartPartitionSplitResult + if err = p.Client_().Call(ctx, "start_partition_split", &_args53, &_result54); err != nil { + return + } + return _result54.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QuerySplitStatus(ctx context.Context, req *QuerySplitRequest) (r *QuerySplitResponse, err error) { + var _args55 AdminClientQuerySplitStatusArgs + _args55.Req = req + var _result56 AdminClientQuerySplitStatusResult + if err = p.Client_().Call(ctx, "query_split_status", &_args55, &_result56); err != nil { + return + } + return _result56.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) ControlPartitionSplit(ctx context.Context, req *ControlSplitRequest) (r *ControlSplitResponse, err error) { + var _args57 AdminClientControlPartitionSplitArgs + _args57.Req = req + var _result58 AdminClientControlPartitionSplitResult + if err = p.Client_().Call(ctx, "control_partition_split", &_args57, &_result58); err != nil { + return + } + return _result58.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) StartBulkLoad(ctx context.Context, req *StartBulkLoadRequest) (r *StartBulkLoadResponse, err error) { + var _args59 AdminClientStartBulkLoadArgs + _args59.Req = req + var _result60 AdminClientStartBulkLoadResult + if err = p.Client_().Call(ctx, "start_bulk_load", &_args59, &_result60); err != nil { + return + } + return _result60.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryBulkLoadStatus(ctx context.Context, req *QueryBulkLoadRequest) (r *QueryBulkLoadResponse, err error) { + var _args61 AdminClientQueryBulkLoadStatusArgs + _args61.Req = req + var _result62 AdminClientQueryBulkLoadStatusResult + if err = p.Client_().Call(ctx, "query_bulk_load_status", &_args61, &_result62); err != nil { + return + } + return _result62.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) ControlBulkLoad(ctx context.Context, req *ControlBulkLoadRequest) (r *ControlBulkLoadResponse, err error) { + var _args63 AdminClientControlBulkLoadArgs + _args63.Req = req + var _result64 AdminClientControlBulkLoadResult + if err = p.Client_().Call(ctx, "control_bulk_load", &_args63, &_result64); err != nil { + return + } + return _result64.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) ClearBulkLoad(ctx context.Context, req *ClearBulkLoadStateRequest) (r *ClearBulkLoadStateResponse, err error) { + var _args65 AdminClientClearBulkLoadArgs + _args65.Req = req + var _result66 AdminClientClearBulkLoadResult + if err = p.Client_().Call(ctx, "clear_bulk_load", &_args65, &_result66); err != nil { + return + } + return _result66.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) StartManualCompact(ctx context.Context, req *StartAppManualCompactRequest) (r *StartAppManualCompactResponse, err error) { + var _args67 AdminClientStartManualCompactArgs + _args67.Req = req + var _result68 AdminClientStartManualCompactResult + if err = p.Client_().Call(ctx, "start_manual_compact", &_args67, &_result68); err != nil { + return + } + return _result68.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *AdminClientClient) QueryManualCompact(ctx context.Context, req *QueryAppManualCompactRequest) (r *QueryAppManualCompactResponse, err error) { + var _args69 AdminClientQueryManualCompactArgs + _args69.Req = req + var _result70 AdminClientQueryManualCompactResult + if err = p.Client_().Call(ctx, "query_manual_compact", &_args69, &_result70); err != nil { + return + } + return _result70.GetSuccess(), nil +} + +type AdminClientProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler AdminClient +} + +func (p *AdminClientProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *AdminClientProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *AdminClientProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewAdminClientProcessor(handler AdminClient) *AdminClientProcessor { + + self71 := &AdminClientProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self71.processorMap["create_app"] = &adminClientProcessorCreateApp{handler: handler} + self71.processorMap["drop_app"] = &adminClientProcessorDropApp{handler: handler} + self71.processorMap["recall_app"] = &adminClientProcessorRecallApp{handler: handler} + self71.processorMap["list_apps"] = &adminClientProcessorListApps{handler: handler} + self71.processorMap["add_duplication"] = &adminClientProcessorAddDuplication{handler: handler} + self71.processorMap["query_duplication"] = &adminClientProcessorQueryDuplication{handler: handler} + self71.processorMap["modify_duplication"] = &adminClientProcessorModifyDuplication{handler: handler} + self71.processorMap["query_app_info"] = &adminClientProcessorQueryAppInfo{handler: handler} + self71.processorMap["update_app_env"] = &adminClientProcessorUpdateAppEnv{handler: handler} + self71.processorMap["list_nodes"] = &adminClientProcessorListNodes{handler: handler} + self71.processorMap["query_cluster_info"] = &adminClientProcessorQueryClusterInfo{handler: handler} + self71.processorMap["meta_control"] = &adminClientProcessorMetaControl{handler: handler} + self71.processorMap["query_backup_policy"] = &adminClientProcessorQueryBackupPolicy{handler: handler} + self71.processorMap["balance"] = &adminClientProcessorBalance{handler: handler} + self71.processorMap["start_backup_app"] = &adminClientProcessorStartBackupApp{handler: handler} + self71.processorMap["query_backup_status"] = &adminClientProcessorQueryBackupStatus{handler: handler} + self71.processorMap["restore_app"] = &adminClientProcessorRestoreApp{handler: handler} + self71.processorMap["start_partition_split"] = &adminClientProcessorStartPartitionSplit{handler: handler} + self71.processorMap["query_split_status"] = &adminClientProcessorQuerySplitStatus{handler: handler} + self71.processorMap["control_partition_split"] = &adminClientProcessorControlPartitionSplit{handler: handler} + self71.processorMap["start_bulk_load"] = &adminClientProcessorStartBulkLoad{handler: handler} + self71.processorMap["query_bulk_load_status"] = &adminClientProcessorQueryBulkLoadStatus{handler: handler} + self71.processorMap["control_bulk_load"] = &adminClientProcessorControlBulkLoad{handler: handler} + self71.processorMap["clear_bulk_load"] = &adminClientProcessorClearBulkLoad{handler: handler} + self71.processorMap["start_manual_compact"] = &adminClientProcessorStartManualCompact{handler: handler} + self71.processorMap["query_manual_compact"] = &adminClientProcessorQueryManualCompact{handler: handler} + return self71 +} + +func (p *AdminClientProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x72 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x72.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x72 + +} + +type adminClientProcessorCreateApp struct { + handler AdminClient +} + +func (p *adminClientProcessorCreateApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientCreateAppArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("create_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientCreateAppResult{} + var retval *ConfigurationCreateAppResponse + var err2 error + if retval, err2 = p.handler.CreateApp(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing create_app: "+err2.Error()) + oprot.WriteMessageBegin("create_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("create_app", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorDropApp struct { + handler AdminClient +} + +func (p *adminClientProcessorDropApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientDropAppArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("drop_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientDropAppResult{} + var retval *ConfigurationDropAppResponse + var err2 error + if retval, err2 = p.handler.DropApp(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing drop_app: "+err2.Error()) + oprot.WriteMessageBegin("drop_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("drop_app", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorRecallApp struct { + handler AdminClient +} + +func (p *adminClientProcessorRecallApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientRecallAppArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("recall_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientRecallAppResult{} + var retval *ConfigurationRecallAppResponse + var err2 error + if retval, err2 = p.handler.RecallApp(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing recall_app: "+err2.Error()) + oprot.WriteMessageBegin("recall_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("recall_app", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorListApps struct { + handler AdminClient +} + +func (p *adminClientProcessorListApps) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientListAppsArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("list_apps", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientListAppsResult{} + var retval *ConfigurationListAppsResponse + var err2 error + if retval, err2 = p.handler.ListApps(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing list_apps: "+err2.Error()) + oprot.WriteMessageBegin("list_apps", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("list_apps", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorAddDuplication struct { + handler AdminClient +} + +func (p *adminClientProcessorAddDuplication) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientAddDuplicationArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("add_duplication", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientAddDuplicationResult{} + var retval *DuplicationAddResponse + var err2 error + if retval, err2 = p.handler.AddDuplication(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing add_duplication: "+err2.Error()) + oprot.WriteMessageBegin("add_duplication", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("add_duplication", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryDuplication struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryDuplication) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryDuplicationArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_duplication", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryDuplicationResult{} + var retval *DuplicationQueryResponse + var err2 error + if retval, err2 = p.handler.QueryDuplication(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_duplication: "+err2.Error()) + oprot.WriteMessageBegin("query_duplication", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_duplication", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorModifyDuplication struct { + handler AdminClient +} + +func (p *adminClientProcessorModifyDuplication) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientModifyDuplicationArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("modify_duplication", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientModifyDuplicationResult{} + var retval *DuplicationModifyResponse + var err2 error + if retval, err2 = p.handler.ModifyDuplication(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing modify_duplication: "+err2.Error()) + oprot.WriteMessageBegin("modify_duplication", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("modify_duplication", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryAppInfo struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryAppInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryAppInfoArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_app_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryAppInfoResult{} + var retval *QueryAppInfoResponse + var err2 error + if retval, err2 = p.handler.QueryAppInfo(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_app_info: "+err2.Error()) + oprot.WriteMessageBegin("query_app_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_app_info", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorUpdateAppEnv struct { + handler AdminClient +} + +func (p *adminClientProcessorUpdateAppEnv) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientUpdateAppEnvArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("update_app_env", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientUpdateAppEnvResult{} + var retval *ConfigurationUpdateAppEnvResponse + var err2 error + if retval, err2 = p.handler.UpdateAppEnv(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing update_app_env: "+err2.Error()) + oprot.WriteMessageBegin("update_app_env", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("update_app_env", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorListNodes struct { + handler AdminClient +} + +func (p *adminClientProcessorListNodes) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientListNodesArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("list_nodes", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientListNodesResult{} + var retval *ConfigurationListNodesResponse + var err2 error + if retval, err2 = p.handler.ListNodes(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing list_nodes: "+err2.Error()) + oprot.WriteMessageBegin("list_nodes", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("list_nodes", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryClusterInfo struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryClusterInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryClusterInfoArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_cluster_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryClusterInfoResult{} + var retval *ConfigurationClusterInfoResponse + var err2 error + if retval, err2 = p.handler.QueryClusterInfo(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_cluster_info: "+err2.Error()) + oprot.WriteMessageBegin("query_cluster_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_cluster_info", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorMetaControl struct { + handler AdminClient +} + +func (p *adminClientProcessorMetaControl) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientMetaControlArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("meta_control", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientMetaControlResult{} + var retval *ConfigurationMetaControlResponse + var err2 error + if retval, err2 = p.handler.MetaControl(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing meta_control: "+err2.Error()) + oprot.WriteMessageBegin("meta_control", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("meta_control", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryBackupPolicy struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryBackupPolicy) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryBackupPolicyArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_backup_policy", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryBackupPolicyResult{} + var retval *ConfigurationQueryBackupPolicyResponse + var err2 error + if retval, err2 = p.handler.QueryBackupPolicy(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_backup_policy: "+err2.Error()) + oprot.WriteMessageBegin("query_backup_policy", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_backup_policy", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorBalance struct { + handler AdminClient +} + +func (p *adminClientProcessorBalance) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientBalanceArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("balance", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientBalanceResult{} + var retval *ConfigurationBalancerResponse + var err2 error + if retval, err2 = p.handler.Balance(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing balance: "+err2.Error()) + oprot.WriteMessageBegin("balance", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("balance", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorStartBackupApp struct { + handler AdminClient +} + +func (p *adminClientProcessorStartBackupApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientStartBackupAppArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("start_backup_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientStartBackupAppResult{} + var retval *StartBackupAppResponse + var err2 error + if retval, err2 = p.handler.StartBackupApp(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing start_backup_app: "+err2.Error()) + oprot.WriteMessageBegin("start_backup_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("start_backup_app", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryBackupStatus struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryBackupStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryBackupStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_backup_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryBackupStatusResult{} + var retval *QueryBackupStatusResponse + var err2 error + if retval, err2 = p.handler.QueryBackupStatus(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_backup_status: "+err2.Error()) + oprot.WriteMessageBegin("query_backup_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_backup_status", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorRestoreApp struct { + handler AdminClient +} + +func (p *adminClientProcessorRestoreApp) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientRestoreAppArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("restore_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientRestoreAppResult{} + var retval *ConfigurationCreateAppResponse + var err2 error + if retval, err2 = p.handler.RestoreApp(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing restore_app: "+err2.Error()) + oprot.WriteMessageBegin("restore_app", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("restore_app", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorStartPartitionSplit struct { + handler AdminClient +} + +func (p *adminClientProcessorStartPartitionSplit) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientStartPartitionSplitArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("start_partition_split", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientStartPartitionSplitResult{} + var retval *StartPartitionSplitResponse + var err2 error + if retval, err2 = p.handler.StartPartitionSplit(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing start_partition_split: "+err2.Error()) + oprot.WriteMessageBegin("start_partition_split", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("start_partition_split", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQuerySplitStatus struct { + handler AdminClient +} + +func (p *adminClientProcessorQuerySplitStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQuerySplitStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_split_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQuerySplitStatusResult{} + var retval *QuerySplitResponse + var err2 error + if retval, err2 = p.handler.QuerySplitStatus(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_split_status: "+err2.Error()) + oprot.WriteMessageBegin("query_split_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_split_status", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorControlPartitionSplit struct { + handler AdminClient +} + +func (p *adminClientProcessorControlPartitionSplit) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientControlPartitionSplitArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("control_partition_split", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientControlPartitionSplitResult{} + var retval *ControlSplitResponse + var err2 error + if retval, err2 = p.handler.ControlPartitionSplit(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing control_partition_split: "+err2.Error()) + oprot.WriteMessageBegin("control_partition_split", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("control_partition_split", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorStartBulkLoad struct { + handler AdminClient +} + +func (p *adminClientProcessorStartBulkLoad) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientStartBulkLoadArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("start_bulk_load", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientStartBulkLoadResult{} + var retval *StartBulkLoadResponse + var err2 error + if retval, err2 = p.handler.StartBulkLoad(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing start_bulk_load: "+err2.Error()) + oprot.WriteMessageBegin("start_bulk_load", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("start_bulk_load", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryBulkLoadStatus struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryBulkLoadStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryBulkLoadStatusArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_bulk_load_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryBulkLoadStatusResult{} + var retval *QueryBulkLoadResponse + var err2 error + if retval, err2 = p.handler.QueryBulkLoadStatus(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_bulk_load_status: "+err2.Error()) + oprot.WriteMessageBegin("query_bulk_load_status", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_bulk_load_status", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorControlBulkLoad struct { + handler AdminClient +} + +func (p *adminClientProcessorControlBulkLoad) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientControlBulkLoadArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("control_bulk_load", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientControlBulkLoadResult{} + var retval *ControlBulkLoadResponse + var err2 error + if retval, err2 = p.handler.ControlBulkLoad(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing control_bulk_load: "+err2.Error()) + oprot.WriteMessageBegin("control_bulk_load", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("control_bulk_load", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorClearBulkLoad struct { + handler AdminClient +} + +func (p *adminClientProcessorClearBulkLoad) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientClearBulkLoadArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("clear_bulk_load", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientClearBulkLoadResult{} + var retval *ClearBulkLoadStateResponse + var err2 error + if retval, err2 = p.handler.ClearBulkLoad(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing clear_bulk_load: "+err2.Error()) + oprot.WriteMessageBegin("clear_bulk_load", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("clear_bulk_load", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorStartManualCompact struct { + handler AdminClient +} + +func (p *adminClientProcessorStartManualCompact) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientStartManualCompactArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("start_manual_compact", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientStartManualCompactResult{} + var retval *StartAppManualCompactResponse + var err2 error + if retval, err2 = p.handler.StartManualCompact(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing start_manual_compact: "+err2.Error()) + oprot.WriteMessageBegin("start_manual_compact", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("start_manual_compact", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type adminClientProcessorQueryManualCompact struct { + handler AdminClient +} + +func (p *adminClientProcessorQueryManualCompact) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AdminClientQueryManualCompactArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_manual_compact", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := AdminClientQueryManualCompactResult{} + var retval *QueryAppManualCompactResponse + var err2 error + if retval, err2 = p.handler.QueryManualCompact(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_manual_compact: "+err2.Error()) + oprot.WriteMessageBegin("query_manual_compact", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_manual_compact", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Req +type AdminClientCreateAppArgs struct { + Req *ConfigurationCreateAppRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientCreateAppArgs() *AdminClientCreateAppArgs { + return &AdminClientCreateAppArgs{} +} + +var AdminClientCreateAppArgs_Req_DEFAULT *ConfigurationCreateAppRequest + +func (p *AdminClientCreateAppArgs) GetReq() *ConfigurationCreateAppRequest { + if !p.IsSetReq() { + return AdminClientCreateAppArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientCreateAppArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientCreateAppArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientCreateAppArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationCreateAppRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientCreateAppArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("create_app_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientCreateAppArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientCreateAppArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientCreateAppArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientCreateAppResult struct { + Success *ConfigurationCreateAppResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientCreateAppResult() *AdminClientCreateAppResult { + return &AdminClientCreateAppResult{} +} + +var AdminClientCreateAppResult_Success_DEFAULT *ConfigurationCreateAppResponse + +func (p *AdminClientCreateAppResult) GetSuccess() *ConfigurationCreateAppResponse { + if !p.IsSetSuccess() { + return AdminClientCreateAppResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientCreateAppResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientCreateAppResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientCreateAppResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationCreateAppResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientCreateAppResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("create_app_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientCreateAppResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientCreateAppResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientCreateAppResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientDropAppArgs struct { + Req *ConfigurationDropAppRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientDropAppArgs() *AdminClientDropAppArgs { + return &AdminClientDropAppArgs{} +} + +var AdminClientDropAppArgs_Req_DEFAULT *ConfigurationDropAppRequest + +func (p *AdminClientDropAppArgs) GetReq() *ConfigurationDropAppRequest { + if !p.IsSetReq() { + return AdminClientDropAppArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientDropAppArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientDropAppArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientDropAppArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationDropAppRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientDropAppArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("drop_app_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientDropAppArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientDropAppArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientDropAppArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientDropAppResult struct { + Success *ConfigurationDropAppResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientDropAppResult() *AdminClientDropAppResult { + return &AdminClientDropAppResult{} +} + +var AdminClientDropAppResult_Success_DEFAULT *ConfigurationDropAppResponse + +func (p *AdminClientDropAppResult) GetSuccess() *ConfigurationDropAppResponse { + if !p.IsSetSuccess() { + return AdminClientDropAppResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientDropAppResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientDropAppResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientDropAppResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationDropAppResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientDropAppResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("drop_app_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientDropAppResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientDropAppResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientDropAppResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientRecallAppArgs struct { + Req *ConfigurationRecallAppRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientRecallAppArgs() *AdminClientRecallAppArgs { + return &AdminClientRecallAppArgs{} +} + +var AdminClientRecallAppArgs_Req_DEFAULT *ConfigurationRecallAppRequest + +func (p *AdminClientRecallAppArgs) GetReq() *ConfigurationRecallAppRequest { + if !p.IsSetReq() { + return AdminClientRecallAppArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientRecallAppArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientRecallAppArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientRecallAppArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationRecallAppRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientRecallAppArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("recall_app_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientRecallAppArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientRecallAppArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientRecallAppArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientRecallAppResult struct { + Success *ConfigurationRecallAppResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientRecallAppResult() *AdminClientRecallAppResult { + return &AdminClientRecallAppResult{} +} + +var AdminClientRecallAppResult_Success_DEFAULT *ConfigurationRecallAppResponse + +func (p *AdminClientRecallAppResult) GetSuccess() *ConfigurationRecallAppResponse { + if !p.IsSetSuccess() { + return AdminClientRecallAppResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientRecallAppResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientRecallAppResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientRecallAppResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationRecallAppResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientRecallAppResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("recall_app_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientRecallAppResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientRecallAppResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientRecallAppResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientListAppsArgs struct { + Req *ConfigurationListAppsRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientListAppsArgs() *AdminClientListAppsArgs { + return &AdminClientListAppsArgs{} +} + +var AdminClientListAppsArgs_Req_DEFAULT *ConfigurationListAppsRequest + +func (p *AdminClientListAppsArgs) GetReq() *ConfigurationListAppsRequest { + if !p.IsSetReq() { + return AdminClientListAppsArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientListAppsArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientListAppsArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientListAppsArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationListAppsRequest{ + Status: 0, + } + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientListAppsArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("list_apps_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientListAppsArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientListAppsArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientListAppsArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientListAppsResult struct { + Success *ConfigurationListAppsResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientListAppsResult() *AdminClientListAppsResult { + return &AdminClientListAppsResult{} +} + +var AdminClientListAppsResult_Success_DEFAULT *ConfigurationListAppsResponse + +func (p *AdminClientListAppsResult) GetSuccess() *ConfigurationListAppsResponse { + if !p.IsSetSuccess() { + return AdminClientListAppsResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientListAppsResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientListAppsResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientListAppsResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationListAppsResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientListAppsResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("list_apps_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientListAppsResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientListAppsResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientListAppsResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientAddDuplicationArgs struct { + Req *DuplicationAddRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientAddDuplicationArgs() *AdminClientAddDuplicationArgs { + return &AdminClientAddDuplicationArgs{} +} + +var AdminClientAddDuplicationArgs_Req_DEFAULT *DuplicationAddRequest + +func (p *AdminClientAddDuplicationArgs) GetReq() *DuplicationAddRequest { + if !p.IsSetReq() { + return AdminClientAddDuplicationArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientAddDuplicationArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientAddDuplicationArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientAddDuplicationArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &DuplicationAddRequest{ + IsDuplicatingCheckpoint: true, + } + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientAddDuplicationArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("add_duplication_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientAddDuplicationArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientAddDuplicationArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientAddDuplicationArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientAddDuplicationResult struct { + Success *DuplicationAddResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientAddDuplicationResult() *AdminClientAddDuplicationResult { + return &AdminClientAddDuplicationResult{} +} + +var AdminClientAddDuplicationResult_Success_DEFAULT *DuplicationAddResponse + +func (p *AdminClientAddDuplicationResult) GetSuccess() *DuplicationAddResponse { + if !p.IsSetSuccess() { + return AdminClientAddDuplicationResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientAddDuplicationResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientAddDuplicationResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientAddDuplicationResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &DuplicationAddResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientAddDuplicationResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("add_duplication_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientAddDuplicationResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientAddDuplicationResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientAddDuplicationResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryDuplicationArgs struct { + Req *DuplicationQueryRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryDuplicationArgs() *AdminClientQueryDuplicationArgs { + return &AdminClientQueryDuplicationArgs{} +} + +var AdminClientQueryDuplicationArgs_Req_DEFAULT *DuplicationQueryRequest + +func (p *AdminClientQueryDuplicationArgs) GetReq() *DuplicationQueryRequest { + if !p.IsSetReq() { + return AdminClientQueryDuplicationArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryDuplicationArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryDuplicationArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryDuplicationArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &DuplicationQueryRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryDuplicationArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_duplication_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryDuplicationArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQueryDuplicationArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryDuplicationArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryDuplicationResult struct { + Success *DuplicationQueryResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryDuplicationResult() *AdminClientQueryDuplicationResult { + return &AdminClientQueryDuplicationResult{} +} + +var AdminClientQueryDuplicationResult_Success_DEFAULT *DuplicationQueryResponse + +func (p *AdminClientQueryDuplicationResult) GetSuccess() *DuplicationQueryResponse { + if !p.IsSetSuccess() { + return AdminClientQueryDuplicationResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryDuplicationResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryDuplicationResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryDuplicationResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &DuplicationQueryResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryDuplicationResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_duplication_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryDuplicationResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryDuplicationResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryDuplicationResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientModifyDuplicationArgs struct { + Req *DuplicationModifyRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientModifyDuplicationArgs() *AdminClientModifyDuplicationArgs { + return &AdminClientModifyDuplicationArgs{} +} + +var AdminClientModifyDuplicationArgs_Req_DEFAULT *DuplicationModifyRequest + +func (p *AdminClientModifyDuplicationArgs) GetReq() *DuplicationModifyRequest { + if !p.IsSetReq() { + return AdminClientModifyDuplicationArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientModifyDuplicationArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientModifyDuplicationArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientModifyDuplicationArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &DuplicationModifyRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientModifyDuplicationArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("modify_duplication_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientModifyDuplicationArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientModifyDuplicationArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientModifyDuplicationArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientModifyDuplicationResult struct { + Success *DuplicationModifyResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientModifyDuplicationResult() *AdminClientModifyDuplicationResult { + return &AdminClientModifyDuplicationResult{} +} + +var AdminClientModifyDuplicationResult_Success_DEFAULT *DuplicationModifyResponse + +func (p *AdminClientModifyDuplicationResult) GetSuccess() *DuplicationModifyResponse { + if !p.IsSetSuccess() { + return AdminClientModifyDuplicationResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientModifyDuplicationResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientModifyDuplicationResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientModifyDuplicationResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &DuplicationModifyResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientModifyDuplicationResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("modify_duplication_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientModifyDuplicationResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientModifyDuplicationResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientModifyDuplicationResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryAppInfoArgs struct { + Req *QueryAppInfoRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryAppInfoArgs() *AdminClientQueryAppInfoArgs { + return &AdminClientQueryAppInfoArgs{} +} + +var AdminClientQueryAppInfoArgs_Req_DEFAULT *QueryAppInfoRequest + +func (p *AdminClientQueryAppInfoArgs) GetReq() *QueryAppInfoRequest { + if !p.IsSetReq() { + return AdminClientQueryAppInfoArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryAppInfoArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryAppInfoArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryAppInfoArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &QueryAppInfoRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryAppInfoArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_app_info_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryAppInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQueryAppInfoArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryAppInfoArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryAppInfoResult struct { + Success *QueryAppInfoResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryAppInfoResult() *AdminClientQueryAppInfoResult { + return &AdminClientQueryAppInfoResult{} +} + +var AdminClientQueryAppInfoResult_Success_DEFAULT *QueryAppInfoResponse + +func (p *AdminClientQueryAppInfoResult) GetSuccess() *QueryAppInfoResponse { + if !p.IsSetSuccess() { + return AdminClientQueryAppInfoResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryAppInfoResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryAppInfoResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryAppInfoResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &QueryAppInfoResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryAppInfoResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_app_info_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryAppInfoResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryAppInfoResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryAppInfoResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientUpdateAppEnvArgs struct { + Req *ConfigurationUpdateAppEnvRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientUpdateAppEnvArgs() *AdminClientUpdateAppEnvArgs { + return &AdminClientUpdateAppEnvArgs{} +} + +var AdminClientUpdateAppEnvArgs_Req_DEFAULT *ConfigurationUpdateAppEnvRequest + +func (p *AdminClientUpdateAppEnvArgs) GetReq() *ConfigurationUpdateAppEnvRequest { + if !p.IsSetReq() { + return AdminClientUpdateAppEnvArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientUpdateAppEnvArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientUpdateAppEnvArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientUpdateAppEnvArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationUpdateAppEnvRequest{ + Op: 0, + } + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientUpdateAppEnvArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("update_app_env_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientUpdateAppEnvArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientUpdateAppEnvArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientUpdateAppEnvArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientUpdateAppEnvResult struct { + Success *ConfigurationUpdateAppEnvResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientUpdateAppEnvResult() *AdminClientUpdateAppEnvResult { + return &AdminClientUpdateAppEnvResult{} +} + +var AdminClientUpdateAppEnvResult_Success_DEFAULT *ConfigurationUpdateAppEnvResponse + +func (p *AdminClientUpdateAppEnvResult) GetSuccess() *ConfigurationUpdateAppEnvResponse { + if !p.IsSetSuccess() { + return AdminClientUpdateAppEnvResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientUpdateAppEnvResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientUpdateAppEnvResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientUpdateAppEnvResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationUpdateAppEnvResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientUpdateAppEnvResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("update_app_env_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientUpdateAppEnvResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientUpdateAppEnvResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientUpdateAppEnvResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientListNodesArgs struct { + Req *ConfigurationListNodesRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientListNodesArgs() *AdminClientListNodesArgs { + return &AdminClientListNodesArgs{} +} + +var AdminClientListNodesArgs_Req_DEFAULT *ConfigurationListNodesRequest + +func (p *AdminClientListNodesArgs) GetReq() *ConfigurationListNodesRequest { + if !p.IsSetReq() { + return AdminClientListNodesArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientListNodesArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientListNodesArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientListNodesArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationListNodesRequest{ + Status: 0, + } + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientListNodesArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("list_nodes_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientListNodesArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientListNodesArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientListNodesArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientListNodesResult struct { + Success *ConfigurationListNodesResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientListNodesResult() *AdminClientListNodesResult { + return &AdminClientListNodesResult{} +} + +var AdminClientListNodesResult_Success_DEFAULT *ConfigurationListNodesResponse + +func (p *AdminClientListNodesResult) GetSuccess() *ConfigurationListNodesResponse { + if !p.IsSetSuccess() { + return AdminClientListNodesResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientListNodesResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientListNodesResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientListNodesResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationListNodesResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientListNodesResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("list_nodes_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientListNodesResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientListNodesResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientListNodesResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryClusterInfoArgs struct { + Req *ConfigurationClusterInfoRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryClusterInfoArgs() *AdminClientQueryClusterInfoArgs { + return &AdminClientQueryClusterInfoArgs{} +} + +var AdminClientQueryClusterInfoArgs_Req_DEFAULT *ConfigurationClusterInfoRequest + +func (p *AdminClientQueryClusterInfoArgs) GetReq() *ConfigurationClusterInfoRequest { + if !p.IsSetReq() { + return AdminClientQueryClusterInfoArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryClusterInfoArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryClusterInfoArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryClusterInfoArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationClusterInfoRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryClusterInfoArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_cluster_info_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryClusterInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQueryClusterInfoArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryClusterInfoArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryClusterInfoResult struct { + Success *ConfigurationClusterInfoResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryClusterInfoResult() *AdminClientQueryClusterInfoResult { + return &AdminClientQueryClusterInfoResult{} +} + +var AdminClientQueryClusterInfoResult_Success_DEFAULT *ConfigurationClusterInfoResponse + +func (p *AdminClientQueryClusterInfoResult) GetSuccess() *ConfigurationClusterInfoResponse { + if !p.IsSetSuccess() { + return AdminClientQueryClusterInfoResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryClusterInfoResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryClusterInfoResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryClusterInfoResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationClusterInfoResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryClusterInfoResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_cluster_info_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryClusterInfoResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryClusterInfoResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryClusterInfoResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientMetaControlArgs struct { + Req *ConfigurationMetaControlRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientMetaControlArgs() *AdminClientMetaControlArgs { + return &AdminClientMetaControlArgs{} +} + +var AdminClientMetaControlArgs_Req_DEFAULT *ConfigurationMetaControlRequest + +func (p *AdminClientMetaControlArgs) GetReq() *ConfigurationMetaControlRequest { + if !p.IsSetReq() { + return AdminClientMetaControlArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientMetaControlArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientMetaControlArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientMetaControlArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationMetaControlRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientMetaControlArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("meta_control_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientMetaControlArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientMetaControlArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientMetaControlArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientMetaControlResult struct { + Success *ConfigurationMetaControlResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientMetaControlResult() *AdminClientMetaControlResult { + return &AdminClientMetaControlResult{} +} + +var AdminClientMetaControlResult_Success_DEFAULT *ConfigurationMetaControlResponse + +func (p *AdminClientMetaControlResult) GetSuccess() *ConfigurationMetaControlResponse { + if !p.IsSetSuccess() { + return AdminClientMetaControlResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientMetaControlResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientMetaControlResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientMetaControlResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationMetaControlResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientMetaControlResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("meta_control_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientMetaControlResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientMetaControlResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientMetaControlResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryBackupPolicyArgs struct { + Req *ConfigurationQueryBackupPolicyRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryBackupPolicyArgs() *AdminClientQueryBackupPolicyArgs { + return &AdminClientQueryBackupPolicyArgs{} +} + +var AdminClientQueryBackupPolicyArgs_Req_DEFAULT *ConfigurationQueryBackupPolicyRequest + +func (p *AdminClientQueryBackupPolicyArgs) GetReq() *ConfigurationQueryBackupPolicyRequest { + if !p.IsSetReq() { + return AdminClientQueryBackupPolicyArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryBackupPolicyArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryBackupPolicyArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryBackupPolicyArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationQueryBackupPolicyRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryBackupPolicyArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_backup_policy_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryBackupPolicyArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQueryBackupPolicyArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryBackupPolicyArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryBackupPolicyResult struct { + Success *ConfigurationQueryBackupPolicyResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryBackupPolicyResult() *AdminClientQueryBackupPolicyResult { + return &AdminClientQueryBackupPolicyResult{} +} + +var AdminClientQueryBackupPolicyResult_Success_DEFAULT *ConfigurationQueryBackupPolicyResponse + +func (p *AdminClientQueryBackupPolicyResult) GetSuccess() *ConfigurationQueryBackupPolicyResponse { + if !p.IsSetSuccess() { + return AdminClientQueryBackupPolicyResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryBackupPolicyResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryBackupPolicyResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryBackupPolicyResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationQueryBackupPolicyResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryBackupPolicyResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_backup_policy_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryBackupPolicyResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryBackupPolicyResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryBackupPolicyResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientBalanceArgs struct { + Req *ConfigurationBalancerRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientBalanceArgs() *AdminClientBalanceArgs { + return &AdminClientBalanceArgs{} +} + +var AdminClientBalanceArgs_Req_DEFAULT *ConfigurationBalancerRequest + +func (p *AdminClientBalanceArgs) GetReq() *ConfigurationBalancerRequest { + if !p.IsSetReq() { + return AdminClientBalanceArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientBalanceArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientBalanceArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientBalanceArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationBalancerRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientBalanceArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("balance_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientBalanceArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientBalanceArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientBalanceArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientBalanceResult struct { + Success *ConfigurationBalancerResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientBalanceResult() *AdminClientBalanceResult { + return &AdminClientBalanceResult{} +} + +var AdminClientBalanceResult_Success_DEFAULT *ConfigurationBalancerResponse + +func (p *AdminClientBalanceResult) GetSuccess() *ConfigurationBalancerResponse { + if !p.IsSetSuccess() { + return AdminClientBalanceResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientBalanceResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientBalanceResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientBalanceResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationBalancerResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientBalanceResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("balance_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientBalanceResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientBalanceResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientBalanceResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientStartBackupAppArgs struct { + Req *StartBackupAppRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientStartBackupAppArgs() *AdminClientStartBackupAppArgs { + return &AdminClientStartBackupAppArgs{} +} + +var AdminClientStartBackupAppArgs_Req_DEFAULT *StartBackupAppRequest + +func (p *AdminClientStartBackupAppArgs) GetReq() *StartBackupAppRequest { + if !p.IsSetReq() { + return AdminClientStartBackupAppArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientStartBackupAppArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientStartBackupAppArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartBackupAppArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &StartBackupAppRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientStartBackupAppArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_backup_app_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartBackupAppArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientStartBackupAppArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartBackupAppArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientStartBackupAppResult struct { + Success *StartBackupAppResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientStartBackupAppResult() *AdminClientStartBackupAppResult { + return &AdminClientStartBackupAppResult{} +} + +var AdminClientStartBackupAppResult_Success_DEFAULT *StartBackupAppResponse + +func (p *AdminClientStartBackupAppResult) GetSuccess() *StartBackupAppResponse { + if !p.IsSetSuccess() { + return AdminClientStartBackupAppResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientStartBackupAppResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientStartBackupAppResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartBackupAppResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &StartBackupAppResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientStartBackupAppResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_backup_app_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartBackupAppResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientStartBackupAppResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartBackupAppResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryBackupStatusArgs struct { + Req *QueryBackupStatusRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryBackupStatusArgs() *AdminClientQueryBackupStatusArgs { + return &AdminClientQueryBackupStatusArgs{} +} + +var AdminClientQueryBackupStatusArgs_Req_DEFAULT *QueryBackupStatusRequest + +func (p *AdminClientQueryBackupStatusArgs) GetReq() *QueryBackupStatusRequest { + if !p.IsSetReq() { + return AdminClientQueryBackupStatusArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryBackupStatusArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryBackupStatusArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryBackupStatusArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &QueryBackupStatusRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryBackupStatusArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_backup_status_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryBackupStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQueryBackupStatusArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryBackupStatusArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryBackupStatusResult struct { + Success *QueryBackupStatusResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryBackupStatusResult() *AdminClientQueryBackupStatusResult { + return &AdminClientQueryBackupStatusResult{} +} + +var AdminClientQueryBackupStatusResult_Success_DEFAULT *QueryBackupStatusResponse + +func (p *AdminClientQueryBackupStatusResult) GetSuccess() *QueryBackupStatusResponse { + if !p.IsSetSuccess() { + return AdminClientQueryBackupStatusResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryBackupStatusResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryBackupStatusResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryBackupStatusResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &QueryBackupStatusResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryBackupStatusResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_backup_status_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryBackupStatusResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryBackupStatusResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryBackupStatusResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientRestoreAppArgs struct { + Req *ConfigurationRestoreRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientRestoreAppArgs() *AdminClientRestoreAppArgs { + return &AdminClientRestoreAppArgs{} +} + +var AdminClientRestoreAppArgs_Req_DEFAULT *ConfigurationRestoreRequest + +func (p *AdminClientRestoreAppArgs) GetReq() *ConfigurationRestoreRequest { + if !p.IsSetReq() { + return AdminClientRestoreAppArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientRestoreAppArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientRestoreAppArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientRestoreAppArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ConfigurationRestoreRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientRestoreAppArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("restore_app_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientRestoreAppArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientRestoreAppArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientRestoreAppArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientRestoreAppResult struct { + Success *ConfigurationCreateAppResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientRestoreAppResult() *AdminClientRestoreAppResult { + return &AdminClientRestoreAppResult{} +} + +var AdminClientRestoreAppResult_Success_DEFAULT *ConfigurationCreateAppResponse + +func (p *AdminClientRestoreAppResult) GetSuccess() *ConfigurationCreateAppResponse { + if !p.IsSetSuccess() { + return AdminClientRestoreAppResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientRestoreAppResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientRestoreAppResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientRestoreAppResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ConfigurationCreateAppResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientRestoreAppResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("restore_app_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientRestoreAppResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientRestoreAppResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientRestoreAppResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientStartPartitionSplitArgs struct { + Req *StartPartitionSplitRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientStartPartitionSplitArgs() *AdminClientStartPartitionSplitArgs { + return &AdminClientStartPartitionSplitArgs{} +} + +var AdminClientStartPartitionSplitArgs_Req_DEFAULT *StartPartitionSplitRequest + +func (p *AdminClientStartPartitionSplitArgs) GetReq() *StartPartitionSplitRequest { + if !p.IsSetReq() { + return AdminClientStartPartitionSplitArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientStartPartitionSplitArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientStartPartitionSplitArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartPartitionSplitArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &StartPartitionSplitRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientStartPartitionSplitArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_partition_split_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartPartitionSplitArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientStartPartitionSplitArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartPartitionSplitArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientStartPartitionSplitResult struct { + Success *StartPartitionSplitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientStartPartitionSplitResult() *AdminClientStartPartitionSplitResult { + return &AdminClientStartPartitionSplitResult{} +} + +var AdminClientStartPartitionSplitResult_Success_DEFAULT *StartPartitionSplitResponse + +func (p *AdminClientStartPartitionSplitResult) GetSuccess() *StartPartitionSplitResponse { + if !p.IsSetSuccess() { + return AdminClientStartPartitionSplitResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientStartPartitionSplitResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientStartPartitionSplitResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartPartitionSplitResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &StartPartitionSplitResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientStartPartitionSplitResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_partition_split_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartPartitionSplitResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientStartPartitionSplitResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartPartitionSplitResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQuerySplitStatusArgs struct { + Req *QuerySplitRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQuerySplitStatusArgs() *AdminClientQuerySplitStatusArgs { + return &AdminClientQuerySplitStatusArgs{} +} + +var AdminClientQuerySplitStatusArgs_Req_DEFAULT *QuerySplitRequest + +func (p *AdminClientQuerySplitStatusArgs) GetReq() *QuerySplitRequest { + if !p.IsSetReq() { + return AdminClientQuerySplitStatusArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQuerySplitStatusArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQuerySplitStatusArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQuerySplitStatusArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &QuerySplitRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQuerySplitStatusArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_split_status_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQuerySplitStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQuerySplitStatusArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQuerySplitStatusArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQuerySplitStatusResult struct { + Success *QuerySplitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQuerySplitStatusResult() *AdminClientQuerySplitStatusResult { + return &AdminClientQuerySplitStatusResult{} +} + +var AdminClientQuerySplitStatusResult_Success_DEFAULT *QuerySplitResponse + +func (p *AdminClientQuerySplitStatusResult) GetSuccess() *QuerySplitResponse { + if !p.IsSetSuccess() { + return AdminClientQuerySplitStatusResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQuerySplitStatusResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQuerySplitStatusResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQuerySplitStatusResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &QuerySplitResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQuerySplitStatusResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_split_status_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQuerySplitStatusResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQuerySplitStatusResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQuerySplitStatusResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientControlPartitionSplitArgs struct { + Req *ControlSplitRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientControlPartitionSplitArgs() *AdminClientControlPartitionSplitArgs { + return &AdminClientControlPartitionSplitArgs{} +} + +var AdminClientControlPartitionSplitArgs_Req_DEFAULT *ControlSplitRequest + +func (p *AdminClientControlPartitionSplitArgs) GetReq() *ControlSplitRequest { + if !p.IsSetReq() { + return AdminClientControlPartitionSplitArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientControlPartitionSplitArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientControlPartitionSplitArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientControlPartitionSplitArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ControlSplitRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientControlPartitionSplitArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_partition_split_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientControlPartitionSplitArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientControlPartitionSplitArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientControlPartitionSplitArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientControlPartitionSplitResult struct { + Success *ControlSplitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientControlPartitionSplitResult() *AdminClientControlPartitionSplitResult { + return &AdminClientControlPartitionSplitResult{} +} + +var AdminClientControlPartitionSplitResult_Success_DEFAULT *ControlSplitResponse + +func (p *AdminClientControlPartitionSplitResult) GetSuccess() *ControlSplitResponse { + if !p.IsSetSuccess() { + return AdminClientControlPartitionSplitResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientControlPartitionSplitResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientControlPartitionSplitResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientControlPartitionSplitResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ControlSplitResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientControlPartitionSplitResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_partition_split_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientControlPartitionSplitResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientControlPartitionSplitResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientControlPartitionSplitResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientStartBulkLoadArgs struct { + Req *StartBulkLoadRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientStartBulkLoadArgs() *AdminClientStartBulkLoadArgs { + return &AdminClientStartBulkLoadArgs{} +} + +var AdminClientStartBulkLoadArgs_Req_DEFAULT *StartBulkLoadRequest + +func (p *AdminClientStartBulkLoadArgs) GetReq() *StartBulkLoadRequest { + if !p.IsSetReq() { + return AdminClientStartBulkLoadArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientStartBulkLoadArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientStartBulkLoadArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartBulkLoadArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &StartBulkLoadRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientStartBulkLoadArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_bulk_load_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartBulkLoadArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientStartBulkLoadArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartBulkLoadArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientStartBulkLoadResult struct { + Success *StartBulkLoadResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientStartBulkLoadResult() *AdminClientStartBulkLoadResult { + return &AdminClientStartBulkLoadResult{} +} + +var AdminClientStartBulkLoadResult_Success_DEFAULT *StartBulkLoadResponse + +func (p *AdminClientStartBulkLoadResult) GetSuccess() *StartBulkLoadResponse { + if !p.IsSetSuccess() { + return AdminClientStartBulkLoadResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientStartBulkLoadResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientStartBulkLoadResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartBulkLoadResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &StartBulkLoadResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientStartBulkLoadResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_bulk_load_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartBulkLoadResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientStartBulkLoadResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartBulkLoadResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryBulkLoadStatusArgs struct { + Req *QueryBulkLoadRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryBulkLoadStatusArgs() *AdminClientQueryBulkLoadStatusArgs { + return &AdminClientQueryBulkLoadStatusArgs{} +} + +var AdminClientQueryBulkLoadStatusArgs_Req_DEFAULT *QueryBulkLoadRequest + +func (p *AdminClientQueryBulkLoadStatusArgs) GetReq() *QueryBulkLoadRequest { + if !p.IsSetReq() { + return AdminClientQueryBulkLoadStatusArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryBulkLoadStatusArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryBulkLoadStatusArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryBulkLoadStatusArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &QueryBulkLoadRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryBulkLoadStatusArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_bulk_load_status_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryBulkLoadStatusArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQueryBulkLoadStatusArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryBulkLoadStatusArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryBulkLoadStatusResult struct { + Success *QueryBulkLoadResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryBulkLoadStatusResult() *AdminClientQueryBulkLoadStatusResult { + return &AdminClientQueryBulkLoadStatusResult{} +} + +var AdminClientQueryBulkLoadStatusResult_Success_DEFAULT *QueryBulkLoadResponse + +func (p *AdminClientQueryBulkLoadStatusResult) GetSuccess() *QueryBulkLoadResponse { + if !p.IsSetSuccess() { + return AdminClientQueryBulkLoadStatusResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryBulkLoadStatusResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryBulkLoadStatusResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryBulkLoadStatusResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &QueryBulkLoadResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryBulkLoadStatusResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_bulk_load_status_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryBulkLoadStatusResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryBulkLoadStatusResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryBulkLoadStatusResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientControlBulkLoadArgs struct { + Req *ControlBulkLoadRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientControlBulkLoadArgs() *AdminClientControlBulkLoadArgs { + return &AdminClientControlBulkLoadArgs{} +} + +var AdminClientControlBulkLoadArgs_Req_DEFAULT *ControlBulkLoadRequest + +func (p *AdminClientControlBulkLoadArgs) GetReq() *ControlBulkLoadRequest { + if !p.IsSetReq() { + return AdminClientControlBulkLoadArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientControlBulkLoadArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientControlBulkLoadArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientControlBulkLoadArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ControlBulkLoadRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientControlBulkLoadArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_bulk_load_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientControlBulkLoadArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientControlBulkLoadArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientControlBulkLoadArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientControlBulkLoadResult struct { + Success *ControlBulkLoadResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientControlBulkLoadResult() *AdminClientControlBulkLoadResult { + return &AdminClientControlBulkLoadResult{} +} + +var AdminClientControlBulkLoadResult_Success_DEFAULT *ControlBulkLoadResponse + +func (p *AdminClientControlBulkLoadResult) GetSuccess() *ControlBulkLoadResponse { + if !p.IsSetSuccess() { + return AdminClientControlBulkLoadResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientControlBulkLoadResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientControlBulkLoadResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientControlBulkLoadResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ControlBulkLoadResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientControlBulkLoadResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_bulk_load_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientControlBulkLoadResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientControlBulkLoadResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientControlBulkLoadResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientClearBulkLoadArgs struct { + Req *ClearBulkLoadStateRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientClearBulkLoadArgs() *AdminClientClearBulkLoadArgs { + return &AdminClientClearBulkLoadArgs{} +} + +var AdminClientClearBulkLoadArgs_Req_DEFAULT *ClearBulkLoadStateRequest + +func (p *AdminClientClearBulkLoadArgs) GetReq() *ClearBulkLoadStateRequest { + if !p.IsSetReq() { + return AdminClientClearBulkLoadArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientClearBulkLoadArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientClearBulkLoadArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientClearBulkLoadArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ClearBulkLoadStateRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientClearBulkLoadArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("clear_bulk_load_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientClearBulkLoadArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientClearBulkLoadArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientClearBulkLoadArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientClearBulkLoadResult struct { + Success *ClearBulkLoadStateResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientClearBulkLoadResult() *AdminClientClearBulkLoadResult { + return &AdminClientClearBulkLoadResult{} +} + +var AdminClientClearBulkLoadResult_Success_DEFAULT *ClearBulkLoadStateResponse + +func (p *AdminClientClearBulkLoadResult) GetSuccess() *ClearBulkLoadStateResponse { + if !p.IsSetSuccess() { + return AdminClientClearBulkLoadResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientClearBulkLoadResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientClearBulkLoadResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientClearBulkLoadResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ClearBulkLoadStateResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientClearBulkLoadResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("clear_bulk_load_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientClearBulkLoadResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientClearBulkLoadResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientClearBulkLoadResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientStartManualCompactArgs struct { + Req *StartAppManualCompactRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientStartManualCompactArgs() *AdminClientStartManualCompactArgs { + return &AdminClientStartManualCompactArgs{} +} + +var AdminClientStartManualCompactArgs_Req_DEFAULT *StartAppManualCompactRequest + +func (p *AdminClientStartManualCompactArgs) GetReq() *StartAppManualCompactRequest { + if !p.IsSetReq() { + return AdminClientStartManualCompactArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientStartManualCompactArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientStartManualCompactArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartManualCompactArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &StartAppManualCompactRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientStartManualCompactArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_manual_compact_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartManualCompactArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientStartManualCompactArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartManualCompactArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientStartManualCompactResult struct { + Success *StartAppManualCompactResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientStartManualCompactResult() *AdminClientStartManualCompactResult { + return &AdminClientStartManualCompactResult{} +} + +var AdminClientStartManualCompactResult_Success_DEFAULT *StartAppManualCompactResponse + +func (p *AdminClientStartManualCompactResult) GetSuccess() *StartAppManualCompactResponse { + if !p.IsSetSuccess() { + return AdminClientStartManualCompactResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientStartManualCompactResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientStartManualCompactResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientStartManualCompactResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &StartAppManualCompactResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientStartManualCompactResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_manual_compact_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientStartManualCompactResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientStartManualCompactResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientStartManualCompactResult(%+v)", *p) +} + +// Attributes: +// - Req +type AdminClientQueryManualCompactArgs struct { + Req *QueryAppManualCompactRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewAdminClientQueryManualCompactArgs() *AdminClientQueryManualCompactArgs { + return &AdminClientQueryManualCompactArgs{} +} + +var AdminClientQueryManualCompactArgs_Req_DEFAULT *QueryAppManualCompactRequest + +func (p *AdminClientQueryManualCompactArgs) GetReq() *QueryAppManualCompactRequest { + if !p.IsSetReq() { + return AdminClientQueryManualCompactArgs_Req_DEFAULT + } + return p.Req +} +func (p *AdminClientQueryManualCompactArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *AdminClientQueryManualCompactArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryManualCompactArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &QueryAppManualCompactRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *AdminClientQueryManualCompactArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_manual_compact_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryManualCompactArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *AdminClientQueryManualCompactArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryManualCompactArgs(%+v)", *p) +} + +// Attributes: +// - Success +type AdminClientQueryManualCompactResult struct { + Success *QueryAppManualCompactResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewAdminClientQueryManualCompactResult() *AdminClientQueryManualCompactResult { + return &AdminClientQueryManualCompactResult{} +} + +var AdminClientQueryManualCompactResult_Success_DEFAULT *QueryAppManualCompactResponse + +func (p *AdminClientQueryManualCompactResult) GetSuccess() *QueryAppManualCompactResponse { + if !p.IsSetSuccess() { + return AdminClientQueryManualCompactResult_Success_DEFAULT + } + return p.Success +} +func (p *AdminClientQueryManualCompactResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *AdminClientQueryManualCompactResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AdminClientQueryManualCompactResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &QueryAppManualCompactResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *AdminClientQueryManualCompactResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_manual_compact_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AdminClientQueryManualCompactResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *AdminClientQueryManualCompactResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AdminClientQueryManualCompactResult(%+v)", *p) +} diff --git a/go-client/idl/admin/metadata-consts.go b/go-client/idl/admin/metadata-consts.go new file mode 100644 index 0000000000..757b943ef3 --- /dev/null +++ b/go-client/idl/admin/metadata-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/admin/metadata.go b/go-client/idl/admin/metadata.go new file mode 100644 index 0000000000..4a3c5592c5 --- /dev/null +++ b/go-client/idl/admin/metadata.go @@ -0,0 +1,1373 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +type PartitionStatus int64 + +const ( + PartitionStatus_PS_INVALID PartitionStatus = 0 + PartitionStatus_PS_INACTIVE PartitionStatus = 1 + PartitionStatus_PS_ERROR PartitionStatus = 2 + PartitionStatus_PS_PRIMARY PartitionStatus = 3 + PartitionStatus_PS_SECONDARY PartitionStatus = 4 + PartitionStatus_PS_POTENTIAL_SECONDARY PartitionStatus = 5 + PartitionStatus_PS_PARTITION_SPLIT PartitionStatus = 6 +) + +func (p PartitionStatus) String() string { + switch p { + case PartitionStatus_PS_INVALID: + return "PS_INVALID" + case PartitionStatus_PS_INACTIVE: + return "PS_INACTIVE" + case PartitionStatus_PS_ERROR: + return "PS_ERROR" + case PartitionStatus_PS_PRIMARY: + return "PS_PRIMARY" + case PartitionStatus_PS_SECONDARY: + return "PS_SECONDARY" + case PartitionStatus_PS_POTENTIAL_SECONDARY: + return "PS_POTENTIAL_SECONDARY" + case PartitionStatus_PS_PARTITION_SPLIT: + return "PS_PARTITION_SPLIT" + } + return "" +} + +func PartitionStatusFromString(s string) (PartitionStatus, error) { + switch s { + case "PS_INVALID": + return PartitionStatus_PS_INVALID, nil + case "PS_INACTIVE": + return PartitionStatus_PS_INACTIVE, nil + case "PS_ERROR": + return PartitionStatus_PS_ERROR, nil + case "PS_PRIMARY": + return PartitionStatus_PS_PRIMARY, nil + case "PS_SECONDARY": + return PartitionStatus_PS_SECONDARY, nil + case "PS_POTENTIAL_SECONDARY": + return PartitionStatus_PS_POTENTIAL_SECONDARY, nil + case "PS_PARTITION_SPLIT": + return PartitionStatus_PS_PARTITION_SPLIT, nil + } + return PartitionStatus(0), fmt.Errorf("not a valid PartitionStatus string") +} + +func PartitionStatusPtr(v PartitionStatus) *PartitionStatus { return &v } + +func (p PartitionStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *PartitionStatus) UnmarshalText(text []byte) error { + q, err := PartitionStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *PartitionStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = PartitionStatus(v) + return nil +} + +func (p *PartitionStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type SplitStatus int64 + +const ( + SplitStatus_NOT_SPLIT SplitStatus = 0 + SplitStatus_SPLITTING SplitStatus = 1 + SplitStatus_PAUSING SplitStatus = 2 + SplitStatus_PAUSED SplitStatus = 3 + SplitStatus_CANCELING SplitStatus = 4 +) + +func (p SplitStatus) String() string { + switch p { + case SplitStatus_NOT_SPLIT: + return "NOT_SPLIT" + case SplitStatus_SPLITTING: + return "SPLITTING" + case SplitStatus_PAUSING: + return "PAUSING" + case SplitStatus_PAUSED: + return "PAUSED" + case SplitStatus_CANCELING: + return "CANCELING" + } + return "" +} + +func SplitStatusFromString(s string) (SplitStatus, error) { + switch s { + case "NOT_SPLIT": + return SplitStatus_NOT_SPLIT, nil + case "SPLITTING": + return SplitStatus_SPLITTING, nil + case "PAUSING": + return SplitStatus_PAUSING, nil + case "PAUSED": + return SplitStatus_PAUSED, nil + case "CANCELING": + return SplitStatus_CANCELING, nil + } + return SplitStatus(0), fmt.Errorf("not a valid SplitStatus string") +} + +func SplitStatusPtr(v SplitStatus) *SplitStatus { return &v } + +func (p SplitStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *SplitStatus) UnmarshalText(text []byte) error { + q, err := SplitStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *SplitStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = SplitStatus(v) + return nil +} + +func (p *SplitStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type DiskStatus int64 + +const ( + DiskStatus_NORMAL DiskStatus = 0 + DiskStatus_SPACE_INSUFFICIENT DiskStatus = 1 + DiskStatus_IO_ERROR DiskStatus = 2 +) + +func (p DiskStatus) String() string { + switch p { + case DiskStatus_NORMAL: + return "NORMAL" + case DiskStatus_SPACE_INSUFFICIENT: + return "SPACE_INSUFFICIENT" + case DiskStatus_IO_ERROR: + return "IO_ERROR" + } + return "" +} + +func DiskStatusFromString(s string) (DiskStatus, error) { + switch s { + case "NORMAL": + return DiskStatus_NORMAL, nil + case "SPACE_INSUFFICIENT": + return DiskStatus_SPACE_INSUFFICIENT, nil + case "IO_ERROR": + return DiskStatus_IO_ERROR, nil + } + return DiskStatus(0), fmt.Errorf("not a valid DiskStatus string") +} + +func DiskStatusPtr(v DiskStatus) *DiskStatus { return &v } + +func (p DiskStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *DiskStatus) UnmarshalText(text []byte) error { + q, err := DiskStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *DiskStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = DiskStatus(v) + return nil +} + +func (p *DiskStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type ManualCompactionStatus int64 + +const ( + ManualCompactionStatus_IDLE ManualCompactionStatus = 0 + ManualCompactionStatus_QUEUING ManualCompactionStatus = 1 + ManualCompactionStatus_RUNNING ManualCompactionStatus = 2 + ManualCompactionStatus_FINISHED ManualCompactionStatus = 3 +) + +func (p ManualCompactionStatus) String() string { + switch p { + case ManualCompactionStatus_IDLE: + return "IDLE" + case ManualCompactionStatus_QUEUING: + return "QUEUING" + case ManualCompactionStatus_RUNNING: + return "RUNNING" + case ManualCompactionStatus_FINISHED: + return "FINISHED" + } + return "" +} + +func ManualCompactionStatusFromString(s string) (ManualCompactionStatus, error) { + switch s { + case "IDLE": + return ManualCompactionStatus_IDLE, nil + case "QUEUING": + return ManualCompactionStatus_QUEUING, nil + case "RUNNING": + return ManualCompactionStatus_RUNNING, nil + case "FINISHED": + return ManualCompactionStatus_FINISHED, nil + } + return ManualCompactionStatus(0), fmt.Errorf("not a valid ManualCompactionStatus string") +} + +func ManualCompactionStatusPtr(v ManualCompactionStatus) *ManualCompactionStatus { return &v } + +func (p ManualCompactionStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *ManualCompactionStatus) UnmarshalText(text []byte) error { + q, err := ManualCompactionStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *ManualCompactionStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = ManualCompactionStatus(v) + return nil +} + +func (p *ManualCompactionStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Name +// - Size +// - Md5 +type FileMeta struct { + Name string `thrift:"name,1" db:"name" json:"name"` + Size int64 `thrift:"size,2" db:"size" json:"size"` + Md5 string `thrift:"md5,3" db:"md5" json:"md5"` +} + +func NewFileMeta() *FileMeta { + return &FileMeta{} +} + +func (p *FileMeta) GetName() string { + return p.Name +} + +func (p *FileMeta) GetSize() int64 { + return p.Size +} + +func (p *FileMeta) GetMd5() string { + return p.Md5 +} +func (p *FileMeta) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *FileMeta) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Name = v + } + return nil +} + +func (p *FileMeta) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Size = v + } + return nil +} + +func (p *FileMeta) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Md5 = v + } + return nil +} + +func (p *FileMeta) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("file_meta"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *FileMeta) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:name: ", p), err) + } + if err := oprot.WriteString(string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:name: ", p), err) + } + return err +} + +func (p *FileMeta) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("size", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:size: ", p), err) + } + if err := oprot.WriteI64(int64(p.Size)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.size (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:size: ", p), err) + } + return err +} + +func (p *FileMeta) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("md5", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:md5: ", p), err) + } + if err := oprot.WriteString(string(p.Md5)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.md5 (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:md5: ", p), err) + } + return err +} + +func (p *FileMeta) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("FileMeta(%+v)", *p) +} + +// Attributes: +// - Pid +// - Ballot +// - Primary +// - Status +// - LearnerSignature +// - PopAll +// - SplitSyncToChild +// - HpPrimary +type ReplicaConfiguration struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + Ballot int64 `thrift:"ballot,2" db:"ballot" json:"ballot"` + Primary *base.RPCAddress `thrift:"primary,3" db:"primary" json:"primary"` + Status PartitionStatus `thrift:"status,4" db:"status" json:"status"` + LearnerSignature int64 `thrift:"learner_signature,5" db:"learner_signature" json:"learner_signature"` + PopAll bool `thrift:"pop_all,6" db:"pop_all" json:"pop_all"` + SplitSyncToChild bool `thrift:"split_sync_to_child,7" db:"split_sync_to_child" json:"split_sync_to_child"` + HpPrimary *base.HostPort `thrift:"hp_primary,8" db:"hp_primary" json:"hp_primary,omitempty"` +} + +func NewReplicaConfiguration() *ReplicaConfiguration { + return &ReplicaConfiguration{ + Status: 0, + } +} + +var ReplicaConfiguration_Pid_DEFAULT *base.Gpid + +func (p *ReplicaConfiguration) GetPid() *base.Gpid { + if !p.IsSetPid() { + return ReplicaConfiguration_Pid_DEFAULT + } + return p.Pid +} + +func (p *ReplicaConfiguration) GetBallot() int64 { + return p.Ballot +} + +var ReplicaConfiguration_Primary_DEFAULT *base.RPCAddress + +func (p *ReplicaConfiguration) GetPrimary() *base.RPCAddress { + if !p.IsSetPrimary() { + return ReplicaConfiguration_Primary_DEFAULT + } + return p.Primary +} + +func (p *ReplicaConfiguration) GetStatus() PartitionStatus { + return p.Status +} + +func (p *ReplicaConfiguration) GetLearnerSignature() int64 { + return p.LearnerSignature +} + +var ReplicaConfiguration_PopAll_DEFAULT bool = false + +func (p *ReplicaConfiguration) GetPopAll() bool { + return p.PopAll +} + +var ReplicaConfiguration_SplitSyncToChild_DEFAULT bool = false + +func (p *ReplicaConfiguration) GetSplitSyncToChild() bool { + return p.SplitSyncToChild +} + +var ReplicaConfiguration_HpPrimary_DEFAULT *base.HostPort + +func (p *ReplicaConfiguration) GetHpPrimary() *base.HostPort { + if !p.IsSetHpPrimary() { + return ReplicaConfiguration_HpPrimary_DEFAULT + } + return p.HpPrimary +} +func (p *ReplicaConfiguration) IsSetPid() bool { + return p.Pid != nil +} + +func (p *ReplicaConfiguration) IsSetPrimary() bool { + return p.Primary != nil +} + +func (p *ReplicaConfiguration) IsSetPopAll() bool { + return p.PopAll != ReplicaConfiguration_PopAll_DEFAULT +} + +func (p *ReplicaConfiguration) IsSetSplitSyncToChild() bool { + return p.SplitSyncToChild != ReplicaConfiguration_SplitSyncToChild_DEFAULT +} + +func (p *ReplicaConfiguration) IsSetHpPrimary() bool { + return p.HpPrimary != nil +} + +func (p *ReplicaConfiguration) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaConfiguration) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *ReplicaConfiguration) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *ReplicaConfiguration) ReadField3(iprot thrift.TProtocol) error { + p.Primary = &base.RPCAddress{} + if err := p.Primary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Primary), err) + } + return nil +} + +func (p *ReplicaConfiguration) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + temp := PartitionStatus(v) + p.Status = temp + } + return nil +} + +func (p *ReplicaConfiguration) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.LearnerSignature = v + } + return nil +} + +func (p *ReplicaConfiguration) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.PopAll = v + } + return nil +} + +func (p *ReplicaConfiguration) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.SplitSyncToChild = v + } + return nil +} + +func (p *ReplicaConfiguration) ReadField8(iprot thrift.TProtocol) error { + p.HpPrimary = &base.HostPort{} + if err := p.HpPrimary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpPrimary), err) + } + return nil +} + +func (p *ReplicaConfiguration) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("replica_configuration"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaConfiguration) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *ReplicaConfiguration) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ballot: ", p), err) + } + return err +} + +func (p *ReplicaConfiguration) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("primary", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:primary: ", p), err) + } + if err := p.Primary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Primary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:primary: ", p), err) + } + return err +} + +func (p *ReplicaConfiguration) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:status: ", p), err) + } + return err +} + +func (p *ReplicaConfiguration) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("learner_signature", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:learner_signature: ", p), err) + } + if err := oprot.WriteI64(int64(p.LearnerSignature)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.learner_signature (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:learner_signature: ", p), err) + } + return err +} + +func (p *ReplicaConfiguration) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetPopAll() { + if err := oprot.WriteFieldBegin("pop_all", thrift.BOOL, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:pop_all: ", p), err) + } + if err := oprot.WriteBool(bool(p.PopAll)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.pop_all (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:pop_all: ", p), err) + } + } + return err +} + +func (p *ReplicaConfiguration) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetSplitSyncToChild() { + if err := oprot.WriteFieldBegin("split_sync_to_child", thrift.BOOL, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:split_sync_to_child: ", p), err) + } + if err := oprot.WriteBool(bool(p.SplitSyncToChild)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.split_sync_to_child (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:split_sync_to_child: ", p), err) + } + } + return err +} + +func (p *ReplicaConfiguration) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetHpPrimary() { + if err := oprot.WriteFieldBegin("hp_primary", thrift.STRUCT, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:hp_primary: ", p), err) + } + if err := p.HpPrimary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpPrimary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:hp_primary: ", p), err) + } + } + return err +} + +func (p *ReplicaConfiguration) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaConfiguration(%+v)", *p) +} + +// Attributes: +// - Pid +// - Ballot +// - Status +// - LastCommittedDecree +// - LastPreparedDecree +// - LastDurableDecree +// - AppType +// - DiskTag +// - ManualCompactStatus +type ReplicaInfo struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + Ballot int64 `thrift:"ballot,2" db:"ballot" json:"ballot"` + Status PartitionStatus `thrift:"status,3" db:"status" json:"status"` + LastCommittedDecree int64 `thrift:"last_committed_decree,4" db:"last_committed_decree" json:"last_committed_decree"` + LastPreparedDecree int64 `thrift:"last_prepared_decree,5" db:"last_prepared_decree" json:"last_prepared_decree"` + LastDurableDecree int64 `thrift:"last_durable_decree,6" db:"last_durable_decree" json:"last_durable_decree"` + AppType string `thrift:"app_type,7" db:"app_type" json:"app_type"` + DiskTag string `thrift:"disk_tag,8" db:"disk_tag" json:"disk_tag"` + ManualCompactStatus *ManualCompactionStatus `thrift:"manual_compact_status,9" db:"manual_compact_status" json:"manual_compact_status,omitempty"` +} + +func NewReplicaInfo() *ReplicaInfo { + return &ReplicaInfo{} +} + +var ReplicaInfo_Pid_DEFAULT *base.Gpid + +func (p *ReplicaInfo) GetPid() *base.Gpid { + if !p.IsSetPid() { + return ReplicaInfo_Pid_DEFAULT + } + return p.Pid +} + +func (p *ReplicaInfo) GetBallot() int64 { + return p.Ballot +} + +func (p *ReplicaInfo) GetStatus() PartitionStatus { + return p.Status +} + +func (p *ReplicaInfo) GetLastCommittedDecree() int64 { + return p.LastCommittedDecree +} + +func (p *ReplicaInfo) GetLastPreparedDecree() int64 { + return p.LastPreparedDecree +} + +func (p *ReplicaInfo) GetLastDurableDecree() int64 { + return p.LastDurableDecree +} + +func (p *ReplicaInfo) GetAppType() string { + return p.AppType +} + +func (p *ReplicaInfo) GetDiskTag() string { + return p.DiskTag +} + +var ReplicaInfo_ManualCompactStatus_DEFAULT ManualCompactionStatus + +func (p *ReplicaInfo) GetManualCompactStatus() ManualCompactionStatus { + if !p.IsSetManualCompactStatus() { + return ReplicaInfo_ManualCompactStatus_DEFAULT + } + return *p.ManualCompactStatus +} +func (p *ReplicaInfo) IsSetPid() bool { + return p.Pid != nil +} + +func (p *ReplicaInfo) IsSetManualCompactStatus() bool { + return p.ManualCompactStatus != nil +} + +func (p *ReplicaInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I64 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRING { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRING { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.I32 { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaInfo) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *ReplicaInfo) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *ReplicaInfo) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := PartitionStatus(v) + p.Status = temp + } + return nil +} + +func (p *ReplicaInfo) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.LastCommittedDecree = v + } + return nil +} + +func (p *ReplicaInfo) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.LastPreparedDecree = v + } + return nil +} + +func (p *ReplicaInfo) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.LastDurableDecree = v + } + return nil +} + +func (p *ReplicaInfo) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.AppType = v + } + return nil +} + +func (p *ReplicaInfo) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.DiskTag = v + } + return nil +} + +func (p *ReplicaInfo) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + temp := ManualCompactionStatus(v) + p.ManualCompactStatus = &temp + } + return nil +} + +func (p *ReplicaInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("replica_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ballot: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:status: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_committed_decree", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:last_committed_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastCommittedDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_committed_decree (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:last_committed_decree: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_prepared_decree", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:last_prepared_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastPreparedDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_prepared_decree (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:last_prepared_decree: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_durable_decree", thrift.I64, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:last_durable_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastDurableDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_durable_decree (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:last_durable_decree: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_type", thrift.STRING, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:app_type: ", p), err) + } + if err := oprot.WriteString(string(p.AppType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_type (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:app_type: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("disk_tag", thrift.STRING, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:disk_tag: ", p), err) + } + if err := oprot.WriteString(string(p.DiskTag)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.disk_tag (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:disk_tag: ", p), err) + } + return err +} + +func (p *ReplicaInfo) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetManualCompactStatus() { + if err := oprot.WriteFieldBegin("manual_compact_status", thrift.I32, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:manual_compact_status: ", p), err) + } + if err := oprot.WriteI32(int32(*p.ManualCompactStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.manual_compact_status (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:manual_compact_status: ", p), err) + } + } + return err +} + +func (p *ReplicaInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaInfo(%+v)", *p) +} diff --git a/go-client/idl/admin/partition_split-consts.go b/go-client/idl/admin/partition_split-consts.go new file mode 100644 index 0000000000..757b943ef3 --- /dev/null +++ b/go-client/idl/admin/partition_split-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/admin/partition_split.go b/go-client/idl/admin/partition_split.go new file mode 100644 index 0000000000..45cc544d62 --- /dev/null +++ b/go-client/idl/admin/partition_split.go @@ -0,0 +1,3245 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package admin + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ + +type SplitControlType int64 + +const ( + SplitControlType_PAUSE SplitControlType = 0 + SplitControlType_RESTART SplitControlType = 1 + SplitControlType_CANCEL SplitControlType = 2 +) + +func (p SplitControlType) String() string { + switch p { + case SplitControlType_PAUSE: + return "PAUSE" + case SplitControlType_RESTART: + return "RESTART" + case SplitControlType_CANCEL: + return "CANCEL" + } + return "" +} + +func SplitControlTypeFromString(s string) (SplitControlType, error) { + switch s { + case "PAUSE": + return SplitControlType_PAUSE, nil + case "RESTART": + return SplitControlType_RESTART, nil + case "CANCEL": + return SplitControlType_CANCEL, nil + } + return SplitControlType(0), fmt.Errorf("not a valid SplitControlType string") +} + +func SplitControlTypePtr(v SplitControlType) *SplitControlType { return &v } + +func (p SplitControlType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *SplitControlType) UnmarshalText(text []byte) error { + q, err := SplitControlTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *SplitControlType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = SplitControlType(v) + return nil +} + +func (p *SplitControlType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - AppName +// - NewPartitionCount_ +type StartPartitionSplitRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + NewPartitionCount_ int32 `thrift:"new_partition_count,2" db:"new_partition_count" json:"new_partition_count"` +} + +func NewStartPartitionSplitRequest() *StartPartitionSplitRequest { + return &StartPartitionSplitRequest{} +} + +func (p *StartPartitionSplitRequest) GetAppName() string { + return p.AppName +} + +func (p *StartPartitionSplitRequest) GetNewPartitionCount_() int32 { + return p.NewPartitionCount_ +} +func (p *StartPartitionSplitRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartPartitionSplitRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *StartPartitionSplitRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.NewPartitionCount_ = v + } + return nil +} + +func (p *StartPartitionSplitRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_partition_split_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartPartitionSplitRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *StartPartitionSplitRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_partition_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.NewPartitionCount_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_partition_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_partition_count: ", p), err) + } + return err +} + +func (p *StartPartitionSplitRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartPartitionSplitRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +type StartPartitionSplitResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg"` +} + +func NewStartPartitionSplitResponse() *StartPartitionSplitResponse { + return &StartPartitionSplitResponse{} +} + +var StartPartitionSplitResponse_Err_DEFAULT *base.ErrorCode + +func (p *StartPartitionSplitResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return StartPartitionSplitResponse_Err_DEFAULT + } + return p.Err +} + +func (p *StartPartitionSplitResponse) GetHintMsg() string { + return p.HintMsg +} +func (p *StartPartitionSplitResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *StartPartitionSplitResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *StartPartitionSplitResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *StartPartitionSplitResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = v + } + return nil +} + +func (p *StartPartitionSplitResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("start_partition_split_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *StartPartitionSplitResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *StartPartitionSplitResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + return err +} + +func (p *StartPartitionSplitResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("StartPartitionSplitResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - ControlType +// - ParentPidx +// - OldPartitionCount +type ControlSplitRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + ControlType SplitControlType `thrift:"control_type,2" db:"control_type" json:"control_type"` + ParentPidx int32 `thrift:"parent_pidx,3" db:"parent_pidx" json:"parent_pidx"` + OldPartitionCount *int32 `thrift:"old_partition_count,4" db:"old_partition_count" json:"old_partition_count,omitempty"` +} + +func NewControlSplitRequest() *ControlSplitRequest { + return &ControlSplitRequest{} +} + +func (p *ControlSplitRequest) GetAppName() string { + return p.AppName +} + +func (p *ControlSplitRequest) GetControlType() SplitControlType { + return p.ControlType +} + +func (p *ControlSplitRequest) GetParentPidx() int32 { + return p.ParentPidx +} + +var ControlSplitRequest_OldPartitionCount_DEFAULT int32 + +func (p *ControlSplitRequest) GetOldPartitionCount() int32 { + if !p.IsSetOldPartitionCount() { + return ControlSplitRequest_OldPartitionCount_DEFAULT + } + return *p.OldPartitionCount +} +func (p *ControlSplitRequest) IsSetOldPartitionCount() bool { + return p.OldPartitionCount != nil +} + +func (p *ControlSplitRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ControlSplitRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *ControlSplitRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := SplitControlType(v) + p.ControlType = temp + } + return nil +} + +func (p *ControlSplitRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ParentPidx = v + } + return nil +} + +func (p *ControlSplitRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.OldPartitionCount = &v + } + return nil +} + +func (p *ControlSplitRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_split_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ControlSplitRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *ControlSplitRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("control_type", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:control_type: ", p), err) + } + if err := oprot.WriteI32(int32(p.ControlType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.control_type (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:control_type: ", p), err) + } + return err +} + +func (p *ControlSplitRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("parent_pidx", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:parent_pidx: ", p), err) + } + if err := oprot.WriteI32(int32(p.ParentPidx)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parent_pidx (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:parent_pidx: ", p), err) + } + return err +} + +func (p *ControlSplitRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetOldPartitionCount() { + if err := oprot.WriteFieldBegin("old_partition_count", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:old_partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.OldPartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.old_partition_count (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:old_partition_count: ", p), err) + } + } + return err +} + +func (p *ControlSplitRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ControlSplitRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - HintMsg +type ControlSplitResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + HintMsg *string `thrift:"hint_msg,2" db:"hint_msg" json:"hint_msg,omitempty"` +} + +func NewControlSplitResponse() *ControlSplitResponse { + return &ControlSplitResponse{} +} + +var ControlSplitResponse_Err_DEFAULT *base.ErrorCode + +func (p *ControlSplitResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ControlSplitResponse_Err_DEFAULT + } + return p.Err +} + +var ControlSplitResponse_HintMsg_DEFAULT string + +func (p *ControlSplitResponse) GetHintMsg() string { + if !p.IsSetHintMsg() { + return ControlSplitResponse_HintMsg_DEFAULT + } + return *p.HintMsg +} +func (p *ControlSplitResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ControlSplitResponse) IsSetHintMsg() bool { + return p.HintMsg != nil +} + +func (p *ControlSplitResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ControlSplitResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ControlSplitResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.HintMsg = &v + } + return nil +} + +func (p *ControlSplitResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("control_split_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ControlSplitResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ControlSplitResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetHintMsg() { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(*p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint_msg: ", p), err) + } + } + return err +} + +func (p *ControlSplitResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ControlSplitResponse(%+v)", *p) +} + +// Attributes: +// - AppName +type QuerySplitRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` +} + +func NewQuerySplitRequest() *QuerySplitRequest { + return &QuerySplitRequest{} +} + +func (p *QuerySplitRequest) GetAppName() string { + return p.AppName +} +func (p *QuerySplitRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QuerySplitRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QuerySplitRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_split_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QuerySplitRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *QuerySplitRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QuerySplitRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - NewPartitionCount_ +// - Status +// - HintMsg +type QuerySplitResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + NewPartitionCount_ int32 `thrift:"new_partition_count,2" db:"new_partition_count" json:"new_partition_count"` + Status map[int32]SplitStatus `thrift:"status,3" db:"status" json:"status"` + HintMsg *string `thrift:"hint_msg,4" db:"hint_msg" json:"hint_msg,omitempty"` +} + +func NewQuerySplitResponse() *QuerySplitResponse { + return &QuerySplitResponse{} +} + +var QuerySplitResponse_Err_DEFAULT *base.ErrorCode + +func (p *QuerySplitResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QuerySplitResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QuerySplitResponse) GetNewPartitionCount_() int32 { + return p.NewPartitionCount_ +} + +func (p *QuerySplitResponse) GetStatus() map[int32]SplitStatus { + return p.Status +} + +var QuerySplitResponse_HintMsg_DEFAULT string + +func (p *QuerySplitResponse) GetHintMsg() string { + if !p.IsSetHintMsg() { + return QuerySplitResponse_HintMsg_DEFAULT + } + return *p.HintMsg +} +func (p *QuerySplitResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QuerySplitResponse) IsSetHintMsg() bool { + return p.HintMsg != nil +} + +func (p *QuerySplitResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.MAP { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QuerySplitResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QuerySplitResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.NewPartitionCount_ = v + } + return nil +} + +func (p *QuerySplitResponse) ReadField3(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[int32]SplitStatus, size) + p.Status = tMap + for i := 0; i < size; i++ { + var _key0 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key0 = v + } + var _val1 SplitStatus + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + temp := SplitStatus(v) + _val1 = temp + } + p.Status[_key0] = _val1 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *QuerySplitResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.HintMsg = &v + } + return nil +} + +func (p *QuerySplitResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_split_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QuerySplitResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QuerySplitResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_partition_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.NewPartitionCount_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_partition_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_partition_count: ", p), err) + } + return err +} + +func (p *QuerySplitResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.MAP, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:status: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.I32, len(p.Status)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.Status { + if err := oprot.WriteI32(int32(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:status: ", p), err) + } + return err +} + +func (p *QuerySplitResponse) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetHintMsg() { + if err := oprot.WriteFieldBegin("hint_msg", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:hint_msg: ", p), err) + } + if err := oprot.WriteString(string(*p.HintMsg)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint_msg (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:hint_msg: ", p), err) + } + } + return err +} + +func (p *QuerySplitResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QuerySplitResponse(%+v)", *p) +} + +// Attributes: +// - ParentGpid +// - ChildGpid +// - ChildBallot +// - Child +// - HpChild +type NotifyCatchUpRequest struct { + ParentGpid *base.Gpid `thrift:"parent_gpid,1" db:"parent_gpid" json:"parent_gpid"` + ChildGpid *base.Gpid `thrift:"child_gpid,2" db:"child_gpid" json:"child_gpid"` + ChildBallot int64 `thrift:"child_ballot,3" db:"child_ballot" json:"child_ballot"` + Child *base.RPCAddress `thrift:"child,4" db:"child" json:"child"` + HpChild *base.HostPort `thrift:"hp_child,5" db:"hp_child" json:"hp_child,omitempty"` +} + +func NewNotifyCatchUpRequest() *NotifyCatchUpRequest { + return &NotifyCatchUpRequest{} +} + +var NotifyCatchUpRequest_ParentGpid_DEFAULT *base.Gpid + +func (p *NotifyCatchUpRequest) GetParentGpid() *base.Gpid { + if !p.IsSetParentGpid() { + return NotifyCatchUpRequest_ParentGpid_DEFAULT + } + return p.ParentGpid +} + +var NotifyCatchUpRequest_ChildGpid_DEFAULT *base.Gpid + +func (p *NotifyCatchUpRequest) GetChildGpid() *base.Gpid { + if !p.IsSetChildGpid() { + return NotifyCatchUpRequest_ChildGpid_DEFAULT + } + return p.ChildGpid +} + +func (p *NotifyCatchUpRequest) GetChildBallot() int64 { + return p.ChildBallot +} + +var NotifyCatchUpRequest_Child_DEFAULT *base.RPCAddress + +func (p *NotifyCatchUpRequest) GetChild() *base.RPCAddress { + if !p.IsSetChild() { + return NotifyCatchUpRequest_Child_DEFAULT + } + return p.Child +} + +var NotifyCatchUpRequest_HpChild_DEFAULT *base.HostPort + +func (p *NotifyCatchUpRequest) GetHpChild() *base.HostPort { + if !p.IsSetHpChild() { + return NotifyCatchUpRequest_HpChild_DEFAULT + } + return p.HpChild +} +func (p *NotifyCatchUpRequest) IsSetParentGpid() bool { + return p.ParentGpid != nil +} + +func (p *NotifyCatchUpRequest) IsSetChildGpid() bool { + return p.ChildGpid != nil +} + +func (p *NotifyCatchUpRequest) IsSetChild() bool { + return p.Child != nil +} + +func (p *NotifyCatchUpRequest) IsSetHpChild() bool { + return p.HpChild != nil +} + +func (p *NotifyCatchUpRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NotifyCatchUpRequest) ReadField1(iprot thrift.TProtocol) error { + p.ParentGpid = &base.Gpid{} + if err := p.ParentGpid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ParentGpid), err) + } + return nil +} + +func (p *NotifyCatchUpRequest) ReadField2(iprot thrift.TProtocol) error { + p.ChildGpid = &base.Gpid{} + if err := p.ChildGpid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ChildGpid), err) + } + return nil +} + +func (p *NotifyCatchUpRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ChildBallot = v + } + return nil +} + +func (p *NotifyCatchUpRequest) ReadField4(iprot thrift.TProtocol) error { + p.Child = &base.RPCAddress{} + if err := p.Child.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Child), err) + } + return nil +} + +func (p *NotifyCatchUpRequest) ReadField5(iprot thrift.TProtocol) error { + p.HpChild = &base.HostPort{} + if err := p.HpChild.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpChild), err) + } + return nil +} + +func (p *NotifyCatchUpRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("notify_catch_up_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NotifyCatchUpRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("parent_gpid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:parent_gpid: ", p), err) + } + if err := p.ParentGpid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ParentGpid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:parent_gpid: ", p), err) + } + return err +} + +func (p *NotifyCatchUpRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("child_gpid", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:child_gpid: ", p), err) + } + if err := p.ChildGpid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ChildGpid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:child_gpid: ", p), err) + } + return err +} + +func (p *NotifyCatchUpRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("child_ballot", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:child_ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.ChildBallot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.child_ballot (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:child_ballot: ", p), err) + } + return err +} + +func (p *NotifyCatchUpRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("child", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:child: ", p), err) + } + if err := p.Child.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Child), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:child: ", p), err) + } + return err +} + +func (p *NotifyCatchUpRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetHpChild() { + if err := oprot.WriteFieldBegin("hp_child", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:hp_child: ", p), err) + } + if err := p.HpChild.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpChild), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:hp_child: ", p), err) + } + } + return err +} + +func (p *NotifyCatchUpRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NotifyCatchUpRequest(%+v)", *p) +} + +// Attributes: +// - Err +type NotifyCacthUpResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` +} + +func NewNotifyCacthUpResponse() *NotifyCacthUpResponse { + return &NotifyCacthUpResponse{} +} + +var NotifyCacthUpResponse_Err_DEFAULT *base.ErrorCode + +func (p *NotifyCacthUpResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return NotifyCacthUpResponse_Err_DEFAULT + } + return p.Err +} +func (p *NotifyCacthUpResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *NotifyCacthUpResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NotifyCacthUpResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *NotifyCacthUpResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("notify_cacth_up_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NotifyCacthUpResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *NotifyCacthUpResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NotifyCacthUpResponse(%+v)", *p) +} + +// Attributes: +// - Target +// - NewPartitionCount_ +// - ChildPid +// - Ballot +// - HpTarget +type UpdateChildGroupPartitionCountRequest struct { + Target *base.RPCAddress `thrift:"target,1" db:"target" json:"target"` + NewPartitionCount_ int32 `thrift:"new_partition_count,2" db:"new_partition_count" json:"new_partition_count"` + ChildPid *base.Gpid `thrift:"child_pid,3" db:"child_pid" json:"child_pid"` + Ballot int64 `thrift:"ballot,4" db:"ballot" json:"ballot"` + HpTarget *base.HostPort `thrift:"hp_target,5" db:"hp_target" json:"hp_target,omitempty"` +} + +func NewUpdateChildGroupPartitionCountRequest() *UpdateChildGroupPartitionCountRequest { + return &UpdateChildGroupPartitionCountRequest{} +} + +var UpdateChildGroupPartitionCountRequest_Target_DEFAULT *base.RPCAddress + +func (p *UpdateChildGroupPartitionCountRequest) GetTarget() *base.RPCAddress { + if !p.IsSetTarget() { + return UpdateChildGroupPartitionCountRequest_Target_DEFAULT + } + return p.Target +} + +func (p *UpdateChildGroupPartitionCountRequest) GetNewPartitionCount_() int32 { + return p.NewPartitionCount_ +} + +var UpdateChildGroupPartitionCountRequest_ChildPid_DEFAULT *base.Gpid + +func (p *UpdateChildGroupPartitionCountRequest) GetChildPid() *base.Gpid { + if !p.IsSetChildPid() { + return UpdateChildGroupPartitionCountRequest_ChildPid_DEFAULT + } + return p.ChildPid +} + +func (p *UpdateChildGroupPartitionCountRequest) GetBallot() int64 { + return p.Ballot +} + +var UpdateChildGroupPartitionCountRequest_HpTarget_DEFAULT *base.HostPort + +func (p *UpdateChildGroupPartitionCountRequest) GetHpTarget() *base.HostPort { + if !p.IsSetHpTarget() { + return UpdateChildGroupPartitionCountRequest_HpTarget_DEFAULT + } + return p.HpTarget +} +func (p *UpdateChildGroupPartitionCountRequest) IsSetTarget() bool { + return p.Target != nil +} + +func (p *UpdateChildGroupPartitionCountRequest) IsSetChildPid() bool { + return p.ChildPid != nil +} + +func (p *UpdateChildGroupPartitionCountRequest) IsSetHpTarget() bool { + return p.HpTarget != nil +} + +func (p *UpdateChildGroupPartitionCountRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) ReadField1(iprot thrift.TProtocol) error { + p.Target = &base.RPCAddress{} + if err := p.Target.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Target), err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.NewPartitionCount_ = v + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) ReadField3(iprot thrift.TProtocol) error { + p.ChildPid = &base.Gpid{} + if err := p.ChildPid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ChildPid), err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) ReadField5(iprot thrift.TProtocol) error { + p.HpTarget = &base.HostPort{} + if err := p.HpTarget.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpTarget), err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("update_child_group_partition_count_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("target", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:target: ", p), err) + } + if err := p.Target.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Target), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:target: ", p), err) + } + return err +} + +func (p *UpdateChildGroupPartitionCountRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_partition_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.NewPartitionCount_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_partition_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_partition_count: ", p), err) + } + return err +} + +func (p *UpdateChildGroupPartitionCountRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("child_pid", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:child_pid: ", p), err) + } + if err := p.ChildPid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ChildPid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:child_pid: ", p), err) + } + return err +} + +func (p *UpdateChildGroupPartitionCountRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ballot: ", p), err) + } + return err +} + +func (p *UpdateChildGroupPartitionCountRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetHpTarget() { + if err := oprot.WriteFieldBegin("hp_target", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:hp_target: ", p), err) + } + if err := p.HpTarget.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpTarget), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:hp_target: ", p), err) + } + } + return err +} + +func (p *UpdateChildGroupPartitionCountRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("UpdateChildGroupPartitionCountRequest(%+v)", *p) +} + +// Attributes: +// - Err +type UpdateChildGroupPartitionCountResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` +} + +func NewUpdateChildGroupPartitionCountResponse() *UpdateChildGroupPartitionCountResponse { + return &UpdateChildGroupPartitionCountResponse{} +} + +var UpdateChildGroupPartitionCountResponse_Err_DEFAULT *base.ErrorCode + +func (p *UpdateChildGroupPartitionCountResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return UpdateChildGroupPartitionCountResponse_Err_DEFAULT + } + return p.Err +} +func (p *UpdateChildGroupPartitionCountResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *UpdateChildGroupPartitionCountResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("update_child_group_partition_count_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *UpdateChildGroupPartitionCountResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *UpdateChildGroupPartitionCountResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("UpdateChildGroupPartitionCountResponse(%+v)", *p) +} + +// Attributes: +// - App +// - ParentConfig +// - ChildConfig +// - Primary +// - HpPrimary +type RegisterChildRequest struct { + App *replication.AppInfo `thrift:"app,1" db:"app" json:"app"` + ParentConfig *replication.PartitionConfiguration `thrift:"parent_config,2" db:"parent_config" json:"parent_config"` + ChildConfig *replication.PartitionConfiguration `thrift:"child_config,3" db:"child_config" json:"child_config"` + Primary *base.RPCAddress `thrift:"primary,4" db:"primary" json:"primary"` + HpPrimary *base.HostPort `thrift:"hp_primary,5" db:"hp_primary" json:"hp_primary,omitempty"` +} + +func NewRegisterChildRequest() *RegisterChildRequest { + return &RegisterChildRequest{} +} + +var RegisterChildRequest_App_DEFAULT *replication.AppInfo + +func (p *RegisterChildRequest) GetApp() *replication.AppInfo { + if !p.IsSetApp() { + return RegisterChildRequest_App_DEFAULT + } + return p.App +} + +var RegisterChildRequest_ParentConfig_DEFAULT *replication.PartitionConfiguration + +func (p *RegisterChildRequest) GetParentConfig() *replication.PartitionConfiguration { + if !p.IsSetParentConfig() { + return RegisterChildRequest_ParentConfig_DEFAULT + } + return p.ParentConfig +} + +var RegisterChildRequest_ChildConfig_DEFAULT *replication.PartitionConfiguration + +func (p *RegisterChildRequest) GetChildConfig() *replication.PartitionConfiguration { + if !p.IsSetChildConfig() { + return RegisterChildRequest_ChildConfig_DEFAULT + } + return p.ChildConfig +} + +var RegisterChildRequest_Primary_DEFAULT *base.RPCAddress + +func (p *RegisterChildRequest) GetPrimary() *base.RPCAddress { + if !p.IsSetPrimary() { + return RegisterChildRequest_Primary_DEFAULT + } + return p.Primary +} + +var RegisterChildRequest_HpPrimary_DEFAULT *base.HostPort + +func (p *RegisterChildRequest) GetHpPrimary() *base.HostPort { + if !p.IsSetHpPrimary() { + return RegisterChildRequest_HpPrimary_DEFAULT + } + return p.HpPrimary +} +func (p *RegisterChildRequest) IsSetApp() bool { + return p.App != nil +} + +func (p *RegisterChildRequest) IsSetParentConfig() bool { + return p.ParentConfig != nil +} + +func (p *RegisterChildRequest) IsSetChildConfig() bool { + return p.ChildConfig != nil +} + +func (p *RegisterChildRequest) IsSetPrimary() bool { + return p.Primary != nil +} + +func (p *RegisterChildRequest) IsSetHpPrimary() bool { + return p.HpPrimary != nil +} + +func (p *RegisterChildRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RegisterChildRequest) ReadField1(iprot thrift.TProtocol) error { + p.App = &replication.AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } + if err := p.App.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.App), err) + } + return nil +} + +func (p *RegisterChildRequest) ReadField2(iprot thrift.TProtocol) error { + p.ParentConfig = &replication.PartitionConfiguration{} + if err := p.ParentConfig.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ParentConfig), err) + } + return nil +} + +func (p *RegisterChildRequest) ReadField3(iprot thrift.TProtocol) error { + p.ChildConfig = &replication.PartitionConfiguration{} + if err := p.ChildConfig.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ChildConfig), err) + } + return nil +} + +func (p *RegisterChildRequest) ReadField4(iprot thrift.TProtocol) error { + p.Primary = &base.RPCAddress{} + if err := p.Primary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Primary), err) + } + return nil +} + +func (p *RegisterChildRequest) ReadField5(iprot thrift.TProtocol) error { + p.HpPrimary = &base.HostPort{} + if err := p.HpPrimary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpPrimary), err) + } + return nil +} + +func (p *RegisterChildRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("register_child_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RegisterChildRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app: ", p), err) + } + if err := p.App.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.App), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app: ", p), err) + } + return err +} + +func (p *RegisterChildRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("parent_config", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:parent_config: ", p), err) + } + if err := p.ParentConfig.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ParentConfig), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:parent_config: ", p), err) + } + return err +} + +func (p *RegisterChildRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("child_config", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:child_config: ", p), err) + } + if err := p.ChildConfig.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ChildConfig), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:child_config: ", p), err) + } + return err +} + +func (p *RegisterChildRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("primary", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:primary: ", p), err) + } + if err := p.Primary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Primary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:primary: ", p), err) + } + return err +} + +func (p *RegisterChildRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetHpPrimary() { + if err := oprot.WriteFieldBegin("hp_primary", thrift.STRUCT, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:hp_primary: ", p), err) + } + if err := p.HpPrimary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpPrimary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:hp_primary: ", p), err) + } + } + return err +} + +func (p *RegisterChildRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RegisterChildRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - App +// - ParentConfig +// - ChildConfig +type RegisterChildResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + App *replication.AppInfo `thrift:"app,2" db:"app" json:"app"` + ParentConfig *replication.PartitionConfiguration `thrift:"parent_config,3" db:"parent_config" json:"parent_config"` + ChildConfig *replication.PartitionConfiguration `thrift:"child_config,4" db:"child_config" json:"child_config"` +} + +func NewRegisterChildResponse() *RegisterChildResponse { + return &RegisterChildResponse{} +} + +var RegisterChildResponse_Err_DEFAULT *base.ErrorCode + +func (p *RegisterChildResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return RegisterChildResponse_Err_DEFAULT + } + return p.Err +} + +var RegisterChildResponse_App_DEFAULT *replication.AppInfo + +func (p *RegisterChildResponse) GetApp() *replication.AppInfo { + if !p.IsSetApp() { + return RegisterChildResponse_App_DEFAULT + } + return p.App +} + +var RegisterChildResponse_ParentConfig_DEFAULT *replication.PartitionConfiguration + +func (p *RegisterChildResponse) GetParentConfig() *replication.PartitionConfiguration { + if !p.IsSetParentConfig() { + return RegisterChildResponse_ParentConfig_DEFAULT + } + return p.ParentConfig +} + +var RegisterChildResponse_ChildConfig_DEFAULT *replication.PartitionConfiguration + +func (p *RegisterChildResponse) GetChildConfig() *replication.PartitionConfiguration { + if !p.IsSetChildConfig() { + return RegisterChildResponse_ChildConfig_DEFAULT + } + return p.ChildConfig +} +func (p *RegisterChildResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *RegisterChildResponse) IsSetApp() bool { + return p.App != nil +} + +func (p *RegisterChildResponse) IsSetParentConfig() bool { + return p.ParentConfig != nil +} + +func (p *RegisterChildResponse) IsSetChildConfig() bool { + return p.ChildConfig != nil +} + +func (p *RegisterChildResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RegisterChildResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *RegisterChildResponse) ReadField2(iprot thrift.TProtocol) error { + p.App = &replication.AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } + if err := p.App.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.App), err) + } + return nil +} + +func (p *RegisterChildResponse) ReadField3(iprot thrift.TProtocol) error { + p.ParentConfig = &replication.PartitionConfiguration{} + if err := p.ParentConfig.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ParentConfig), err) + } + return nil +} + +func (p *RegisterChildResponse) ReadField4(iprot thrift.TProtocol) error { + p.ChildConfig = &replication.PartitionConfiguration{} + if err := p.ChildConfig.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ChildConfig), err) + } + return nil +} + +func (p *RegisterChildResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("register_child_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RegisterChildResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *RegisterChildResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app: ", p), err) + } + if err := p.App.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.App), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app: ", p), err) + } + return err +} + +func (p *RegisterChildResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("parent_config", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:parent_config: ", p), err) + } + if err := p.ParentConfig.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ParentConfig), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:parent_config: ", p), err) + } + return err +} + +func (p *RegisterChildResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("child_config", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:child_config: ", p), err) + } + if err := p.ChildConfig.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ChildConfig), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:child_config: ", p), err) + } + return err +} + +func (p *RegisterChildResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RegisterChildResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - ParentGpid +// - MetaSplitStatus +// - PartitionCount +type NotifyStopSplitRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + ParentGpid *base.Gpid `thrift:"parent_gpid,2" db:"parent_gpid" json:"parent_gpid"` + MetaSplitStatus SplitStatus `thrift:"meta_split_status,3" db:"meta_split_status" json:"meta_split_status"` + PartitionCount int32 `thrift:"partition_count,4" db:"partition_count" json:"partition_count"` +} + +func NewNotifyStopSplitRequest() *NotifyStopSplitRequest { + return &NotifyStopSplitRequest{} +} + +func (p *NotifyStopSplitRequest) GetAppName() string { + return p.AppName +} + +var NotifyStopSplitRequest_ParentGpid_DEFAULT *base.Gpid + +func (p *NotifyStopSplitRequest) GetParentGpid() *base.Gpid { + if !p.IsSetParentGpid() { + return NotifyStopSplitRequest_ParentGpid_DEFAULT + } + return p.ParentGpid +} + +func (p *NotifyStopSplitRequest) GetMetaSplitStatus() SplitStatus { + return p.MetaSplitStatus +} + +func (p *NotifyStopSplitRequest) GetPartitionCount() int32 { + return p.PartitionCount +} +func (p *NotifyStopSplitRequest) IsSetParentGpid() bool { + return p.ParentGpid != nil +} + +func (p *NotifyStopSplitRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NotifyStopSplitRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *NotifyStopSplitRequest) ReadField2(iprot thrift.TProtocol) error { + p.ParentGpid = &base.Gpid{} + if err := p.ParentGpid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ParentGpid), err) + } + return nil +} + +func (p *NotifyStopSplitRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := SplitStatus(v) + p.MetaSplitStatus = temp + } + return nil +} + +func (p *NotifyStopSplitRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionCount = v + } + return nil +} + +func (p *NotifyStopSplitRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("notify_stop_split_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NotifyStopSplitRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *NotifyStopSplitRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("parent_gpid", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:parent_gpid: ", p), err) + } + if err := p.ParentGpid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ParentGpid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:parent_gpid: ", p), err) + } + return err +} + +func (p *NotifyStopSplitRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("meta_split_status", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:meta_split_status: ", p), err) + } + if err := oprot.WriteI32(int32(p.MetaSplitStatus)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.meta_split_status (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:meta_split_status: ", p), err) + } + return err +} + +func (p *NotifyStopSplitRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_count (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_count: ", p), err) + } + return err +} + +func (p *NotifyStopSplitRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NotifyStopSplitRequest(%+v)", *p) +} + +// Attributes: +// - Err +type NotifyStopSplitResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` +} + +func NewNotifyStopSplitResponse() *NotifyStopSplitResponse { + return &NotifyStopSplitResponse{} +} + +var NotifyStopSplitResponse_Err_DEFAULT *base.ErrorCode + +func (p *NotifyStopSplitResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return NotifyStopSplitResponse_Err_DEFAULT + } + return p.Err +} +func (p *NotifyStopSplitResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *NotifyStopSplitResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *NotifyStopSplitResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *NotifyStopSplitResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("notify_stop_split_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *NotifyStopSplitResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *NotifyStopSplitResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("NotifyStopSplitResponse(%+v)", *p) +} + +// Attributes: +// - AppName +// - Pid +// - PartitionCount +type QueryChildStateRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + Pid *base.Gpid `thrift:"pid,2" db:"pid" json:"pid"` + PartitionCount int32 `thrift:"partition_count,3" db:"partition_count" json:"partition_count"` +} + +func NewQueryChildStateRequest() *QueryChildStateRequest { + return &QueryChildStateRequest{} +} + +func (p *QueryChildStateRequest) GetAppName() string { + return p.AppName +} + +var QueryChildStateRequest_Pid_DEFAULT *base.Gpid + +func (p *QueryChildStateRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return QueryChildStateRequest_Pid_DEFAULT + } + return p.Pid +} + +func (p *QueryChildStateRequest) GetPartitionCount() int32 { + return p.PartitionCount +} +func (p *QueryChildStateRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *QueryChildStateRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryChildStateRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QueryChildStateRequest) ReadField2(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *QueryChildStateRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.PartitionCount = v + } + return nil +} + +func (p *QueryChildStateRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_child_state_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryChildStateRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *QueryChildStateRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:pid: ", p), err) + } + return err +} + +func (p *QueryChildStateRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_count (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:partition_count: ", p), err) + } + return err +} + +func (p *QueryChildStateRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryChildStateRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - PartitionCount +// - ChildConfig +type QueryChildStateResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + PartitionCount *int32 `thrift:"partition_count,2" db:"partition_count" json:"partition_count,omitempty"` + ChildConfig *replication.PartitionConfiguration `thrift:"child_config,3" db:"child_config" json:"child_config,omitempty"` +} + +func NewQueryChildStateResponse() *QueryChildStateResponse { + return &QueryChildStateResponse{} +} + +var QueryChildStateResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryChildStateResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryChildStateResponse_Err_DEFAULT + } + return p.Err +} + +var QueryChildStateResponse_PartitionCount_DEFAULT int32 + +func (p *QueryChildStateResponse) GetPartitionCount() int32 { + if !p.IsSetPartitionCount() { + return QueryChildStateResponse_PartitionCount_DEFAULT + } + return *p.PartitionCount +} + +var QueryChildStateResponse_ChildConfig_DEFAULT *replication.PartitionConfiguration + +func (p *QueryChildStateResponse) GetChildConfig() *replication.PartitionConfiguration { + if !p.IsSetChildConfig() { + return QueryChildStateResponse_ChildConfig_DEFAULT + } + return p.ChildConfig +} +func (p *QueryChildStateResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryChildStateResponse) IsSetPartitionCount() bool { + return p.PartitionCount != nil +} + +func (p *QueryChildStateResponse) IsSetChildConfig() bool { + return p.ChildConfig != nil +} + +func (p *QueryChildStateResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryChildStateResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryChildStateResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.PartitionCount = &v + } + return nil +} + +func (p *QueryChildStateResponse) ReadField3(iprot thrift.TProtocol) error { + p.ChildConfig = &replication.PartitionConfiguration{} + if err := p.ChildConfig.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ChildConfig), err) + } + return nil +} + +func (p *QueryChildStateResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_child_state_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryChildStateResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryChildStateResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetPartitionCount() { + if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.PartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:partition_count: ", p), err) + } + } + return err +} + +func (p *QueryChildStateResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetChildConfig() { + if err := oprot.WriteFieldBegin("child_config", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:child_config: ", p), err) + } + if err := p.ChildConfig.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ChildConfig), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:child_config: ", p), err) + } + } + return err +} + +func (p *QueryChildStateResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryChildStateResponse(%+v)", *p) +} diff --git a/go-client/idl/cmd/GoUnusedProtection__.go b/go-client/idl/cmd/GoUnusedProtection__.go new file mode 100644 index 0000000000..b15aabc4ad --- /dev/null +++ b/go-client/idl/cmd/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package cmd + +var GoUnusedProtection__ int diff --git a/go-client/idl/cmd/command-consts.go b/go-client/idl/cmd/command-consts.go new file mode 100644 index 0000000000..7dba9de517 --- /dev/null +++ b/go-client/idl/cmd/command-consts.go @@ -0,0 +1,22 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package cmd + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +func init() { +} diff --git a/go-client/idl/cmd/command.go b/go-client/idl/cmd/command.go new file mode 100644 index 0000000000..edeb8b1da4 --- /dev/null +++ b/go-client/idl/cmd/command.go @@ -0,0 +1,535 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package cmd + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +// Attributes: +// - Cmd +// - Arguments +type Command struct { + Cmd string `thrift:"cmd,1" db:"cmd" json:"cmd"` + Arguments []string `thrift:"arguments,2" db:"arguments" json:"arguments"` +} + +func NewCommand() *Command { + return &Command{} +} + +func (p *Command) GetCmd() string { + return p.Cmd +} + +func (p *Command) GetArguments() []string { + return p.Arguments +} +func (p *Command) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Command) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Cmd = v + } + return nil +} + +func (p *Command) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.Arguments = tSlice + for i := 0; i < size; i++ { + var _elem0 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem0 = v + } + p.Arguments = append(p.Arguments, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Command) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("command"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Command) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("cmd", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:cmd: ", p), err) + } + if err := oprot.WriteString(string(p.Cmd)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.cmd (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:cmd: ", p), err) + } + return err +} + +func (p *Command) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("arguments", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:arguments: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Arguments)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Arguments { + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:arguments: ", p), err) + } + return err +} + +func (p *Command) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Command(%+v)", *p) +} + +type RemoteCmdService interface { + // Parameters: + // - Cmd + CallCommand(ctx context.Context, cmd *Command) (r string, err error) +} + +type RemoteCmdServiceClient struct { + c thrift.TClient +} + +func NewRemoteCmdServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *RemoteCmdServiceClient { + return &RemoteCmdServiceClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewRemoteCmdServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *RemoteCmdServiceClient { + return &RemoteCmdServiceClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewRemoteCmdServiceClient(c thrift.TClient) *RemoteCmdServiceClient { + return &RemoteCmdServiceClient{ + c: c, + } +} + +func (p *RemoteCmdServiceClient) Client_() thrift.TClient { + return p.c +} + +// Parameters: +// - Cmd +func (p *RemoteCmdServiceClient) CallCommand(ctx context.Context, cmd *Command) (r string, err error) { + var _args1 RemoteCmdServiceCallCommandArgs + _args1.Cmd = cmd + var _result2 RemoteCmdServiceCallCommandResult + if err = p.Client_().Call(ctx, "callCommand", &_args1, &_result2); err != nil { + return + } + return _result2.GetSuccess(), nil +} + +type RemoteCmdServiceProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler RemoteCmdService +} + +func (p *RemoteCmdServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *RemoteCmdServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *RemoteCmdServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewRemoteCmdServiceProcessor(handler RemoteCmdService) *RemoteCmdServiceProcessor { + + self3 := &RemoteCmdServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self3.processorMap["callCommand"] = &remoteCmdServiceProcessorCallCommand{handler: handler} + return self3 +} + +func (p *RemoteCmdServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x4.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x4 + +} + +type remoteCmdServiceProcessorCallCommand struct { + handler RemoteCmdService +} + +func (p *remoteCmdServiceProcessorCallCommand) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RemoteCmdServiceCallCommandArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("callCommand", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RemoteCmdServiceCallCommandResult{} + var retval string + var err2 error + if retval, err2 = p.handler.CallCommand(ctx, args.Cmd); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing callCommand: "+err2.Error()) + oprot.WriteMessageBegin("callCommand", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = &retval + } + if err2 = oprot.WriteMessageBegin("callCommand", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Cmd +type RemoteCmdServiceCallCommandArgs struct { + Cmd *Command `thrift:"cmd,1" db:"cmd" json:"cmd"` +} + +func NewRemoteCmdServiceCallCommandArgs() *RemoteCmdServiceCallCommandArgs { + return &RemoteCmdServiceCallCommandArgs{} +} + +var RemoteCmdServiceCallCommandArgs_Cmd_DEFAULT *Command + +func (p *RemoteCmdServiceCallCommandArgs) GetCmd() *Command { + if !p.IsSetCmd() { + return RemoteCmdServiceCallCommandArgs_Cmd_DEFAULT + } + return p.Cmd +} +func (p *RemoteCmdServiceCallCommandArgs) IsSetCmd() bool { + return p.Cmd != nil +} + +func (p *RemoteCmdServiceCallCommandArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RemoteCmdServiceCallCommandArgs) ReadField1(iprot thrift.TProtocol) error { + p.Cmd = &Command{} + if err := p.Cmd.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Cmd), err) + } + return nil +} + +func (p *RemoteCmdServiceCallCommandArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("callCommand_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RemoteCmdServiceCallCommandArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("cmd", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:cmd: ", p), err) + } + if err := p.Cmd.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Cmd), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:cmd: ", p), err) + } + return err +} + +func (p *RemoteCmdServiceCallCommandArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RemoteCmdServiceCallCommandArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RemoteCmdServiceCallCommandResult struct { + Success *string `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRemoteCmdServiceCallCommandResult() *RemoteCmdServiceCallCommandResult { + return &RemoteCmdServiceCallCommandResult{} +} + +var RemoteCmdServiceCallCommandResult_Success_DEFAULT string + +func (p *RemoteCmdServiceCallCommandResult) GetSuccess() string { + if !p.IsSetSuccess() { + return RemoteCmdServiceCallCommandResult_Success_DEFAULT + } + return *p.Success +} +func (p *RemoteCmdServiceCallCommandResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RemoteCmdServiceCallCommandResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRING { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RemoteCmdServiceCallCommandResult) ReadField0(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + p.Success = &v + } + return nil +} + +func (p *RemoteCmdServiceCallCommandResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("callCommand_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RemoteCmdServiceCallCommandResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRING, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := oprot.WriteString(string(*p.Success)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.success (0) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RemoteCmdServiceCallCommandResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RemoteCmdServiceCallCommandResult(%+v)", *p) +} diff --git a/go-client/idl/radmin/GoUnusedProtection__.go b/go-client/idl/radmin/GoUnusedProtection__.go new file mode 100644 index 0000000000..c460900104 --- /dev/null +++ b/go-client/idl/radmin/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package radmin + +var GoUnusedProtection__ int diff --git a/go-client/idl/radmin/replica_admin-consts.go b/go-client/idl/radmin/replica_admin-consts.go new file mode 100644 index 0000000000..6e0cdbb21d --- /dev/null +++ b/go-client/idl/radmin/replica_admin-consts.go @@ -0,0 +1,29 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package radmin + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/admin" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ +var _ = admin.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/radmin/replica_admin.go b/go-client/idl/radmin/replica_admin.go new file mode 100644 index 0000000000..0ad97a8977 --- /dev/null +++ b/go-client/idl/radmin/replica_admin.go @@ -0,0 +1,3681 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package radmin + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/admin" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ +var _ = replication.GoUnusedProtection__ +var _ = admin.GoUnusedProtection__ + +type DiskMigrationStatus int64 + +const ( + DiskMigrationStatus_IDLE DiskMigrationStatus = 0 + DiskMigrationStatus_MOVING DiskMigrationStatus = 1 + DiskMigrationStatus_MOVED DiskMigrationStatus = 2 + DiskMigrationStatus_CLOSED DiskMigrationStatus = 3 +) + +func (p DiskMigrationStatus) String() string { + switch p { + case DiskMigrationStatus_IDLE: + return "IDLE" + case DiskMigrationStatus_MOVING: + return "MOVING" + case DiskMigrationStatus_MOVED: + return "MOVED" + case DiskMigrationStatus_CLOSED: + return "CLOSED" + } + return "" +} + +func DiskMigrationStatusFromString(s string) (DiskMigrationStatus, error) { + switch s { + case "IDLE": + return DiskMigrationStatus_IDLE, nil + case "MOVING": + return DiskMigrationStatus_MOVING, nil + case "MOVED": + return DiskMigrationStatus_MOVED, nil + case "CLOSED": + return DiskMigrationStatus_CLOSED, nil + } + return DiskMigrationStatus(0), fmt.Errorf("not a valid DiskMigrationStatus string") +} + +func DiskMigrationStatusPtr(v DiskMigrationStatus) *DiskMigrationStatus { return &v } + +func (p DiskMigrationStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *DiskMigrationStatus) UnmarshalText(text []byte) error { + q, err := DiskMigrationStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *DiskMigrationStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = DiskMigrationStatus(v) + return nil +} + +func (p *DiskMigrationStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type HotkeyType int64 + +const ( + HotkeyType_READ HotkeyType = 0 + HotkeyType_WRITE HotkeyType = 1 +) + +func (p HotkeyType) String() string { + switch p { + case HotkeyType_READ: + return "READ" + case HotkeyType_WRITE: + return "WRITE" + } + return "" +} + +func HotkeyTypeFromString(s string) (HotkeyType, error) { + switch s { + case "READ": + return HotkeyType_READ, nil + case "WRITE": + return HotkeyType_WRITE, nil + } + return HotkeyType(0), fmt.Errorf("not a valid HotkeyType string") +} + +func HotkeyTypePtr(v HotkeyType) *HotkeyType { return &v } + +func (p HotkeyType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *HotkeyType) UnmarshalText(text []byte) error { + q, err := HotkeyTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *HotkeyType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = HotkeyType(v) + return nil +} + +func (p *HotkeyType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type DetectAction int64 + +const ( + DetectAction_START DetectAction = 0 + DetectAction_STOP DetectAction = 1 + DetectAction_QUERY DetectAction = 2 +) + +func (p DetectAction) String() string { + switch p { + case DetectAction_START: + return "START" + case DetectAction_STOP: + return "STOP" + case DetectAction_QUERY: + return "QUERY" + } + return "" +} + +func DetectActionFromString(s string) (DetectAction, error) { + switch s { + case "START": + return DetectAction_START, nil + case "STOP": + return DetectAction_STOP, nil + case "QUERY": + return DetectAction_QUERY, nil + } + return DetectAction(0), fmt.Errorf("not a valid DetectAction string") +} + +func DetectActionPtr(v DetectAction) *DetectAction { return &v } + +func (p DetectAction) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *DetectAction) UnmarshalText(text []byte) error { + q, err := DetectActionFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *DetectAction) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = DetectAction(v) + return nil +} + +func (p *DetectAction) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Pid +// - Node1 +// - HpNode1 +type QueryReplicaDecreeRequest struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + Node1 *base.RPCAddress `thrift:"node1,2" db:"node1" json:"node1"` + HpNode1 *base.HostPort `thrift:"hp_node1,3" db:"hp_node1" json:"hp_node1,omitempty"` +} + +func NewQueryReplicaDecreeRequest() *QueryReplicaDecreeRequest { + return &QueryReplicaDecreeRequest{} +} + +var QueryReplicaDecreeRequest_Pid_DEFAULT *base.Gpid + +func (p *QueryReplicaDecreeRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return QueryReplicaDecreeRequest_Pid_DEFAULT + } + return p.Pid +} + +var QueryReplicaDecreeRequest_Node1_DEFAULT *base.RPCAddress + +func (p *QueryReplicaDecreeRequest) GetNode1() *base.RPCAddress { + if !p.IsSetNode1() { + return QueryReplicaDecreeRequest_Node1_DEFAULT + } + return p.Node1 +} + +var QueryReplicaDecreeRequest_HpNode1_DEFAULT *base.HostPort + +func (p *QueryReplicaDecreeRequest) GetHpNode1() *base.HostPort { + if !p.IsSetHpNode1() { + return QueryReplicaDecreeRequest_HpNode1_DEFAULT + } + return p.HpNode1 +} +func (p *QueryReplicaDecreeRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *QueryReplicaDecreeRequest) IsSetNode1() bool { + return p.Node1 != nil +} + +func (p *QueryReplicaDecreeRequest) IsSetHpNode1() bool { + return p.HpNode1 != nil +} + +func (p *QueryReplicaDecreeRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryReplicaDecreeRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *QueryReplicaDecreeRequest) ReadField2(iprot thrift.TProtocol) error { + p.Node1 = &base.RPCAddress{} + if err := p.Node1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node1), err) + } + return nil +} + +func (p *QueryReplicaDecreeRequest) ReadField3(iprot thrift.TProtocol) error { + p.HpNode1 = &base.HostPort{} + if err := p.HpNode1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode1), err) + } + return nil +} + +func (p *QueryReplicaDecreeRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_replica_decree_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryReplicaDecreeRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *QueryReplicaDecreeRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node1", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:node1: ", p), err) + } + if err := p.Node1.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node1), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:node1: ", p), err) + } + return err +} + +func (p *QueryReplicaDecreeRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode1() { + if err := oprot.WriteFieldBegin("hp_node1", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hp_node1: ", p), err) + } + if err := p.HpNode1.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode1), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hp_node1: ", p), err) + } + } + return err +} + +func (p *QueryReplicaDecreeRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryReplicaDecreeRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - LastDecree +type QueryReplicaDecreeResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + LastDecree int64 `thrift:"last_decree,2" db:"last_decree" json:"last_decree"` +} + +func NewQueryReplicaDecreeResponse() *QueryReplicaDecreeResponse { + return &QueryReplicaDecreeResponse{} +} + +var QueryReplicaDecreeResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryReplicaDecreeResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryReplicaDecreeResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryReplicaDecreeResponse) GetLastDecree() int64 { + return p.LastDecree +} +func (p *QueryReplicaDecreeResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryReplicaDecreeResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryReplicaDecreeResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryReplicaDecreeResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.LastDecree = v + } + return nil +} + +func (p *QueryReplicaDecreeResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_replica_decree_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryReplicaDecreeResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryReplicaDecreeResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_decree", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:last_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_decree (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:last_decree: ", p), err) + } + return err +} + +func (p *QueryReplicaDecreeResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryReplicaDecreeResponse(%+v)", *p) +} + +// Attributes: +// - Node1 +// - HpNode1 +type QueryReplicaInfoRequest struct { + Node1 *base.RPCAddress `thrift:"node1,1" db:"node1" json:"node1"` + HpNode1 *base.HostPort `thrift:"hp_node1,2" db:"hp_node1" json:"hp_node1,omitempty"` +} + +func NewQueryReplicaInfoRequest() *QueryReplicaInfoRequest { + return &QueryReplicaInfoRequest{} +} + +var QueryReplicaInfoRequest_Node1_DEFAULT *base.RPCAddress + +func (p *QueryReplicaInfoRequest) GetNode1() *base.RPCAddress { + if !p.IsSetNode1() { + return QueryReplicaInfoRequest_Node1_DEFAULT + } + return p.Node1 +} + +var QueryReplicaInfoRequest_HpNode1_DEFAULT *base.HostPort + +func (p *QueryReplicaInfoRequest) GetHpNode1() *base.HostPort { + if !p.IsSetHpNode1() { + return QueryReplicaInfoRequest_HpNode1_DEFAULT + } + return p.HpNode1 +} +func (p *QueryReplicaInfoRequest) IsSetNode1() bool { + return p.Node1 != nil +} + +func (p *QueryReplicaInfoRequest) IsSetHpNode1() bool { + return p.HpNode1 != nil +} + +func (p *QueryReplicaInfoRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryReplicaInfoRequest) ReadField1(iprot thrift.TProtocol) error { + p.Node1 = &base.RPCAddress{} + if err := p.Node1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node1), err) + } + return nil +} + +func (p *QueryReplicaInfoRequest) ReadField2(iprot thrift.TProtocol) error { + p.HpNode1 = &base.HostPort{} + if err := p.HpNode1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode1), err) + } + return nil +} + +func (p *QueryReplicaInfoRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_replica_info_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryReplicaInfoRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node1", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:node1: ", p), err) + } + if err := p.Node1.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node1), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:node1: ", p), err) + } + return err +} + +func (p *QueryReplicaInfoRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode1() { + if err := oprot.WriteFieldBegin("hp_node1", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hp_node1: ", p), err) + } + if err := p.HpNode1.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode1), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hp_node1: ", p), err) + } + } + return err +} + +func (p *QueryReplicaInfoRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryReplicaInfoRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Replicas +type QueryReplicaInfoResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Replicas []*admin.ReplicaInfo `thrift:"replicas,2" db:"replicas" json:"replicas"` +} + +func NewQueryReplicaInfoResponse() *QueryReplicaInfoResponse { + return &QueryReplicaInfoResponse{} +} + +var QueryReplicaInfoResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryReplicaInfoResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryReplicaInfoResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryReplicaInfoResponse) GetReplicas() []*admin.ReplicaInfo { + return p.Replicas +} +func (p *QueryReplicaInfoResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryReplicaInfoResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryReplicaInfoResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryReplicaInfoResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*admin.ReplicaInfo, 0, size) + p.Replicas = tSlice + for i := 0; i < size; i++ { + _elem0 := &admin.ReplicaInfo{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Replicas = append(p.Replicas, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryReplicaInfoResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_replica_info_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryReplicaInfoResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryReplicaInfoResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("replicas", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:replicas: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Replicas)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Replicas { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:replicas: ", p), err) + } + return err +} + +func (p *QueryReplicaInfoResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryReplicaInfoResponse(%+v)", *p) +} + +// Attributes: +// - Tag +// - FullDir +// - DiskCapacityMb +// - DiskAvailableMb +// - HoldingPrimaryReplicas +// - HoldingSecondaryReplicas +type DiskInfo struct { + Tag string `thrift:"tag,1" db:"tag" json:"tag"` + FullDir string `thrift:"full_dir,2" db:"full_dir" json:"full_dir"` + DiskCapacityMb int64 `thrift:"disk_capacity_mb,3" db:"disk_capacity_mb" json:"disk_capacity_mb"` + DiskAvailableMb int64 `thrift:"disk_available_mb,4" db:"disk_available_mb" json:"disk_available_mb"` + HoldingPrimaryReplicas map[int32][]*base.Gpid `thrift:"holding_primary_replicas,5" db:"holding_primary_replicas" json:"holding_primary_replicas"` + HoldingSecondaryReplicas map[int32][]*base.Gpid `thrift:"holding_secondary_replicas,6" db:"holding_secondary_replicas" json:"holding_secondary_replicas"` +} + +func NewDiskInfo() *DiskInfo { + return &DiskInfo{} +} + +func (p *DiskInfo) GetTag() string { + return p.Tag +} + +func (p *DiskInfo) GetFullDir() string { + return p.FullDir +} + +func (p *DiskInfo) GetDiskCapacityMb() int64 { + return p.DiskCapacityMb +} + +func (p *DiskInfo) GetDiskAvailableMb() int64 { + return p.DiskAvailableMb +} + +func (p *DiskInfo) GetHoldingPrimaryReplicas() map[int32][]*base.Gpid { + return p.HoldingPrimaryReplicas +} + +func (p *DiskInfo) GetHoldingSecondaryReplicas() map[int32][]*base.Gpid { + return p.HoldingSecondaryReplicas +} +func (p *DiskInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.MAP { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.MAP { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DiskInfo) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Tag = v + } + return nil +} + +func (p *DiskInfo) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.FullDir = v + } + return nil +} + +func (p *DiskInfo) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.DiskCapacityMb = v + } + return nil +} + +func (p *DiskInfo) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.DiskAvailableMb = v + } + return nil +} + +func (p *DiskInfo) ReadField5(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[int32][]*base.Gpid, size) + p.HoldingPrimaryReplicas = tMap + for i := 0; i < size; i++ { + var _key1 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key1 = v + } + _, size, err := iprot.ReadSetBegin() + if err != nil { + return thrift.PrependError("error reading set begin: ", err) + } + tSet := make([]*base.Gpid, 0, size) + _val2 := tSet + for i := 0; i < size; i++ { + _elem3 := &base.Gpid{} + if err := _elem3.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) + } + _val2 = append(_val2, _elem3) + } + if err := iprot.ReadSetEnd(); err != nil { + return thrift.PrependError("error reading set end: ", err) + } + p.HoldingPrimaryReplicas[_key1] = _val2 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *DiskInfo) ReadField6(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[int32][]*base.Gpid, size) + p.HoldingSecondaryReplicas = tMap + for i := 0; i < size; i++ { + var _key4 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key4 = v + } + _, size, err := iprot.ReadSetBegin() + if err != nil { + return thrift.PrependError("error reading set begin: ", err) + } + tSet := make([]*base.Gpid, 0, size) + _val5 := tSet + for i := 0; i < size; i++ { + _elem6 := &base.Gpid{} + if err := _elem6.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err) + } + _val5 = append(_val5, _elem6) + } + if err := iprot.ReadSetEnd(); err != nil { + return thrift.PrependError("error reading set end: ", err) + } + p.HoldingSecondaryReplicas[_key4] = _val5 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *DiskInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("disk_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DiskInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("tag", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:tag: ", p), err) + } + if err := oprot.WriteString(string(p.Tag)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.tag (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:tag: ", p), err) + } + return err +} + +func (p *DiskInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("full_dir", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:full_dir: ", p), err) + } + if err := oprot.WriteString(string(p.FullDir)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.full_dir (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:full_dir: ", p), err) + } + return err +} + +func (p *DiskInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("disk_capacity_mb", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:disk_capacity_mb: ", p), err) + } + if err := oprot.WriteI64(int64(p.DiskCapacityMb)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.disk_capacity_mb (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:disk_capacity_mb: ", p), err) + } + return err +} + +func (p *DiskInfo) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("disk_available_mb", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:disk_available_mb: ", p), err) + } + if err := oprot.WriteI64(int64(p.DiskAvailableMb)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.disk_available_mb (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:disk_available_mb: ", p), err) + } + return err +} + +func (p *DiskInfo) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("holding_primary_replicas", thrift.MAP, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:holding_primary_replicas: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.SET, len(p.HoldingPrimaryReplicas)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.HoldingPrimaryReplicas { + if err := oprot.WriteI32(int32(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteSetBegin(thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing set begin: ", err) + } + for i := 0; i < len(v); i++ { + for j := i + 1; j < len(v); j++ { + if reflect.DeepEqual(v[i], v[j]) { + return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", v[i])) + } + } + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteSetEnd(); err != nil { + return thrift.PrependError("error writing set end: ", err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:holding_primary_replicas: ", p), err) + } + return err +} + +func (p *DiskInfo) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("holding_secondary_replicas", thrift.MAP, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:holding_secondary_replicas: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.I32, thrift.SET, len(p.HoldingSecondaryReplicas)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.HoldingSecondaryReplicas { + if err := oprot.WriteI32(int32(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteSetBegin(thrift.STRUCT, len(v)); err != nil { + return thrift.PrependError("error writing set begin: ", err) + } + for i := 0; i < len(v); i++ { + for j := i + 1; j < len(v); j++ { + if reflect.DeepEqual(v[i], v[j]) { + return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", v[i])) + } + } + } + for _, v := range v { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteSetEnd(); err != nil { + return thrift.PrependError("error writing set end: ", err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:holding_secondary_replicas: ", p), err) + } + return err +} + +func (p *DiskInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DiskInfo(%+v)", *p) +} + +// Attributes: +// - Node1 +// - AppName +// - HpNode1 +type QueryDiskInfoRequest struct { + Node1 *base.RPCAddress `thrift:"node1,1" db:"node1" json:"node1"` + AppName string `thrift:"app_name,2" db:"app_name" json:"app_name"` + HpNode1 *base.HostPort `thrift:"hp_node1,3" db:"hp_node1" json:"hp_node1,omitempty"` +} + +func NewQueryDiskInfoRequest() *QueryDiskInfoRequest { + return &QueryDiskInfoRequest{} +} + +var QueryDiskInfoRequest_Node1_DEFAULT *base.RPCAddress + +func (p *QueryDiskInfoRequest) GetNode1() *base.RPCAddress { + if !p.IsSetNode1() { + return QueryDiskInfoRequest_Node1_DEFAULT + } + return p.Node1 +} + +func (p *QueryDiskInfoRequest) GetAppName() string { + return p.AppName +} + +var QueryDiskInfoRequest_HpNode1_DEFAULT *base.HostPort + +func (p *QueryDiskInfoRequest) GetHpNode1() *base.HostPort { + if !p.IsSetHpNode1() { + return QueryDiskInfoRequest_HpNode1_DEFAULT + } + return p.HpNode1 +} +func (p *QueryDiskInfoRequest) IsSetNode1() bool { + return p.Node1 != nil +} + +func (p *QueryDiskInfoRequest) IsSetHpNode1() bool { + return p.HpNode1 != nil +} + +func (p *QueryDiskInfoRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryDiskInfoRequest) ReadField1(iprot thrift.TProtocol) error { + p.Node1 = &base.RPCAddress{} + if err := p.Node1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Node1), err) + } + return nil +} + +func (p *QueryDiskInfoRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QueryDiskInfoRequest) ReadField3(iprot thrift.TProtocol) error { + p.HpNode1 = &base.HostPort{} + if err := p.HpNode1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpNode1), err) + } + return nil +} + +func (p *QueryDiskInfoRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_disk_info_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryDiskInfoRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("node1", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:node1: ", p), err) + } + if err := p.Node1.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Node1), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:node1: ", p), err) + } + return err +} + +func (p *QueryDiskInfoRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_name: ", p), err) + } + return err +} + +func (p *QueryDiskInfoRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetHpNode1() { + if err := oprot.WriteFieldBegin("hp_node1", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hp_node1: ", p), err) + } + if err := p.HpNode1.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpNode1), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hp_node1: ", p), err) + } + } + return err +} + +func (p *QueryDiskInfoRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryDiskInfoRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - TotalCapacityMb +// - TotalAvailableMb +// - DiskInfos +type QueryDiskInfoResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + TotalCapacityMb int64 `thrift:"total_capacity_mb,2" db:"total_capacity_mb" json:"total_capacity_mb"` + TotalAvailableMb int64 `thrift:"total_available_mb,3" db:"total_available_mb" json:"total_available_mb"` + DiskInfos []*DiskInfo `thrift:"disk_infos,4" db:"disk_infos" json:"disk_infos"` +} + +func NewQueryDiskInfoResponse() *QueryDiskInfoResponse { + return &QueryDiskInfoResponse{} +} + +var QueryDiskInfoResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryDiskInfoResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryDiskInfoResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryDiskInfoResponse) GetTotalCapacityMb() int64 { + return p.TotalCapacityMb +} + +func (p *QueryDiskInfoResponse) GetTotalAvailableMb() int64 { + return p.TotalAvailableMb +} + +func (p *QueryDiskInfoResponse) GetDiskInfos() []*DiskInfo { + return p.DiskInfos +} +func (p *QueryDiskInfoResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryDiskInfoResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.LIST { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryDiskInfoResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryDiskInfoResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TotalCapacityMb = v + } + return nil +} + +func (p *QueryDiskInfoResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.TotalAvailableMb = v + } + return nil +} + +func (p *QueryDiskInfoResponse) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*DiskInfo, 0, size) + p.DiskInfos = tSlice + for i := 0; i < size; i++ { + _elem7 := &DiskInfo{} + if err := _elem7.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err) + } + p.DiskInfos = append(p.DiskInfos, _elem7) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryDiskInfoResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_disk_info_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryDiskInfoResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryDiskInfoResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("total_capacity_mb", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:total_capacity_mb: ", p), err) + } + if err := oprot.WriteI64(int64(p.TotalCapacityMb)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.total_capacity_mb (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:total_capacity_mb: ", p), err) + } + return err +} + +func (p *QueryDiskInfoResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("total_available_mb", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:total_available_mb: ", p), err) + } + if err := oprot.WriteI64(int64(p.TotalAvailableMb)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.total_available_mb (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:total_available_mb: ", p), err) + } + return err +} + +func (p *QueryDiskInfoResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("disk_infos", thrift.LIST, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:disk_infos: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.DiskInfos)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.DiskInfos { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:disk_infos: ", p), err) + } + return err +} + +func (p *QueryDiskInfoResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryDiskInfoResponse(%+v)", *p) +} + +// Attributes: +// - Pid +// - OriginDisk +// - TargetDisk +type ReplicaDiskMigrateRequest struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + OriginDisk string `thrift:"origin_disk,2" db:"origin_disk" json:"origin_disk"` + TargetDisk string `thrift:"target_disk,3" db:"target_disk" json:"target_disk"` +} + +func NewReplicaDiskMigrateRequest() *ReplicaDiskMigrateRequest { + return &ReplicaDiskMigrateRequest{} +} + +var ReplicaDiskMigrateRequest_Pid_DEFAULT *base.Gpid + +func (p *ReplicaDiskMigrateRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return ReplicaDiskMigrateRequest_Pid_DEFAULT + } + return p.Pid +} + +func (p *ReplicaDiskMigrateRequest) GetOriginDisk() string { + return p.OriginDisk +} + +func (p *ReplicaDiskMigrateRequest) GetTargetDisk() string { + return p.TargetDisk +} +func (p *ReplicaDiskMigrateRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *ReplicaDiskMigrateRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaDiskMigrateRequest) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *ReplicaDiskMigrateRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.OriginDisk = v + } + return nil +} + +func (p *ReplicaDiskMigrateRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.TargetDisk = v + } + return nil +} + +func (p *ReplicaDiskMigrateRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("replica_disk_migrate_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaDiskMigrateRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *ReplicaDiskMigrateRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("origin_disk", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:origin_disk: ", p), err) + } + if err := oprot.WriteString(string(p.OriginDisk)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.origin_disk (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:origin_disk: ", p), err) + } + return err +} + +func (p *ReplicaDiskMigrateRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("target_disk", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:target_disk: ", p), err) + } + if err := oprot.WriteString(string(p.TargetDisk)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.target_disk (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:target_disk: ", p), err) + } + return err +} + +func (p *ReplicaDiskMigrateRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaDiskMigrateRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - Hint +type ReplicaDiskMigrateResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + Hint *string `thrift:"hint,2" db:"hint" json:"hint,omitempty"` +} + +func NewReplicaDiskMigrateResponse() *ReplicaDiskMigrateResponse { + return &ReplicaDiskMigrateResponse{} +} + +var ReplicaDiskMigrateResponse_Err_DEFAULT *base.ErrorCode + +func (p *ReplicaDiskMigrateResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return ReplicaDiskMigrateResponse_Err_DEFAULT + } + return p.Err +} + +var ReplicaDiskMigrateResponse_Hint_DEFAULT string + +func (p *ReplicaDiskMigrateResponse) GetHint() string { + if !p.IsSetHint() { + return ReplicaDiskMigrateResponse_Hint_DEFAULT + } + return *p.Hint +} +func (p *ReplicaDiskMigrateResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *ReplicaDiskMigrateResponse) IsSetHint() bool { + return p.Hint != nil +} + +func (p *ReplicaDiskMigrateResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaDiskMigrateResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *ReplicaDiskMigrateResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Hint = &v + } + return nil +} + +func (p *ReplicaDiskMigrateResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("replica_disk_migrate_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaDiskMigrateResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *ReplicaDiskMigrateResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetHint() { + if err := oprot.WriteFieldBegin("hint", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:hint: ", p), err) + } + if err := oprot.WriteString(string(*p.Hint)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hint (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:hint: ", p), err) + } + } + return err +} + +func (p *ReplicaDiskMigrateResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaDiskMigrateResponse(%+v)", *p) +} + +// Attributes: +// - Type +// - Action +// - Pid +type DetectHotkeyRequest struct { + Type HotkeyType `thrift:"type,1" db:"type" json:"type"` + Action DetectAction `thrift:"action,2" db:"action" json:"action"` + Pid *base.Gpid `thrift:"pid,3" db:"pid" json:"pid"` +} + +func NewDetectHotkeyRequest() *DetectHotkeyRequest { + return &DetectHotkeyRequest{} +} + +func (p *DetectHotkeyRequest) GetType() HotkeyType { + return p.Type +} + +func (p *DetectHotkeyRequest) GetAction() DetectAction { + return p.Action +} + +var DetectHotkeyRequest_Pid_DEFAULT *base.Gpid + +func (p *DetectHotkeyRequest) GetPid() *base.Gpid { + if !p.IsSetPid() { + return DetectHotkeyRequest_Pid_DEFAULT + } + return p.Pid +} +func (p *DetectHotkeyRequest) IsSetPid() bool { + return p.Pid != nil +} + +func (p *DetectHotkeyRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DetectHotkeyRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := HotkeyType(v) + p.Type = temp + } + return nil +} + +func (p *DetectHotkeyRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + temp := DetectAction(v) + p.Action = temp + } + return nil +} + +func (p *DetectHotkeyRequest) ReadField3(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *DetectHotkeyRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("detect_hotkey_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DetectHotkeyRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("type", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:type: ", p), err) + } + if err := oprot.WriteI32(int32(p.Type)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.type (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:type: ", p), err) + } + return err +} + +func (p *DetectHotkeyRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("action", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:action: ", p), err) + } + if err := oprot.WriteI32(int32(p.Action)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.action (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:action: ", p), err) + } + return err +} + +func (p *DetectHotkeyRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:pid: ", p), err) + } + return err +} + +func (p *DetectHotkeyRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DetectHotkeyRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - ErrHint +// - HotkeyResult_ +type DetectHotkeyResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + ErrHint *string `thrift:"err_hint,2" db:"err_hint" json:"err_hint,omitempty"` + HotkeyResult_ *string `thrift:"hotkey_result,3" db:"hotkey_result" json:"hotkey_result,omitempty"` +} + +func NewDetectHotkeyResponse() *DetectHotkeyResponse { + return &DetectHotkeyResponse{} +} + +var DetectHotkeyResponse_Err_DEFAULT *base.ErrorCode + +func (p *DetectHotkeyResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return DetectHotkeyResponse_Err_DEFAULT + } + return p.Err +} + +var DetectHotkeyResponse_ErrHint_DEFAULT string + +func (p *DetectHotkeyResponse) GetErrHint() string { + if !p.IsSetErrHint() { + return DetectHotkeyResponse_ErrHint_DEFAULT + } + return *p.ErrHint +} + +var DetectHotkeyResponse_HotkeyResult__DEFAULT string + +func (p *DetectHotkeyResponse) GetHotkeyResult_() string { + if !p.IsSetHotkeyResult_() { + return DetectHotkeyResponse_HotkeyResult__DEFAULT + } + return *p.HotkeyResult_ +} +func (p *DetectHotkeyResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *DetectHotkeyResponse) IsSetErrHint() bool { + return p.ErrHint != nil +} + +func (p *DetectHotkeyResponse) IsSetHotkeyResult_() bool { + return p.HotkeyResult_ != nil +} + +func (p *DetectHotkeyResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *DetectHotkeyResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *DetectHotkeyResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.ErrHint = &v + } + return nil +} + +func (p *DetectHotkeyResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.HotkeyResult_ = &v + } + return nil +} + +func (p *DetectHotkeyResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("detect_hotkey_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *DetectHotkeyResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *DetectHotkeyResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetErrHint() { + if err := oprot.WriteFieldBegin("err_hint", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:err_hint: ", p), err) + } + if err := oprot.WriteString(string(*p.ErrHint)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.err_hint (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:err_hint: ", p), err) + } + } + return err +} + +func (p *DetectHotkeyResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetHotkeyResult_() { + if err := oprot.WriteFieldBegin("hotkey_result", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hotkey_result: ", p), err) + } + if err := oprot.WriteString(string(*p.HotkeyResult_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hotkey_result (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hotkey_result: ", p), err) + } + } + return err +} + +func (p *DetectHotkeyResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DetectHotkeyResponse(%+v)", *p) +} + +// Attributes: +// - DiskStr +type AddNewDiskRequest struct { + DiskStr string `thrift:"disk_str,1" db:"disk_str" json:"disk_str"` +} + +func NewAddNewDiskRequest() *AddNewDiskRequest { + return &AddNewDiskRequest{} +} + +func (p *AddNewDiskRequest) GetDiskStr() string { + return p.DiskStr +} +func (p *AddNewDiskRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AddNewDiskRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.DiskStr = v + } + return nil +} + +func (p *AddNewDiskRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("add_new_disk_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AddNewDiskRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("disk_str", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:disk_str: ", p), err) + } + if err := oprot.WriteString(string(p.DiskStr)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.disk_str (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:disk_str: ", p), err) + } + return err +} + +func (p *AddNewDiskRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AddNewDiskRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - ErrHint +type AddNewDiskResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + ErrHint *string `thrift:"err_hint,2" db:"err_hint" json:"err_hint,omitempty"` +} + +func NewAddNewDiskResponse() *AddNewDiskResponse { + return &AddNewDiskResponse{} +} + +var AddNewDiskResponse_Err_DEFAULT *base.ErrorCode + +func (p *AddNewDiskResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return AddNewDiskResponse_Err_DEFAULT + } + return p.Err +} + +var AddNewDiskResponse_ErrHint_DEFAULT string + +func (p *AddNewDiskResponse) GetErrHint() string { + if !p.IsSetErrHint() { + return AddNewDiskResponse_ErrHint_DEFAULT + } + return *p.ErrHint +} +func (p *AddNewDiskResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *AddNewDiskResponse) IsSetErrHint() bool { + return p.ErrHint != nil +} + +func (p *AddNewDiskResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AddNewDiskResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *AddNewDiskResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.ErrHint = &v + } + return nil +} + +func (p *AddNewDiskResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("add_new_disk_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AddNewDiskResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *AddNewDiskResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetErrHint() { + if err := oprot.WriteFieldBegin("err_hint", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:err_hint: ", p), err) + } + if err := oprot.WriteString(string(*p.ErrHint)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.err_hint (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:err_hint: ", p), err) + } + } + return err +} + +func (p *AddNewDiskResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AddNewDiskResponse(%+v)", *p) +} + +type ReplicaClient interface { + // Parameters: + // - Req + QueryDiskInfo(ctx context.Context, req *QueryDiskInfoRequest) (r *QueryDiskInfoResponse, err error) + // Parameters: + // - Req + DiskMigrate(ctx context.Context, req *ReplicaDiskMigrateRequest) (r *ReplicaDiskMigrateResponse, err error) + // Parameters: + // - Req + AddDisk(ctx context.Context, req *AddNewDiskRequest) (r *AddNewDiskResponse, err error) +} + +type ReplicaClientClient struct { + c thrift.TClient +} + +func NewReplicaClientClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ReplicaClientClient { + return &ReplicaClientClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewReplicaClientClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ReplicaClientClient { + return &ReplicaClientClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewReplicaClientClient(c thrift.TClient) *ReplicaClientClient { + return &ReplicaClientClient{ + c: c, + } +} + +func (p *ReplicaClientClient) Client_() thrift.TClient { + return p.c +} + +// Parameters: +// - Req +func (p *ReplicaClientClient) QueryDiskInfo(ctx context.Context, req *QueryDiskInfoRequest) (r *QueryDiskInfoResponse, err error) { + var _args8 ReplicaClientQueryDiskInfoArgs + _args8.Req = req + var _result9 ReplicaClientQueryDiskInfoResult + if err = p.Client_().Call(ctx, "query_disk_info", &_args8, &_result9); err != nil { + return + } + return _result9.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *ReplicaClientClient) DiskMigrate(ctx context.Context, req *ReplicaDiskMigrateRequest) (r *ReplicaDiskMigrateResponse, err error) { + var _args10 ReplicaClientDiskMigrateArgs + _args10.Req = req + var _result11 ReplicaClientDiskMigrateResult + if err = p.Client_().Call(ctx, "disk_migrate", &_args10, &_result11); err != nil { + return + } + return _result11.GetSuccess(), nil +} + +// Parameters: +// - Req +func (p *ReplicaClientClient) AddDisk(ctx context.Context, req *AddNewDiskRequest) (r *AddNewDiskResponse, err error) { + var _args12 ReplicaClientAddDiskArgs + _args12.Req = req + var _result13 ReplicaClientAddDiskResult + if err = p.Client_().Call(ctx, "add_disk", &_args12, &_result13); err != nil { + return + } + return _result13.GetSuccess(), nil +} + +type ReplicaClientProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler ReplicaClient +} + +func (p *ReplicaClientProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *ReplicaClientProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *ReplicaClientProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewReplicaClientProcessor(handler ReplicaClient) *ReplicaClientProcessor { + + self14 := &ReplicaClientProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self14.processorMap["query_disk_info"] = &replicaClientProcessorQueryDiskInfo{handler: handler} + self14.processorMap["disk_migrate"] = &replicaClientProcessorDiskMigrate{handler: handler} + self14.processorMap["add_disk"] = &replicaClientProcessorAddDisk{handler: handler} + return self14 +} + +func (p *ReplicaClientProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x15 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x15.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x15 + +} + +type replicaClientProcessorQueryDiskInfo struct { + handler ReplicaClient +} + +func (p *replicaClientProcessorQueryDiskInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := ReplicaClientQueryDiskInfoArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_disk_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := ReplicaClientQueryDiskInfoResult{} + var retval *QueryDiskInfoResponse + var err2 error + if retval, err2 = p.handler.QueryDiskInfo(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_disk_info: "+err2.Error()) + oprot.WriteMessageBegin("query_disk_info", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_disk_info", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type replicaClientProcessorDiskMigrate struct { + handler ReplicaClient +} + +func (p *replicaClientProcessorDiskMigrate) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := ReplicaClientDiskMigrateArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("disk_migrate", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := ReplicaClientDiskMigrateResult{} + var retval *ReplicaDiskMigrateResponse + var err2 error + if retval, err2 = p.handler.DiskMigrate(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing disk_migrate: "+err2.Error()) + oprot.WriteMessageBegin("disk_migrate", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("disk_migrate", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type replicaClientProcessorAddDisk struct { + handler ReplicaClient +} + +func (p *replicaClientProcessorAddDisk) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := ReplicaClientAddDiskArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("add_disk", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := ReplicaClientAddDiskResult{} + var retval *AddNewDiskResponse + var err2 error + if retval, err2 = p.handler.AddDisk(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing add_disk: "+err2.Error()) + oprot.WriteMessageBegin("add_disk", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("add_disk", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Req +type ReplicaClientQueryDiskInfoArgs struct { + Req *QueryDiskInfoRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewReplicaClientQueryDiskInfoArgs() *ReplicaClientQueryDiskInfoArgs { + return &ReplicaClientQueryDiskInfoArgs{} +} + +var ReplicaClientQueryDiskInfoArgs_Req_DEFAULT *QueryDiskInfoRequest + +func (p *ReplicaClientQueryDiskInfoArgs) GetReq() *QueryDiskInfoRequest { + if !p.IsSetReq() { + return ReplicaClientQueryDiskInfoArgs_Req_DEFAULT + } + return p.Req +} +func (p *ReplicaClientQueryDiskInfoArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *ReplicaClientQueryDiskInfoArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaClientQueryDiskInfoArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &QueryDiskInfoRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *ReplicaClientQueryDiskInfoArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_disk_info_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaClientQueryDiskInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *ReplicaClientQueryDiskInfoArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaClientQueryDiskInfoArgs(%+v)", *p) +} + +// Attributes: +// - Success +type ReplicaClientQueryDiskInfoResult struct { + Success *QueryDiskInfoResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewReplicaClientQueryDiskInfoResult() *ReplicaClientQueryDiskInfoResult { + return &ReplicaClientQueryDiskInfoResult{} +} + +var ReplicaClientQueryDiskInfoResult_Success_DEFAULT *QueryDiskInfoResponse + +func (p *ReplicaClientQueryDiskInfoResult) GetSuccess() *QueryDiskInfoResponse { + if !p.IsSetSuccess() { + return ReplicaClientQueryDiskInfoResult_Success_DEFAULT + } + return p.Success +} +func (p *ReplicaClientQueryDiskInfoResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *ReplicaClientQueryDiskInfoResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaClientQueryDiskInfoResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &QueryDiskInfoResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *ReplicaClientQueryDiskInfoResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_disk_info_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaClientQueryDiskInfoResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *ReplicaClientQueryDiskInfoResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaClientQueryDiskInfoResult(%+v)", *p) +} + +// Attributes: +// - Req +type ReplicaClientDiskMigrateArgs struct { + Req *ReplicaDiskMigrateRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewReplicaClientDiskMigrateArgs() *ReplicaClientDiskMigrateArgs { + return &ReplicaClientDiskMigrateArgs{} +} + +var ReplicaClientDiskMigrateArgs_Req_DEFAULT *ReplicaDiskMigrateRequest + +func (p *ReplicaClientDiskMigrateArgs) GetReq() *ReplicaDiskMigrateRequest { + if !p.IsSetReq() { + return ReplicaClientDiskMigrateArgs_Req_DEFAULT + } + return p.Req +} +func (p *ReplicaClientDiskMigrateArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *ReplicaClientDiskMigrateArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaClientDiskMigrateArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &ReplicaDiskMigrateRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *ReplicaClientDiskMigrateArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("disk_migrate_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaClientDiskMigrateArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *ReplicaClientDiskMigrateArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaClientDiskMigrateArgs(%+v)", *p) +} + +// Attributes: +// - Success +type ReplicaClientDiskMigrateResult struct { + Success *ReplicaDiskMigrateResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewReplicaClientDiskMigrateResult() *ReplicaClientDiskMigrateResult { + return &ReplicaClientDiskMigrateResult{} +} + +var ReplicaClientDiskMigrateResult_Success_DEFAULT *ReplicaDiskMigrateResponse + +func (p *ReplicaClientDiskMigrateResult) GetSuccess() *ReplicaDiskMigrateResponse { + if !p.IsSetSuccess() { + return ReplicaClientDiskMigrateResult_Success_DEFAULT + } + return p.Success +} +func (p *ReplicaClientDiskMigrateResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *ReplicaClientDiskMigrateResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaClientDiskMigrateResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ReplicaDiskMigrateResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *ReplicaClientDiskMigrateResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("disk_migrate_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaClientDiskMigrateResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *ReplicaClientDiskMigrateResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaClientDiskMigrateResult(%+v)", *p) +} + +// Attributes: +// - Req +type ReplicaClientAddDiskArgs struct { + Req *AddNewDiskRequest `thrift:"req,1" db:"req" json:"req"` +} + +func NewReplicaClientAddDiskArgs() *ReplicaClientAddDiskArgs { + return &ReplicaClientAddDiskArgs{} +} + +var ReplicaClientAddDiskArgs_Req_DEFAULT *AddNewDiskRequest + +func (p *ReplicaClientAddDiskArgs) GetReq() *AddNewDiskRequest { + if !p.IsSetReq() { + return ReplicaClientAddDiskArgs_Req_DEFAULT + } + return p.Req +} +func (p *ReplicaClientAddDiskArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *ReplicaClientAddDiskArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaClientAddDiskArgs) ReadField1(iprot thrift.TProtocol) error { + p.Req = &AddNewDiskRequest{} + if err := p.Req.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Req), err) + } + return nil +} + +func (p *ReplicaClientAddDiskArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("add_disk_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaClientAddDiskArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:req: ", p), err) + } + if err := p.Req.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Req), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:req: ", p), err) + } + return err +} + +func (p *ReplicaClientAddDiskArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaClientAddDiskArgs(%+v)", *p) +} + +// Attributes: +// - Success +type ReplicaClientAddDiskResult struct { + Success *AddNewDiskResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewReplicaClientAddDiskResult() *ReplicaClientAddDiskResult { + return &ReplicaClientAddDiskResult{} +} + +var ReplicaClientAddDiskResult_Success_DEFAULT *AddNewDiskResponse + +func (p *ReplicaClientAddDiskResult) GetSuccess() *AddNewDiskResponse { + if !p.IsSetSuccess() { + return ReplicaClientAddDiskResult_Success_DEFAULT + } + return p.Success +} +func (p *ReplicaClientAddDiskResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *ReplicaClientAddDiskResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReplicaClientAddDiskResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &AddNewDiskResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *ReplicaClientAddDiskResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("add_disk_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReplicaClientAddDiskResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *ReplicaClientAddDiskResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReplicaClientAddDiskResult(%+v)", *p) +} diff --git a/go-client/idl/replication/GoUnusedProtection__.go b/go-client/idl/replication/GoUnusedProtection__.go new file mode 100644 index 0000000000..20c1f2d56a --- /dev/null +++ b/go-client/idl/replication/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package replication + +var GoUnusedProtection__ int diff --git a/go-client/idl/replication/dsn.layer2-consts.go b/go-client/idl/replication/dsn.layer2-consts.go new file mode 100644 index 0000000000..da6f576153 --- /dev/null +++ b/go-client/idl/replication/dsn.layer2-consts.go @@ -0,0 +1,25 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package replication + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/replication/dsn.layer2.go b/go-client/idl/replication/dsn.layer2.go new file mode 100644 index 0000000000..0ee6ec2e90 --- /dev/null +++ b/go-client/idl/replication/dsn.layer2.go @@ -0,0 +1,2136 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package replication + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = base.GoUnusedProtection__ + +type AppStatus int64 + +const ( + AppStatus_AS_INVALID AppStatus = 0 + AppStatus_AS_AVAILABLE AppStatus = 1 + AppStatus_AS_CREATING AppStatus = 2 + AppStatus_AS_CREATE_FAILED AppStatus = 3 + AppStatus_AS_DROPPING AppStatus = 4 + AppStatus_AS_DROP_FAILED AppStatus = 5 + AppStatus_AS_DROPPED AppStatus = 6 + AppStatus_AS_RECALLING AppStatus = 7 +) + +func (p AppStatus) String() string { + switch p { + case AppStatus_AS_INVALID: + return "AS_INVALID" + case AppStatus_AS_AVAILABLE: + return "AS_AVAILABLE" + case AppStatus_AS_CREATING: + return "AS_CREATING" + case AppStatus_AS_CREATE_FAILED: + return "AS_CREATE_FAILED" + case AppStatus_AS_DROPPING: + return "AS_DROPPING" + case AppStatus_AS_DROP_FAILED: + return "AS_DROP_FAILED" + case AppStatus_AS_DROPPED: + return "AS_DROPPED" + case AppStatus_AS_RECALLING: + return "AS_RECALLING" + } + return "" +} + +func AppStatusFromString(s string) (AppStatus, error) { + switch s { + case "AS_INVALID": + return AppStatus_AS_INVALID, nil + case "AS_AVAILABLE": + return AppStatus_AS_AVAILABLE, nil + case "AS_CREATING": + return AppStatus_AS_CREATING, nil + case "AS_CREATE_FAILED": + return AppStatus_AS_CREATE_FAILED, nil + case "AS_DROPPING": + return AppStatus_AS_DROPPING, nil + case "AS_DROP_FAILED": + return AppStatus_AS_DROP_FAILED, nil + case "AS_DROPPED": + return AppStatus_AS_DROPPED, nil + case "AS_RECALLING": + return AppStatus_AS_RECALLING, nil + } + return AppStatus(0), fmt.Errorf("not a valid AppStatus string") +} + +func AppStatusPtr(v AppStatus) *AppStatus { return &v } + +func (p AppStatus) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *AppStatus) UnmarshalText(text []byte) error { + q, err := AppStatusFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *AppStatus) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = AppStatus(v) + return nil +} + +func (p *AppStatus) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Pid +// - Ballot +// - MaxReplicaCount +// - Primary +// - Secondaries +// - LastDrops +// - LastCommittedDecree +// - PartitionFlags +// - HpPrimary +// - HpSecondaries +// - HpLastDrops +type PartitionConfiguration struct { + Pid *base.Gpid `thrift:"pid,1" db:"pid" json:"pid"` + Ballot int64 `thrift:"ballot,2" db:"ballot" json:"ballot"` + MaxReplicaCount int32 `thrift:"max_replica_count,3" db:"max_replica_count" json:"max_replica_count"` + Primary *base.RPCAddress `thrift:"primary,4" db:"primary" json:"primary"` + Secondaries []*base.RPCAddress `thrift:"secondaries,5" db:"secondaries" json:"secondaries"` + LastDrops []*base.RPCAddress `thrift:"last_drops,6" db:"last_drops" json:"last_drops"` + LastCommittedDecree int64 `thrift:"last_committed_decree,7" db:"last_committed_decree" json:"last_committed_decree"` + PartitionFlags int32 `thrift:"partition_flags,8" db:"partition_flags" json:"partition_flags"` + HpPrimary *base.HostPort `thrift:"hp_primary,9" db:"hp_primary" json:"hp_primary,omitempty"` + HpSecondaries []*base.HostPort `thrift:"hp_secondaries,10" db:"hp_secondaries" json:"hp_secondaries,omitempty"` + HpLastDrops []*base.HostPort `thrift:"hp_last_drops,11" db:"hp_last_drops" json:"hp_last_drops,omitempty"` +} + +func NewPartitionConfiguration() *PartitionConfiguration { + return &PartitionConfiguration{} +} + +var PartitionConfiguration_Pid_DEFAULT *base.Gpid + +func (p *PartitionConfiguration) GetPid() *base.Gpid { + if !p.IsSetPid() { + return PartitionConfiguration_Pid_DEFAULT + } + return p.Pid +} + +func (p *PartitionConfiguration) GetBallot() int64 { + return p.Ballot +} + +func (p *PartitionConfiguration) GetMaxReplicaCount() int32 { + return p.MaxReplicaCount +} + +var PartitionConfiguration_Primary_DEFAULT *base.RPCAddress + +func (p *PartitionConfiguration) GetPrimary() *base.RPCAddress { + if !p.IsSetPrimary() { + return PartitionConfiguration_Primary_DEFAULT + } + return p.Primary +} + +func (p *PartitionConfiguration) GetSecondaries() []*base.RPCAddress { + return p.Secondaries +} + +func (p *PartitionConfiguration) GetLastDrops() []*base.RPCAddress { + return p.LastDrops +} + +func (p *PartitionConfiguration) GetLastCommittedDecree() int64 { + return p.LastCommittedDecree +} + +func (p *PartitionConfiguration) GetPartitionFlags() int32 { + return p.PartitionFlags +} + +var PartitionConfiguration_HpPrimary_DEFAULT *base.HostPort + +func (p *PartitionConfiguration) GetHpPrimary() *base.HostPort { + if !p.IsSetHpPrimary() { + return PartitionConfiguration_HpPrimary_DEFAULT + } + return p.HpPrimary +} + +var PartitionConfiguration_HpSecondaries_DEFAULT []*base.HostPort + +func (p *PartitionConfiguration) GetHpSecondaries() []*base.HostPort { + return p.HpSecondaries +} + +var PartitionConfiguration_HpLastDrops_DEFAULT []*base.HostPort + +func (p *PartitionConfiguration) GetHpLastDrops() []*base.HostPort { + return p.HpLastDrops +} +func (p *PartitionConfiguration) IsSetPid() bool { + return p.Pid != nil +} + +func (p *PartitionConfiguration) IsSetPrimary() bool { + return p.Primary != nil +} + +func (p *PartitionConfiguration) IsSetHpPrimary() bool { + return p.HpPrimary != nil +} + +func (p *PartitionConfiguration) IsSetHpSecondaries() bool { + return p.HpSecondaries != nil +} + +func (p *PartitionConfiguration) IsSetHpLastDrops() bool { + return p.HpLastDrops != nil +} + +func (p *PartitionConfiguration) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.LIST { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I64 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.I32 { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.LIST { + if err := p.ReadField10(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.LIST { + if err := p.ReadField11(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField1(iprot thrift.TProtocol) error { + p.Pid = &base.Gpid{} + if err := p.Pid.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Pid), err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Ballot = v + } + return nil +} + +func (p *PartitionConfiguration) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.MaxReplicaCount = v + } + return nil +} + +func (p *PartitionConfiguration) ReadField4(iprot thrift.TProtocol) error { + p.Primary = &base.RPCAddress{} + if err := p.Primary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Primary), err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.RPCAddress, 0, size) + p.Secondaries = tSlice + for i := 0; i < size; i++ { + _elem0 := &base.RPCAddress{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Secondaries = append(p.Secondaries, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.RPCAddress, 0, size) + p.LastDrops = tSlice + for i := 0; i < size; i++ { + _elem1 := &base.RPCAddress{} + if err := _elem1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.LastDrops = append(p.LastDrops, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.LastCommittedDecree = v + } + return nil +} + +func (p *PartitionConfiguration) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.PartitionFlags = v + } + return nil +} + +func (p *PartitionConfiguration) ReadField9(iprot thrift.TProtocol) error { + p.HpPrimary = &base.HostPort{} + if err := p.HpPrimary.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HpPrimary), err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField10(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.HostPort, 0, size) + p.HpSecondaries = tSlice + for i := 0; i < size; i++ { + _elem2 := &base.HostPort{} + if err := _elem2.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } + p.HpSecondaries = append(p.HpSecondaries, _elem2) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *PartitionConfiguration) ReadField11(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.HostPort, 0, size) + p.HpLastDrops = tSlice + for i := 0; i < size; i++ { + _elem3 := &base.HostPort{} + if err := _elem3.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) + } + p.HpLastDrops = append(p.HpLastDrops, _elem3) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *PartitionConfiguration) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("partition_configuration"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + if err := p.writeField11(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *PartitionConfiguration) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("pid", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:pid: ", p), err) + } + if err := p.Pid.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Pid), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:pid: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ballot", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ballot: ", p), err) + } + if err := oprot.WriteI64(int64(p.Ballot)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ballot (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ballot: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_replica_count", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:max_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_replica_count (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:max_replica_count: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("primary", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:primary: ", p), err) + } + if err := p.Primary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Primary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:primary: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("secondaries", thrift.LIST, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:secondaries: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Secondaries)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Secondaries { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:secondaries: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_drops", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:last_drops: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.LastDrops)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.LastDrops { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:last_drops: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("last_committed_decree", thrift.I64, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:last_committed_decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.LastCommittedDecree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.last_committed_decree (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:last_committed_decree: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_flags", thrift.I32, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:partition_flags: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionFlags)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_flags (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:partition_flags: ", p), err) + } + return err +} + +func (p *PartitionConfiguration) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetHpPrimary() { + if err := oprot.WriteFieldBegin("hp_primary", thrift.STRUCT, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:hp_primary: ", p), err) + } + if err := p.HpPrimary.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HpPrimary), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:hp_primary: ", p), err) + } + } + return err +} + +func (p *PartitionConfiguration) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetHpSecondaries() { + if err := oprot.WriteFieldBegin("hp_secondaries", thrift.LIST, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:hp_secondaries: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.HpSecondaries)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.HpSecondaries { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:hp_secondaries: ", p), err) + } + } + return err +} + +func (p *PartitionConfiguration) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetHpLastDrops() { + if err := oprot.WriteFieldBegin("hp_last_drops", thrift.LIST, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:hp_last_drops: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.HpLastDrops)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.HpLastDrops { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:hp_last_drops: ", p), err) + } + } + return err +} + +func (p *PartitionConfiguration) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("PartitionConfiguration(%+v)", *p) +} + +// Attributes: +// - AppName +// - PartitionIndices +type QueryCfgRequest struct { + AppName string `thrift:"app_name,1" db:"app_name" json:"app_name"` + PartitionIndices []int32 `thrift:"partition_indices,2" db:"partition_indices" json:"partition_indices"` +} + +func NewQueryCfgRequest() *QueryCfgRequest { + return &QueryCfgRequest{} +} + +func (p *QueryCfgRequest) GetAppName() string { + return p.AppName +} + +func (p *QueryCfgRequest) GetPartitionIndices() []int32 { + return p.PartitionIndices +} +func (p *QueryCfgRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryCfgRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *QueryCfgRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int32, 0, size) + p.PartitionIndices = tSlice + for i := 0; i < size; i++ { + var _elem4 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _elem4 = v + } + p.PartitionIndices = append(p.PartitionIndices, _elem4) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryCfgRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_cfg_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryCfgRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_name: ", p), err) + } + return err +} + +func (p *QueryCfgRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_indices", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:partition_indices: ", p), err) + } + if err := oprot.WriteListBegin(thrift.I32, len(p.PartitionIndices)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.PartitionIndices { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:partition_indices: ", p), err) + } + return err +} + +func (p *QueryCfgRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryCfgRequest(%+v)", *p) +} + +// Attributes: +// - Err +// - AppID +// - PartitionCount +// - IsStateful +// - Partitions +type QueryCfgResponse struct { + Err *base.ErrorCode `thrift:"err,1" db:"err" json:"err"` + AppID int32 `thrift:"app_id,2" db:"app_id" json:"app_id"` + PartitionCount int32 `thrift:"partition_count,3" db:"partition_count" json:"partition_count"` + IsStateful bool `thrift:"is_stateful,4" db:"is_stateful" json:"is_stateful"` + Partitions []*PartitionConfiguration `thrift:"partitions,5" db:"partitions" json:"partitions"` +} + +func NewQueryCfgResponse() *QueryCfgResponse { + return &QueryCfgResponse{} +} + +var QueryCfgResponse_Err_DEFAULT *base.ErrorCode + +func (p *QueryCfgResponse) GetErr() *base.ErrorCode { + if !p.IsSetErr() { + return QueryCfgResponse_Err_DEFAULT + } + return p.Err +} + +func (p *QueryCfgResponse) GetAppID() int32 { + return p.AppID +} + +func (p *QueryCfgResponse) GetPartitionCount() int32 { + return p.PartitionCount +} + +func (p *QueryCfgResponse) GetIsStateful() bool { + return p.IsStateful +} + +func (p *QueryCfgResponse) GetPartitions() []*PartitionConfiguration { + return p.Partitions +} +func (p *QueryCfgResponse) IsSetErr() bool { + return p.Err != nil +} + +func (p *QueryCfgResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.LIST { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueryCfgResponse) ReadField1(iprot thrift.TProtocol) error { + p.Err = &base.ErrorCode{} + if err := p.Err.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Err), err) + } + return nil +} + +func (p *QueryCfgResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *QueryCfgResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.PartitionCount = v + } + return nil +} + +func (p *QueryCfgResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.IsStateful = v + } + return nil +} + +func (p *QueryCfgResponse) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*PartitionConfiguration, 0, size) + p.Partitions = tSlice + for i := 0; i < size; i++ { + _elem5 := &PartitionConfiguration{} + if err := _elem5.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) + } + p.Partitions = append(p.Partitions, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *QueryCfgResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_cfg_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *QueryCfgResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("err", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:err: ", p), err) + } + if err := p.Err.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Err), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:err: ", p), err) + } + return err +} + +func (p *QueryCfgResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_id: ", p), err) + } + return err +} + +func (p *QueryCfgResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_count (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:partition_count: ", p), err) + } + return err +} + +func (p *QueryCfgResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_stateful", thrift.BOOL, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:is_stateful: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsStateful)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_stateful (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:is_stateful: ", p), err) + } + return err +} + +func (p *QueryCfgResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partitions", thrift.LIST, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:partitions: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Partitions { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:partitions: ", p), err) + } + return err +} + +func (p *QueryCfgResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueryCfgResponse(%+v)", *p) +} + +// Attributes: +// - AppID +// - PartitionIndex +// - ClientTimeout +// - PartitionHash +// - IsBackupRequest +type RequestMeta struct { + AppID int32 `thrift:"app_id,1" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,2" db:"partition_index" json:"partition_index"` + ClientTimeout int32 `thrift:"client_timeout,3" db:"client_timeout" json:"client_timeout"` + PartitionHash int64 `thrift:"partition_hash,4" db:"partition_hash" json:"partition_hash"` + IsBackupRequest bool `thrift:"is_backup_request,5" db:"is_backup_request" json:"is_backup_request"` +} + +func NewRequestMeta() *RequestMeta { + return &RequestMeta{} +} + +func (p *RequestMeta) GetAppID() int32 { + return p.AppID +} + +func (p *RequestMeta) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *RequestMeta) GetClientTimeout() int32 { + return p.ClientTimeout +} + +func (p *RequestMeta) GetPartitionHash() int64 { + return p.PartitionHash +} + +func (p *RequestMeta) GetIsBackupRequest() bool { + return p.IsBackupRequest +} +func (p *RequestMeta) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RequestMeta) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *RequestMeta) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *RequestMeta) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ClientTimeout = v + } + return nil +} + +func (p *RequestMeta) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionHash = v + } + return nil +} + +func (p *RequestMeta) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.IsBackupRequest = v + } + return nil +} + +func (p *RequestMeta) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("request_meta"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RequestMeta) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:app_id: ", p), err) + } + return err +} + +func (p *RequestMeta) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:partition_index: ", p), err) + } + return err +} + +func (p *RequestMeta) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("client_timeout", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:client_timeout: ", p), err) + } + if err := oprot.WriteI32(int32(p.ClientTimeout)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.client_timeout (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:client_timeout: ", p), err) + } + return err +} + +func (p *RequestMeta) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_hash", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_hash: ", p), err) + } + if err := oprot.WriteI64(int64(p.PartitionHash)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_hash (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_hash: ", p), err) + } + return err +} + +func (p *RequestMeta) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_backup_request", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:is_backup_request: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsBackupRequest)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_backup_request (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:is_backup_request: ", p), err) + } + return err +} + +func (p *RequestMeta) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RequestMeta(%+v)", *p) +} + +// Attributes: +// - Status +// - AppType +// - AppName +// - AppID +// - PartitionCount +// - Envs +// - IsStateful +// - MaxReplicaCount +// - ExpireSecond +// - CreateSecond +// - DropSecond +// - Duplicating +// - InitPartitionCount +// - IsBulkLoading +type AppInfo struct { + Status AppStatus `thrift:"status,1" db:"status" json:"status"` + AppType string `thrift:"app_type,2" db:"app_type" json:"app_type"` + AppName string `thrift:"app_name,3" db:"app_name" json:"app_name"` + AppID int32 `thrift:"app_id,4" db:"app_id" json:"app_id"` + PartitionCount int32 `thrift:"partition_count,5" db:"partition_count" json:"partition_count"` + Envs map[string]string `thrift:"envs,6" db:"envs" json:"envs"` + IsStateful bool `thrift:"is_stateful,7" db:"is_stateful" json:"is_stateful"` + MaxReplicaCount int32 `thrift:"max_replica_count,8" db:"max_replica_count" json:"max_replica_count"` + ExpireSecond int64 `thrift:"expire_second,9" db:"expire_second" json:"expire_second"` + CreateSecond int64 `thrift:"create_second,10" db:"create_second" json:"create_second"` + DropSecond int64 `thrift:"drop_second,11" db:"drop_second" json:"drop_second"` + Duplicating bool `thrift:"duplicating,12" db:"duplicating" json:"duplicating"` + InitPartitionCount int32 `thrift:"init_partition_count,13" db:"init_partition_count" json:"init_partition_count"` + IsBulkLoading bool `thrift:"is_bulk_loading,14" db:"is_bulk_loading" json:"is_bulk_loading"` +} + +func NewAppInfo() *AppInfo { + return &AppInfo{ + Status: 0, + + InitPartitionCount: -1, + } +} + +func (p *AppInfo) GetStatus() AppStatus { + return p.Status +} + +func (p *AppInfo) GetAppType() string { + return p.AppType +} + +func (p *AppInfo) GetAppName() string { + return p.AppName +} + +func (p *AppInfo) GetAppID() int32 { + return p.AppID +} + +func (p *AppInfo) GetPartitionCount() int32 { + return p.PartitionCount +} + +func (p *AppInfo) GetEnvs() map[string]string { + return p.Envs +} + +func (p *AppInfo) GetIsStateful() bool { + return p.IsStateful +} + +func (p *AppInfo) GetMaxReplicaCount() int32 { + return p.MaxReplicaCount +} + +func (p *AppInfo) GetExpireSecond() int64 { + return p.ExpireSecond +} + +func (p *AppInfo) GetCreateSecond() int64 { + return p.CreateSecond +} + +func (p *AppInfo) GetDropSecond() int64 { + return p.DropSecond +} + +var AppInfo_Duplicating_DEFAULT bool = false + +func (p *AppInfo) GetDuplicating() bool { + return p.Duplicating +} + +func (p *AppInfo) GetInitPartitionCount() int32 { + return p.InitPartitionCount +} + +var AppInfo_IsBulkLoading_DEFAULT bool = false + +func (p *AppInfo) GetIsBulkLoading() bool { + return p.IsBulkLoading +} +func (p *AppInfo) IsSetDuplicating() bool { + return p.Duplicating != AppInfo_Duplicating_DEFAULT +} + +func (p *AppInfo) IsSetIsBulkLoading() bool { + return p.IsBulkLoading != AppInfo_IsBulkLoading_DEFAULT +} + +func (p *AppInfo) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.MAP { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.I32 { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.I64 { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.I64 { + if err := p.ReadField10(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.I64 { + if err := p.ReadField11(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField12(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 13: + if fieldTypeId == thrift.I32 { + if err := p.ReadField13(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 14: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField14(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AppInfo) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := AppStatus(v) + p.Status = temp + } + return nil +} + +func (p *AppInfo) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppType = v + } + return nil +} + +func (p *AppInfo) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppName = v + } + return nil +} + +func (p *AppInfo) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *AppInfo) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.PartitionCount = v + } + return nil +} + +func (p *AppInfo) ReadField6(iprot thrift.TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return thrift.PrependError("error reading map begin: ", err) + } + tMap := make(map[string]string, size) + p.Envs = tMap + for i := 0; i < size; i++ { + var _key6 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _key6 = v + } + var _val7 string + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 0: ", err) + } else { + _val7 = v + } + p.Envs[_key6] = _val7 + } + if err := iprot.ReadMapEnd(); err != nil { + return thrift.PrependError("error reading map end: ", err) + } + return nil +} + +func (p *AppInfo) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.IsStateful = v + } + return nil +} + +func (p *AppInfo) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.MaxReplicaCount = v + } + return nil +} + +func (p *AppInfo) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.ExpireSecond = v + } + return nil +} + +func (p *AppInfo) ReadField10(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + p.CreateSecond = v + } + return nil +} + +func (p *AppInfo) ReadField11(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 11: ", err) + } else { + p.DropSecond = v + } + return nil +} + +func (p *AppInfo) ReadField12(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 12: ", err) + } else { + p.Duplicating = v + } + return nil +} + +func (p *AppInfo) ReadField13(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 13: ", err) + } else { + p.InitPartitionCount = v + } + return nil +} + +func (p *AppInfo) ReadField14(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 14: ", err) + } else { + p.IsBulkLoading = v + } + return nil +} + +func (p *AppInfo) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("app_info"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + if err := p.writeField11(oprot); err != nil { + return err + } + if err := p.writeField12(oprot); err != nil { + return err + } + if err := p.writeField13(oprot); err != nil { + return err + } + if err := p.writeField14(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AppInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:status: ", p), err) + } + if err := oprot.WriteI32(int32(p.Status)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.status (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:status: ", p), err) + } + return err +} + +func (p *AppInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_type", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_type: ", p), err) + } + if err := oprot.WriteString(string(p.AppType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_type (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_type: ", p), err) + } + return err +} + +func (p *AppInfo) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_name: ", p), err) + } + if err := oprot.WriteString(string(p.AppName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_name: ", p), err) + } + return err +} + +func (p *AppInfo) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_id: ", p), err) + } + return err +} + +func (p *AppInfo) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_count", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_count (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:partition_count: ", p), err) + } + return err +} + +func (p *AppInfo) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("envs", thrift.MAP, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:envs: ", p), err) + } + if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.Envs)); err != nil { + return thrift.PrependError("error writing map begin: ", err) + } + for k, v := range p.Envs { + if err := oprot.WriteString(string(k)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteString(string(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return thrift.PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:envs: ", p), err) + } + return err +} + +func (p *AppInfo) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("is_stateful", thrift.BOOL, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:is_stateful: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsStateful)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_stateful (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:is_stateful: ", p), err) + } + return err +} + +func (p *AppInfo) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_replica_count", thrift.I32, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:max_replica_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxReplicaCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_replica_count (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:max_replica_count: ", p), err) + } + return err +} + +func (p *AppInfo) writeField9(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("expire_second", thrift.I64, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:expire_second: ", p), err) + } + if err := oprot.WriteI64(int64(p.ExpireSecond)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.expire_second (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:expire_second: ", p), err) + } + return err +} + +func (p *AppInfo) writeField10(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("create_second", thrift.I64, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:create_second: ", p), err) + } + if err := oprot.WriteI64(int64(p.CreateSecond)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.create_second (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:create_second: ", p), err) + } + return err +} + +func (p *AppInfo) writeField11(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("drop_second", thrift.I64, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:drop_second: ", p), err) + } + if err := oprot.WriteI64(int64(p.DropSecond)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.drop_second (11) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:drop_second: ", p), err) + } + return err +} + +func (p *AppInfo) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetDuplicating() { + if err := oprot.WriteFieldBegin("duplicating", thrift.BOOL, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:duplicating: ", p), err) + } + if err := oprot.WriteBool(bool(p.Duplicating)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duplicating (12) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:duplicating: ", p), err) + } + } + return err +} + +func (p *AppInfo) writeField13(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("init_partition_count", thrift.I32, 13); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 13:init_partition_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.InitPartitionCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.init_partition_count (13) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 13:init_partition_count: ", p), err) + } + return err +} + +func (p *AppInfo) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetIsBulkLoading() { + if err := oprot.WriteFieldBegin("is_bulk_loading", thrift.BOOL, 14); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 14:is_bulk_loading: ", p), err) + } + if err := oprot.WriteBool(bool(p.IsBulkLoading)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.is_bulk_loading (14) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 14:is_bulk_loading: ", p), err) + } + } + return err +} + +func (p *AppInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AppInfo(%+v)", *p) +} diff --git a/go-client/idl/rrdb/GoUnusedProtection__.go b/go-client/idl/rrdb/GoUnusedProtection__.go new file mode 100644 index 0000000000..ba179697d3 --- /dev/null +++ b/go-client/idl/rrdb/GoUnusedProtection__.go @@ -0,0 +1,6 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package rrdb + +var GoUnusedProtection__ int diff --git a/go-client/idl/rrdb/meta-remote/meta-remote.go b/go-client/idl/rrdb/meta-remote/meta-remote.go new file mode 100755 index 0000000000..af4fe0b133 --- /dev/null +++ b/go-client/idl/rrdb/meta-remote/meta-remote.go @@ -0,0 +1,183 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package main + +import ( + "context" + "flag" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/incubator-pegasus/go-client/idl/rrdb" + "github.com/apache/thrift/lib/go/thrift" + "math" + "net" + "net/url" + "os" + "strconv" + "strings" +) + +var _ = replication.GoUnusedProtection__ +var _ = base.GoUnusedProtection__ +var _ = rrdb.GoUnusedProtection__ + +func Usage() { + fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") + flag.PrintDefaults() + fmt.Fprintln(os.Stderr, "\nFunctions:") + fmt.Fprintln(os.Stderr, " query_cfg_response query_cfg(query_cfg_request query)") + fmt.Fprintln(os.Stderr) + os.Exit(0) +} + +type httpHeaders map[string]string + +func (h httpHeaders) String() string { + var m map[string]string = h + return fmt.Sprintf("%s", m) +} + +func (h httpHeaders) Set(value string) error { + parts := strings.Split(value, ": ") + if len(parts) != 2 { + return fmt.Errorf("header should be of format 'Key: Value'") + } + h[parts[0]] = parts[1] + return nil +} + +func main() { + flag.Usage = Usage + var host string + var port int + var protocol string + var urlString string + var framed bool + var useHttp bool + headers := make(httpHeaders) + var parsedUrl *url.URL + var trans thrift.TTransport + _ = strconv.Atoi + _ = math.Abs + flag.Usage = Usage + flag.StringVar(&host, "h", "localhost", "Specify host and port") + flag.IntVar(&port, "p", 9090, "Specify port") + flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") + flag.StringVar(&urlString, "u", "", "Specify the url") + flag.BoolVar(&framed, "framed", false, "Use framed transport") + flag.BoolVar(&useHttp, "http", false, "Use http") + flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")") + flag.Parse() + + if len(urlString) > 0 { + var err error + parsedUrl, err = url.Parse(urlString) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + host = parsedUrl.Host + useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https" + } else if useHttp { + _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + } + + cmd := flag.Arg(0) + var err error + if useHttp { + trans, err = thrift.NewTHttpClient(parsedUrl.String()) + if len(headers) > 0 { + httptrans := trans.(*thrift.THttpClient) + for key, value := range headers { + httptrans.SetHeader(key, value) + } + } + } else { + portStr := fmt.Sprint(port) + if strings.Contains(host, ":") { + host, portStr, err = net.SplitHostPort(host) + if err != nil { + fmt.Fprintln(os.Stderr, "error with host:", err) + os.Exit(1) + } + } + trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) + if err != nil { + fmt.Fprintln(os.Stderr, "error resolving address:", err) + os.Exit(1) + } + if framed { + trans = thrift.NewTFramedTransport(trans) + } + } + if err != nil { + fmt.Fprintln(os.Stderr, "Error creating transport", err) + os.Exit(1) + } + defer trans.Close() + var protocolFactory thrift.TProtocolFactory + switch protocol { + case "compact": + protocolFactory = thrift.NewTCompactProtocolFactory() + break + case "simplejson": + protocolFactory = thrift.NewTSimpleJSONProtocolFactory() + break + case "json": + protocolFactory = thrift.NewTJSONProtocolFactory() + break + case "binary", "": + protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() + break + default: + fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) + Usage() + os.Exit(1) + } + iprot := protocolFactory.GetProtocol(trans) + oprot := protocolFactory.GetProtocol(trans) + client := rrdb.NewMetaClient(thrift.NewTStandardClient(iprot, oprot)) + if err := trans.Open(); err != nil { + fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) + os.Exit(1) + } + + switch cmd { + case "query_cfg": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "QueryCfg requires 1 args") + flag.Usage() + } + arg128 := flag.Arg(1) + mbTrans129 := thrift.NewTMemoryBufferLen(len(arg128)) + defer mbTrans129.Close() + _, err130 := mbTrans129.WriteString(arg128) + if err130 != nil { + Usage() + return + } + factory131 := thrift.NewTJSONProtocolFactory() + jsProt132 := factory131.GetProtocol(mbTrans129) + argvalue0 := replication.NewQueryCfgRequest() + err133 := argvalue0.Read(jsProt132) + if err133 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.QueryCfg(context.Background(), value0)) + fmt.Print("\n") + break + case "": + Usage() + break + default: + fmt.Fprintln(os.Stderr, "Invalid function ", cmd) + } +} diff --git a/go-client/idl/rrdb/rrdb-consts.go b/go-client/idl/rrdb/rrdb-consts.go new file mode 100644 index 0000000000..a888b636ff --- /dev/null +++ b/go-client/idl/rrdb/rrdb-consts.go @@ -0,0 +1,27 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package rrdb + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = replication.GoUnusedProtection__ +var _ = base.GoUnusedProtection__ + +func init() { +} diff --git a/go-client/idl/rrdb/rrdb-remote/rrdb-remote.go b/go-client/idl/rrdb/rrdb-remote/rrdb-remote.go new file mode 100755 index 0000000000..a74084a45a --- /dev/null +++ b/go-client/idl/rrdb/rrdb-remote/rrdb-remote.go @@ -0,0 +1,536 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package main + +import ( + "context" + "flag" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/incubator-pegasus/go-client/idl/rrdb" + "github.com/apache/thrift/lib/go/thrift" + "math" + "net" + "net/url" + "os" + "strconv" + "strings" +) + +var _ = replication.GoUnusedProtection__ +var _ = base.GoUnusedProtection__ +var _ = rrdb.GoUnusedProtection__ + +func Usage() { + fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") + flag.PrintDefaults() + fmt.Fprintln(os.Stderr, "\nFunctions:") + fmt.Fprintln(os.Stderr, " update_response put(update_request update)") + fmt.Fprintln(os.Stderr, " update_response multi_put(multi_put_request request)") + fmt.Fprintln(os.Stderr, " update_response remove(blob key)") + fmt.Fprintln(os.Stderr, " multi_remove_response multi_remove(multi_remove_request request)") + fmt.Fprintln(os.Stderr, " incr_response incr(incr_request request)") + fmt.Fprintln(os.Stderr, " check_and_set_response check_and_set(check_and_set_request request)") + fmt.Fprintln(os.Stderr, " check_and_mutate_response check_and_mutate(check_and_mutate_request request)") + fmt.Fprintln(os.Stderr, " read_response get(blob key)") + fmt.Fprintln(os.Stderr, " multi_get_response multi_get(multi_get_request request)") + fmt.Fprintln(os.Stderr, " batch_get_response batch_get(batch_get_request request)") + fmt.Fprintln(os.Stderr, " count_response sortkey_count(blob hash_key)") + fmt.Fprintln(os.Stderr, " ttl_response ttl(blob key)") + fmt.Fprintln(os.Stderr, " scan_response get_scanner(get_scanner_request request)") + fmt.Fprintln(os.Stderr, " scan_response scan(scan_request request)") + fmt.Fprintln(os.Stderr, " void clear_scanner(i64 context_id)") + fmt.Fprintln(os.Stderr) + os.Exit(0) +} + +type httpHeaders map[string]string + +func (h httpHeaders) String() string { + var m map[string]string = h + return fmt.Sprintf("%s", m) +} + +func (h httpHeaders) Set(value string) error { + parts := strings.Split(value, ": ") + if len(parts) != 2 { + return fmt.Errorf("header should be of format 'Key: Value'") + } + h[parts[0]] = parts[1] + return nil +} + +func main() { + flag.Usage = Usage + var host string + var port int + var protocol string + var urlString string + var framed bool + var useHttp bool + headers := make(httpHeaders) + var parsedUrl *url.URL + var trans thrift.TTransport + _ = strconv.Atoi + _ = math.Abs + flag.Usage = Usage + flag.StringVar(&host, "h", "localhost", "Specify host and port") + flag.IntVar(&port, "p", 9090, "Specify port") + flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") + flag.StringVar(&urlString, "u", "", "Specify the url") + flag.BoolVar(&framed, "framed", false, "Use framed transport") + flag.BoolVar(&useHttp, "http", false, "Use http") + flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")") + flag.Parse() + + if len(urlString) > 0 { + var err error + parsedUrl, err = url.Parse(urlString) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + host = parsedUrl.Host + useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https" + } else if useHttp { + _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) + if err != nil { + fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) + flag.Usage() + } + } + + cmd := flag.Arg(0) + var err error + if useHttp { + trans, err = thrift.NewTHttpClient(parsedUrl.String()) + if len(headers) > 0 { + httptrans := trans.(*thrift.THttpClient) + for key, value := range headers { + httptrans.SetHeader(key, value) + } + } + } else { + portStr := fmt.Sprint(port) + if strings.Contains(host, ":") { + host, portStr, err = net.SplitHostPort(host) + if err != nil { + fmt.Fprintln(os.Stderr, "error with host:", err) + os.Exit(1) + } + } + trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) + if err != nil { + fmt.Fprintln(os.Stderr, "error resolving address:", err) + os.Exit(1) + } + if framed { + trans = thrift.NewTFramedTransport(trans) + } + } + if err != nil { + fmt.Fprintln(os.Stderr, "Error creating transport", err) + os.Exit(1) + } + defer trans.Close() + var protocolFactory thrift.TProtocolFactory + switch protocol { + case "compact": + protocolFactory = thrift.NewTCompactProtocolFactory() + break + case "simplejson": + protocolFactory = thrift.NewTSimpleJSONProtocolFactory() + break + case "json": + protocolFactory = thrift.NewTJSONProtocolFactory() + break + case "binary", "": + protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() + break + default: + fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) + Usage() + os.Exit(1) + } + iprot := protocolFactory.GetProtocol(trans) + oprot := protocolFactory.GetProtocol(trans) + client := rrdb.NewRrdbClient(thrift.NewTStandardClient(iprot, oprot)) + if err := trans.Open(); err != nil { + fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) + os.Exit(1) + } + + switch cmd { + case "put": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "Put requires 1 args") + flag.Usage() + } + arg39 := flag.Arg(1) + mbTrans40 := thrift.NewTMemoryBufferLen(len(arg39)) + defer mbTrans40.Close() + _, err41 := mbTrans40.WriteString(arg39) + if err41 != nil { + Usage() + return + } + factory42 := thrift.NewTJSONProtocolFactory() + jsProt43 := factory42.GetProtocol(mbTrans40) + argvalue0 := rrdb.NewUpdateRequest() + err44 := argvalue0.Read(jsProt43) + if err44 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.Put(context.Background(), value0)) + fmt.Print("\n") + break + case "multi_put": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "MultiPut requires 1 args") + flag.Usage() + } + arg45 := flag.Arg(1) + mbTrans46 := thrift.NewTMemoryBufferLen(len(arg45)) + defer mbTrans46.Close() + _, err47 := mbTrans46.WriteString(arg45) + if err47 != nil { + Usage() + return + } + factory48 := thrift.NewTJSONProtocolFactory() + jsProt49 := factory48.GetProtocol(mbTrans46) + argvalue0 := rrdb.NewMultiPutRequest() + err50 := argvalue0.Read(jsProt49) + if err50 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.MultiPut(context.Background(), value0)) + fmt.Print("\n") + break + case "remove": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "Remove requires 1 args") + flag.Usage() + } + arg51 := flag.Arg(1) + mbTrans52 := thrift.NewTMemoryBufferLen(len(arg51)) + defer mbTrans52.Close() + _, err53 := mbTrans52.WriteString(arg51) + if err53 != nil { + Usage() + return + } + factory54 := thrift.NewTJSONProtocolFactory() + jsProt55 := factory54.GetProtocol(mbTrans52) + argvalue0 := base.NewBlob() + err56 := argvalue0.Read(jsProt55) + if err56 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.Remove(context.Background(), value0)) + fmt.Print("\n") + break + case "multi_remove": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "MultiRemove requires 1 args") + flag.Usage() + } + arg57 := flag.Arg(1) + mbTrans58 := thrift.NewTMemoryBufferLen(len(arg57)) + defer mbTrans58.Close() + _, err59 := mbTrans58.WriteString(arg57) + if err59 != nil { + Usage() + return + } + factory60 := thrift.NewTJSONProtocolFactory() + jsProt61 := factory60.GetProtocol(mbTrans58) + argvalue0 := rrdb.NewMultiRemoveRequest() + err62 := argvalue0.Read(jsProt61) + if err62 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.MultiRemove(context.Background(), value0)) + fmt.Print("\n") + break + case "incr": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "Incr requires 1 args") + flag.Usage() + } + arg63 := flag.Arg(1) + mbTrans64 := thrift.NewTMemoryBufferLen(len(arg63)) + defer mbTrans64.Close() + _, err65 := mbTrans64.WriteString(arg63) + if err65 != nil { + Usage() + return + } + factory66 := thrift.NewTJSONProtocolFactory() + jsProt67 := factory66.GetProtocol(mbTrans64) + argvalue0 := rrdb.NewIncrRequest() + err68 := argvalue0.Read(jsProt67) + if err68 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.Incr(context.Background(), value0)) + fmt.Print("\n") + break + case "check_and_set": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "CheckAndSet requires 1 args") + flag.Usage() + } + arg69 := flag.Arg(1) + mbTrans70 := thrift.NewTMemoryBufferLen(len(arg69)) + defer mbTrans70.Close() + _, err71 := mbTrans70.WriteString(arg69) + if err71 != nil { + Usage() + return + } + factory72 := thrift.NewTJSONProtocolFactory() + jsProt73 := factory72.GetProtocol(mbTrans70) + argvalue0 := rrdb.NewCheckAndSetRequest() + err74 := argvalue0.Read(jsProt73) + if err74 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.CheckAndSet(context.Background(), value0)) + fmt.Print("\n") + break + case "check_and_mutate": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "CheckAndMutate requires 1 args") + flag.Usage() + } + arg75 := flag.Arg(1) + mbTrans76 := thrift.NewTMemoryBufferLen(len(arg75)) + defer mbTrans76.Close() + _, err77 := mbTrans76.WriteString(arg75) + if err77 != nil { + Usage() + return + } + factory78 := thrift.NewTJSONProtocolFactory() + jsProt79 := factory78.GetProtocol(mbTrans76) + argvalue0 := rrdb.NewCheckAndMutateRequest() + err80 := argvalue0.Read(jsProt79) + if err80 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.CheckAndMutate(context.Background(), value0)) + fmt.Print("\n") + break + case "get": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "Get requires 1 args") + flag.Usage() + } + arg81 := flag.Arg(1) + mbTrans82 := thrift.NewTMemoryBufferLen(len(arg81)) + defer mbTrans82.Close() + _, err83 := mbTrans82.WriteString(arg81) + if err83 != nil { + Usage() + return + } + factory84 := thrift.NewTJSONProtocolFactory() + jsProt85 := factory84.GetProtocol(mbTrans82) + argvalue0 := base.NewBlob() + err86 := argvalue0.Read(jsProt85) + if err86 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.Get(context.Background(), value0)) + fmt.Print("\n") + break + case "multi_get": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "MultiGet requires 1 args") + flag.Usage() + } + arg87 := flag.Arg(1) + mbTrans88 := thrift.NewTMemoryBufferLen(len(arg87)) + defer mbTrans88.Close() + _, err89 := mbTrans88.WriteString(arg87) + if err89 != nil { + Usage() + return + } + factory90 := thrift.NewTJSONProtocolFactory() + jsProt91 := factory90.GetProtocol(mbTrans88) + argvalue0 := rrdb.NewMultiGetRequest() + err92 := argvalue0.Read(jsProt91) + if err92 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.MultiGet(context.Background(), value0)) + fmt.Print("\n") + break + case "batch_get": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "BatchGet requires 1 args") + flag.Usage() + } + arg93 := flag.Arg(1) + mbTrans94 := thrift.NewTMemoryBufferLen(len(arg93)) + defer mbTrans94.Close() + _, err95 := mbTrans94.WriteString(arg93) + if err95 != nil { + Usage() + return + } + factory96 := thrift.NewTJSONProtocolFactory() + jsProt97 := factory96.GetProtocol(mbTrans94) + argvalue0 := rrdb.NewBatchGetRequest() + err98 := argvalue0.Read(jsProt97) + if err98 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.BatchGet(context.Background(), value0)) + fmt.Print("\n") + break + case "sortkey_count": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "SortkeyCount requires 1 args") + flag.Usage() + } + arg99 := flag.Arg(1) + mbTrans100 := thrift.NewTMemoryBufferLen(len(arg99)) + defer mbTrans100.Close() + _, err101 := mbTrans100.WriteString(arg99) + if err101 != nil { + Usage() + return + } + factory102 := thrift.NewTJSONProtocolFactory() + jsProt103 := factory102.GetProtocol(mbTrans100) + argvalue0 := base.NewBlob() + err104 := argvalue0.Read(jsProt103) + if err104 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.SortkeyCount(context.Background(), value0)) + fmt.Print("\n") + break + case "ttl": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "TTL requires 1 args") + flag.Usage() + } + arg105 := flag.Arg(1) + mbTrans106 := thrift.NewTMemoryBufferLen(len(arg105)) + defer mbTrans106.Close() + _, err107 := mbTrans106.WriteString(arg105) + if err107 != nil { + Usage() + return + } + factory108 := thrift.NewTJSONProtocolFactory() + jsProt109 := factory108.GetProtocol(mbTrans106) + argvalue0 := base.NewBlob() + err110 := argvalue0.Read(jsProt109) + if err110 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.TTL(context.Background(), value0)) + fmt.Print("\n") + break + case "get_scanner": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "GetScanner requires 1 args") + flag.Usage() + } + arg111 := flag.Arg(1) + mbTrans112 := thrift.NewTMemoryBufferLen(len(arg111)) + defer mbTrans112.Close() + _, err113 := mbTrans112.WriteString(arg111) + if err113 != nil { + Usage() + return + } + factory114 := thrift.NewTJSONProtocolFactory() + jsProt115 := factory114.GetProtocol(mbTrans112) + argvalue0 := rrdb.NewGetScannerRequest() + err116 := argvalue0.Read(jsProt115) + if err116 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.GetScanner(context.Background(), value0)) + fmt.Print("\n") + break + case "scan": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "Scan requires 1 args") + flag.Usage() + } + arg117 := flag.Arg(1) + mbTrans118 := thrift.NewTMemoryBufferLen(len(arg117)) + defer mbTrans118.Close() + _, err119 := mbTrans118.WriteString(arg117) + if err119 != nil { + Usage() + return + } + factory120 := thrift.NewTJSONProtocolFactory() + jsProt121 := factory120.GetProtocol(mbTrans118) + argvalue0 := rrdb.NewScanRequest() + err122 := argvalue0.Read(jsProt121) + if err122 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.Scan(context.Background(), value0)) + fmt.Print("\n") + break + case "clear_scanner": + if flag.NArg()-1 != 1 { + fmt.Fprintln(os.Stderr, "ClearScanner requires 1 args") + flag.Usage() + } + argvalue0, err123 := (strconv.ParseInt(flag.Arg(1), 10, 64)) + if err123 != nil { + Usage() + return + } + value0 := argvalue0 + fmt.Print(client.ClearScanner(context.Background(), value0)) + fmt.Print("\n") + break + case "": + Usage() + break + default: + fmt.Fprintln(os.Stderr, "Invalid function ", cmd) + } +} diff --git a/go-client/idl/rrdb/rrdb.go b/go-client/idl/rrdb/rrdb.go new file mode 100644 index 0000000000..31782bc3be --- /dev/null +++ b/go-client/idl/rrdb/rrdb.go @@ -0,0 +1,12123 @@ +// Autogenerated by Thrift Compiler (0.13.0) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package rrdb + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/thrift/lib/go/thrift" + "reflect" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +var _ = replication.GoUnusedProtection__ +var _ = base.GoUnusedProtection__ + +type FilterType int64 + +const ( + FilterType_FT_NO_FILTER FilterType = 0 + FilterType_FT_MATCH_ANYWHERE FilterType = 1 + FilterType_FT_MATCH_PREFIX FilterType = 2 + FilterType_FT_MATCH_POSTFIX FilterType = 3 +) + +func (p FilterType) String() string { + switch p { + case FilterType_FT_NO_FILTER: + return "FT_NO_FILTER" + case FilterType_FT_MATCH_ANYWHERE: + return "FT_MATCH_ANYWHERE" + case FilterType_FT_MATCH_PREFIX: + return "FT_MATCH_PREFIX" + case FilterType_FT_MATCH_POSTFIX: + return "FT_MATCH_POSTFIX" + } + return "" +} + +func FilterTypeFromString(s string) (FilterType, error) { + switch s { + case "FT_NO_FILTER": + return FilterType_FT_NO_FILTER, nil + case "FT_MATCH_ANYWHERE": + return FilterType_FT_MATCH_ANYWHERE, nil + case "FT_MATCH_PREFIX": + return FilterType_FT_MATCH_PREFIX, nil + case "FT_MATCH_POSTFIX": + return FilterType_FT_MATCH_POSTFIX, nil + } + return FilterType(0), fmt.Errorf("not a valid FilterType string") +} + +func FilterTypePtr(v FilterType) *FilterType { return &v } + +func (p FilterType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *FilterType) UnmarshalText(text []byte) error { + q, err := FilterTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *FilterType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = FilterType(v) + return nil +} + +func (p *FilterType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type CasCheckType int64 + +const ( + CasCheckType_CT_NO_CHECK CasCheckType = 0 + CasCheckType_CT_VALUE_NOT_EXIST CasCheckType = 1 + CasCheckType_CT_VALUE_NOT_EXIST_OR_EMPTY CasCheckType = 2 + CasCheckType_CT_VALUE_EXIST CasCheckType = 3 + CasCheckType_CT_VALUE_NOT_EMPTY CasCheckType = 4 + CasCheckType_CT_VALUE_MATCH_ANYWHERE CasCheckType = 5 + CasCheckType_CT_VALUE_MATCH_PREFIX CasCheckType = 6 + CasCheckType_CT_VALUE_MATCH_POSTFIX CasCheckType = 7 + CasCheckType_CT_VALUE_BYTES_LESS CasCheckType = 8 + CasCheckType_CT_VALUE_BYTES_LESS_OR_EQUAL CasCheckType = 9 + CasCheckType_CT_VALUE_BYTES_EQUAL CasCheckType = 10 + CasCheckType_CT_VALUE_BYTES_GREATER_OR_EQUAL CasCheckType = 11 + CasCheckType_CT_VALUE_BYTES_GREATER CasCheckType = 12 + CasCheckType_CT_VALUE_INT_LESS CasCheckType = 13 + CasCheckType_CT_VALUE_INT_LESS_OR_EQUAL CasCheckType = 14 + CasCheckType_CT_VALUE_INT_EQUAL CasCheckType = 15 + CasCheckType_CT_VALUE_INT_GREATER_OR_EQUAL CasCheckType = 16 + CasCheckType_CT_VALUE_INT_GREATER CasCheckType = 17 +) + +func (p CasCheckType) String() string { + switch p { + case CasCheckType_CT_NO_CHECK: + return "CT_NO_CHECK" + case CasCheckType_CT_VALUE_NOT_EXIST: + return "CT_VALUE_NOT_EXIST" + case CasCheckType_CT_VALUE_NOT_EXIST_OR_EMPTY: + return "CT_VALUE_NOT_EXIST_OR_EMPTY" + case CasCheckType_CT_VALUE_EXIST: + return "CT_VALUE_EXIST" + case CasCheckType_CT_VALUE_NOT_EMPTY: + return "CT_VALUE_NOT_EMPTY" + case CasCheckType_CT_VALUE_MATCH_ANYWHERE: + return "CT_VALUE_MATCH_ANYWHERE" + case CasCheckType_CT_VALUE_MATCH_PREFIX: + return "CT_VALUE_MATCH_PREFIX" + case CasCheckType_CT_VALUE_MATCH_POSTFIX: + return "CT_VALUE_MATCH_POSTFIX" + case CasCheckType_CT_VALUE_BYTES_LESS: + return "CT_VALUE_BYTES_LESS" + case CasCheckType_CT_VALUE_BYTES_LESS_OR_EQUAL: + return "CT_VALUE_BYTES_LESS_OR_EQUAL" + case CasCheckType_CT_VALUE_BYTES_EQUAL: + return "CT_VALUE_BYTES_EQUAL" + case CasCheckType_CT_VALUE_BYTES_GREATER_OR_EQUAL: + return "CT_VALUE_BYTES_GREATER_OR_EQUAL" + case CasCheckType_CT_VALUE_BYTES_GREATER: + return "CT_VALUE_BYTES_GREATER" + case CasCheckType_CT_VALUE_INT_LESS: + return "CT_VALUE_INT_LESS" + case CasCheckType_CT_VALUE_INT_LESS_OR_EQUAL: + return "CT_VALUE_INT_LESS_OR_EQUAL" + case CasCheckType_CT_VALUE_INT_EQUAL: + return "CT_VALUE_INT_EQUAL" + case CasCheckType_CT_VALUE_INT_GREATER_OR_EQUAL: + return "CT_VALUE_INT_GREATER_OR_EQUAL" + case CasCheckType_CT_VALUE_INT_GREATER: + return "CT_VALUE_INT_GREATER" + } + return "" +} + +func CasCheckTypeFromString(s string) (CasCheckType, error) { + switch s { + case "CT_NO_CHECK": + return CasCheckType_CT_NO_CHECK, nil + case "CT_VALUE_NOT_EXIST": + return CasCheckType_CT_VALUE_NOT_EXIST, nil + case "CT_VALUE_NOT_EXIST_OR_EMPTY": + return CasCheckType_CT_VALUE_NOT_EXIST_OR_EMPTY, nil + case "CT_VALUE_EXIST": + return CasCheckType_CT_VALUE_EXIST, nil + case "CT_VALUE_NOT_EMPTY": + return CasCheckType_CT_VALUE_NOT_EMPTY, nil + case "CT_VALUE_MATCH_ANYWHERE": + return CasCheckType_CT_VALUE_MATCH_ANYWHERE, nil + case "CT_VALUE_MATCH_PREFIX": + return CasCheckType_CT_VALUE_MATCH_PREFIX, nil + case "CT_VALUE_MATCH_POSTFIX": + return CasCheckType_CT_VALUE_MATCH_POSTFIX, nil + case "CT_VALUE_BYTES_LESS": + return CasCheckType_CT_VALUE_BYTES_LESS, nil + case "CT_VALUE_BYTES_LESS_OR_EQUAL": + return CasCheckType_CT_VALUE_BYTES_LESS_OR_EQUAL, nil + case "CT_VALUE_BYTES_EQUAL": + return CasCheckType_CT_VALUE_BYTES_EQUAL, nil + case "CT_VALUE_BYTES_GREATER_OR_EQUAL": + return CasCheckType_CT_VALUE_BYTES_GREATER_OR_EQUAL, nil + case "CT_VALUE_BYTES_GREATER": + return CasCheckType_CT_VALUE_BYTES_GREATER, nil + case "CT_VALUE_INT_LESS": + return CasCheckType_CT_VALUE_INT_LESS, nil + case "CT_VALUE_INT_LESS_OR_EQUAL": + return CasCheckType_CT_VALUE_INT_LESS_OR_EQUAL, nil + case "CT_VALUE_INT_EQUAL": + return CasCheckType_CT_VALUE_INT_EQUAL, nil + case "CT_VALUE_INT_GREATER_OR_EQUAL": + return CasCheckType_CT_VALUE_INT_GREATER_OR_EQUAL, nil + case "CT_VALUE_INT_GREATER": + return CasCheckType_CT_VALUE_INT_GREATER, nil + } + return CasCheckType(0), fmt.Errorf("not a valid CasCheckType string") +} + +func CasCheckTypePtr(v CasCheckType) *CasCheckType { return &v } + +func (p CasCheckType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *CasCheckType) UnmarshalText(text []byte) error { + q, err := CasCheckTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *CasCheckType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = CasCheckType(v) + return nil +} + +func (p *CasCheckType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +type MutateOperation int64 + +const ( + MutateOperation_MO_PUT MutateOperation = 0 + MutateOperation_MO_DELETE MutateOperation = 1 +) + +func (p MutateOperation) String() string { + switch p { + case MutateOperation_MO_PUT: + return "MO_PUT" + case MutateOperation_MO_DELETE: + return "MO_DELETE" + } + return "" +} + +func MutateOperationFromString(s string) (MutateOperation, error) { + switch s { + case "MO_PUT": + return MutateOperation_MO_PUT, nil + case "MO_DELETE": + return MutateOperation_MO_DELETE, nil + } + return MutateOperation(0), fmt.Errorf("not a valid MutateOperation string") +} + +func MutateOperationPtr(v MutateOperation) *MutateOperation { return &v } + +func (p MutateOperation) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *MutateOperation) UnmarshalText(text []byte) error { + q, err := MutateOperationFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *MutateOperation) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = MutateOperation(v) + return nil +} + +func (p *MutateOperation) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Attributes: +// - Key +// - Value +// - ExpireTsSeconds +type UpdateRequest struct { + Key *base.Blob `thrift:"key,1" db:"key" json:"key"` + Value *base.Blob `thrift:"value,2" db:"value" json:"value"` + ExpireTsSeconds int32 `thrift:"expire_ts_seconds,3" db:"expire_ts_seconds" json:"expire_ts_seconds"` +} + +func NewUpdateRequest() *UpdateRequest { + return &UpdateRequest{} +} + +var UpdateRequest_Key_DEFAULT *base.Blob + +func (p *UpdateRequest) GetKey() *base.Blob { + if !p.IsSetKey() { + return UpdateRequest_Key_DEFAULT + } + return p.Key +} + +var UpdateRequest_Value_DEFAULT *base.Blob + +func (p *UpdateRequest) GetValue() *base.Blob { + if !p.IsSetValue() { + return UpdateRequest_Value_DEFAULT + } + return p.Value +} + +func (p *UpdateRequest) GetExpireTsSeconds() int32 { + return p.ExpireTsSeconds +} +func (p *UpdateRequest) IsSetKey() bool { + return p.Key != nil +} + +func (p *UpdateRequest) IsSetValue() bool { + return p.Value != nil +} + +func (p *UpdateRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *UpdateRequest) ReadField1(iprot thrift.TProtocol) error { + p.Key = &base.Blob{} + if err := p.Key.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) + } + return nil +} + +func (p *UpdateRequest) ReadField2(iprot thrift.TProtocol) error { + p.Value = &base.Blob{} + if err := p.Value.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err) + } + return nil +} + +func (p *UpdateRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ExpireTsSeconds = v + } + return nil +} + +func (p *UpdateRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("update_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *UpdateRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := p.Key.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *UpdateRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := p.Value.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *UpdateRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("expire_ts_seconds", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:expire_ts_seconds: ", p), err) + } + if err := oprot.WriteI32(int32(p.ExpireTsSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.expire_ts_seconds (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:expire_ts_seconds: ", p), err) + } + return err +} + +func (p *UpdateRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("UpdateRequest(%+v)", *p) +} + +// Attributes: +// - Error +// - AppID +// - PartitionIndex +// - Decree +// - Server +type UpdateResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + AppID int32 `thrift:"app_id,2" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,3" db:"partition_index" json:"partition_index"` + Decree int64 `thrift:"decree,4" db:"decree" json:"decree"` + Server string `thrift:"server,5" db:"server" json:"server"` +} + +func NewUpdateResponse() *UpdateResponse { + return &UpdateResponse{} +} + +func (p *UpdateResponse) GetError() int32 { + return p.Error +} + +func (p *UpdateResponse) GetAppID() int32 { + return p.AppID +} + +func (p *UpdateResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *UpdateResponse) GetDecree() int64 { + return p.Decree +} + +func (p *UpdateResponse) GetServer() string { + return p.Server +} +func (p *UpdateResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.STRING { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *UpdateResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *UpdateResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *UpdateResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *UpdateResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Decree = v + } + return nil +} + +func (p *UpdateResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *UpdateResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("update_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *UpdateResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *UpdateResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:app_id: ", p), err) + } + return err +} + +func (p *UpdateResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:partition_index: ", p), err) + } + return err +} + +func (p *UpdateResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("decree", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.Decree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.decree (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:decree: ", p), err) + } + return err +} + +func (p *UpdateResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:server: ", p), err) + } + return err +} + +func (p *UpdateResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("UpdateResponse(%+v)", *p) +} + +// Attributes: +// - Error +// - Value +// - AppID +// - PartitionIndex +// - Server +type ReadResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + Value *base.Blob `thrift:"value,2" db:"value" json:"value"` + AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` + // unused field # 5 + Server string `thrift:"server,6" db:"server" json:"server"` +} + +func NewReadResponse() *ReadResponse { + return &ReadResponse{} +} + +func (p *ReadResponse) GetError() int32 { + return p.Error +} + +var ReadResponse_Value_DEFAULT *base.Blob + +func (p *ReadResponse) GetValue() *base.Blob { + if !p.IsSetValue() { + return ReadResponse_Value_DEFAULT + } + return p.Value +} + +func (p *ReadResponse) GetAppID() int32 { + return p.AppID +} + +func (p *ReadResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *ReadResponse) GetServer() string { + return p.Server +} +func (p *ReadResponse) IsSetValue() bool { + return p.Value != nil +} + +func (p *ReadResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ReadResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *ReadResponse) ReadField2(iprot thrift.TProtocol) error { + p.Value = &base.Blob{} + if err := p.Value.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err) + } + return nil +} + +func (p *ReadResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *ReadResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *ReadResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *ReadResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("read_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ReadResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *ReadResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := p.Value.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *ReadResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) + } + return err +} + +func (p *ReadResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) + } + return err +} + +func (p *ReadResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *ReadResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ReadResponse(%+v)", *p) +} + +// Attributes: +// - Error +// - TTLSeconds +// - AppID +// - PartitionIndex +// - Server +type TTLResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + TTLSeconds int32 `thrift:"ttl_seconds,2" db:"ttl_seconds" json:"ttl_seconds"` + AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` + // unused field # 5 + Server string `thrift:"server,6" db:"server" json:"server"` +} + +func NewTTLResponse() *TTLResponse { + return &TTLResponse{} +} + +func (p *TTLResponse) GetError() int32 { + return p.Error +} + +func (p *TTLResponse) GetTTLSeconds() int32 { + return p.TTLSeconds +} + +func (p *TTLResponse) GetAppID() int32 { + return p.AppID +} + +func (p *TTLResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *TTLResponse) GetServer() string { + return p.Server +} +func (p *TTLResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I32 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *TTLResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *TTLResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.TTLSeconds = v + } + return nil +} + +func (p *TTLResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *TTLResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *TTLResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *TTLResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ttl_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *TTLResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *TTLResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("ttl_seconds", thrift.I32, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ttl_seconds: ", p), err) + } + if err := oprot.WriteI32(int32(p.TTLSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ttl_seconds (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ttl_seconds: ", p), err) + } + return err +} + +func (p *TTLResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) + } + return err +} + +func (p *TTLResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) + } + return err +} + +func (p *TTLResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *TTLResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TTLResponse(%+v)", *p) +} + +// Attributes: +// - Error +// - Count +// - AppID +// - PartitionIndex +// - Server +type CountResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + Count int64 `thrift:"count,2" db:"count" json:"count"` + AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` + // unused field # 5 + Server string `thrift:"server,6" db:"server" json:"server"` +} + +func NewCountResponse() *CountResponse { + return &CountResponse{} +} + +func (p *CountResponse) GetError() int32 { + return p.Error +} + +func (p *CountResponse) GetCount() int64 { + return p.Count +} + +func (p *CountResponse) GetAppID() int32 { + return p.AppID +} + +func (p *CountResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *CountResponse) GetServer() string { + return p.Server +} +func (p *CountResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CountResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *CountResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Count = v + } + return nil +} + +func (p *CountResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *CountResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *CountResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *CountResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("count_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CountResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *CountResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("count", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:count: ", p), err) + } + if err := oprot.WriteI64(int64(p.Count)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:count: ", p), err) + } + return err +} + +func (p *CountResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) + } + return err +} + +func (p *CountResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) + } + return err +} + +func (p *CountResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *CountResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CountResponse(%+v)", *p) +} + +// Attributes: +// - Key +// - Value +// - ExpireTsSeconds +type KeyValue struct { + Key *base.Blob `thrift:"key,1" db:"key" json:"key"` + Value *base.Blob `thrift:"value,2" db:"value" json:"value"` + ExpireTsSeconds *int32 `thrift:"expire_ts_seconds,3" db:"expire_ts_seconds" json:"expire_ts_seconds,omitempty"` +} + +func NewKeyValue() *KeyValue { + return &KeyValue{} +} + +var KeyValue_Key_DEFAULT *base.Blob + +func (p *KeyValue) GetKey() *base.Blob { + if !p.IsSetKey() { + return KeyValue_Key_DEFAULT + } + return p.Key +} + +var KeyValue_Value_DEFAULT *base.Blob + +func (p *KeyValue) GetValue() *base.Blob { + if !p.IsSetValue() { + return KeyValue_Value_DEFAULT + } + return p.Value +} + +var KeyValue_ExpireTsSeconds_DEFAULT int32 + +func (p *KeyValue) GetExpireTsSeconds() int32 { + if !p.IsSetExpireTsSeconds() { + return KeyValue_ExpireTsSeconds_DEFAULT + } + return *p.ExpireTsSeconds +} +func (p *KeyValue) IsSetKey() bool { + return p.Key != nil +} + +func (p *KeyValue) IsSetValue() bool { + return p.Value != nil +} + +func (p *KeyValue) IsSetExpireTsSeconds() bool { + return p.ExpireTsSeconds != nil +} + +func (p *KeyValue) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *KeyValue) ReadField1(iprot thrift.TProtocol) error { + p.Key = &base.Blob{} + if err := p.Key.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) + } + return nil +} + +func (p *KeyValue) ReadField2(iprot thrift.TProtocol) error { + p.Value = &base.Blob{} + if err := p.Value.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err) + } + return nil +} + +func (p *KeyValue) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ExpireTsSeconds = &v + } + return nil +} + +func (p *KeyValue) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("key_value"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *KeyValue) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := p.Key.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *KeyValue) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := p.Value.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *KeyValue) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetExpireTsSeconds() { + if err := oprot.WriteFieldBegin("expire_ts_seconds", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:expire_ts_seconds: ", p), err) + } + if err := oprot.WriteI32(int32(*p.ExpireTsSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.expire_ts_seconds (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:expire_ts_seconds: ", p), err) + } + } + return err +} + +func (p *KeyValue) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("KeyValue(%+v)", *p) +} + +// Attributes: +// - HashKey +// - Kvs +// - ExpireTsSeconds +type MultiPutRequest struct { + HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` + Kvs []*KeyValue `thrift:"kvs,2" db:"kvs" json:"kvs"` + ExpireTsSeconds int32 `thrift:"expire_ts_seconds,3" db:"expire_ts_seconds" json:"expire_ts_seconds"` +} + +func NewMultiPutRequest() *MultiPutRequest { + return &MultiPutRequest{} +} + +var MultiPutRequest_HashKey_DEFAULT *base.Blob + +func (p *MultiPutRequest) GetHashKey() *base.Blob { + if !p.IsSetHashKey() { + return MultiPutRequest_HashKey_DEFAULT + } + return p.HashKey +} + +func (p *MultiPutRequest) GetKvs() []*KeyValue { + return p.Kvs +} + +func (p *MultiPutRequest) GetExpireTsSeconds() int32 { + return p.ExpireTsSeconds +} +func (p *MultiPutRequest) IsSetHashKey() bool { + return p.HashKey != nil +} + +func (p *MultiPutRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *MultiPutRequest) ReadField1(iprot thrift.TProtocol) error { + p.HashKey = &base.Blob{} + if err := p.HashKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) + } + return nil +} + +func (p *MultiPutRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*KeyValue, 0, size) + p.Kvs = tSlice + for i := 0; i < size; i++ { + _elem0 := &KeyValue{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Kvs = append(p.Kvs, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *MultiPutRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ExpireTsSeconds = v + } + return nil +} + +func (p *MultiPutRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_put_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *MultiPutRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *MultiPutRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("kvs", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:kvs: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Kvs)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Kvs { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:kvs: ", p), err) + } + return err +} + +func (p *MultiPutRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("expire_ts_seconds", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:expire_ts_seconds: ", p), err) + } + if err := oprot.WriteI32(int32(p.ExpireTsSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.expire_ts_seconds (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:expire_ts_seconds: ", p), err) + } + return err +} + +func (p *MultiPutRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("MultiPutRequest(%+v)", *p) +} + +// Attributes: +// - HashKey +// - SortKeys +// - MaxCount +type MultiRemoveRequest struct { + HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` + SortKeys []*base.Blob `thrift:"sort_keys,2" db:"sort_keys" json:"sort_keys"` + MaxCount int64 `thrift:"max_count,3" db:"max_count" json:"max_count"` +} + +func NewMultiRemoveRequest() *MultiRemoveRequest { + return &MultiRemoveRequest{} +} + +var MultiRemoveRequest_HashKey_DEFAULT *base.Blob + +func (p *MultiRemoveRequest) GetHashKey() *base.Blob { + if !p.IsSetHashKey() { + return MultiRemoveRequest_HashKey_DEFAULT + } + return p.HashKey +} + +func (p *MultiRemoveRequest) GetSortKeys() []*base.Blob { + return p.SortKeys +} + +func (p *MultiRemoveRequest) GetMaxCount() int64 { + return p.MaxCount +} +func (p *MultiRemoveRequest) IsSetHashKey() bool { + return p.HashKey != nil +} + +func (p *MultiRemoveRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *MultiRemoveRequest) ReadField1(iprot thrift.TProtocol) error { + p.HashKey = &base.Blob{} + if err := p.HashKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) + } + return nil +} + +func (p *MultiRemoveRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.Blob, 0, size) + p.SortKeys = tSlice + for i := 0; i < size; i++ { + _elem1 := &base.Blob{} + if err := _elem1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.SortKeys = append(p.SortKeys, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *MultiRemoveRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.MaxCount = v + } + return nil +} + +func (p *MultiRemoveRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_remove_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *MultiRemoveRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *MultiRemoveRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("sort_keys", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sort_keys: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.SortKeys)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.SortKeys { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sort_keys: ", p), err) + } + return err +} + +func (p *MultiRemoveRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_count", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:max_count: ", p), err) + } + if err := oprot.WriteI64(int64(p.MaxCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_count (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:max_count: ", p), err) + } + return err +} + +func (p *MultiRemoveRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("MultiRemoveRequest(%+v)", *p) +} + +// Attributes: +// - Error +// - Count +// - AppID +// - PartitionIndex +// - Decree +// - Server +type MultiRemoveResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + Count int64 `thrift:"count,2" db:"count" json:"count"` + AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` + Decree int64 `thrift:"decree,5" db:"decree" json:"decree"` + Server string `thrift:"server,6" db:"server" json:"server"` +} + +func NewMultiRemoveResponse() *MultiRemoveResponse { + return &MultiRemoveResponse{} +} + +func (p *MultiRemoveResponse) GetError() int32 { + return p.Error +} + +func (p *MultiRemoveResponse) GetCount() int64 { + return p.Count +} + +func (p *MultiRemoveResponse) GetAppID() int32 { + return p.AppID +} + +func (p *MultiRemoveResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *MultiRemoveResponse) GetDecree() int64 { + return p.Decree +} + +func (p *MultiRemoveResponse) GetServer() string { + return p.Server +} +func (p *MultiRemoveResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *MultiRemoveResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *MultiRemoveResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Count = v + } + return nil +} + +func (p *MultiRemoveResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *MultiRemoveResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *MultiRemoveResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.Decree = v + } + return nil +} + +func (p *MultiRemoveResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *MultiRemoveResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_remove_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *MultiRemoveResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *MultiRemoveResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("count", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:count: ", p), err) + } + if err := oprot.WriteI64(int64(p.Count)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.count (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:count: ", p), err) + } + return err +} + +func (p *MultiRemoveResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) + } + return err +} + +func (p *MultiRemoveResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) + } + return err +} + +func (p *MultiRemoveResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("decree", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.Decree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.decree (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:decree: ", p), err) + } + return err +} + +func (p *MultiRemoveResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *MultiRemoveResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("MultiRemoveResponse(%+v)", *p) +} + +// Attributes: +// - HashKey +// - SortKeys +// - MaxKvCount +// - MaxKvSize +// - NoValue +// - StartSortkey +// - StopSortkey +// - StartInclusive +// - StopInclusive +// - SortKeyFilterType +// - SortKeyFilterPattern +// - Reverse +type MultiGetRequest struct { + HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` + SortKeys []*base.Blob `thrift:"sort_keys,2" db:"sort_keys" json:"sort_keys"` + MaxKvCount int32 `thrift:"max_kv_count,3" db:"max_kv_count" json:"max_kv_count"` + MaxKvSize int32 `thrift:"max_kv_size,4" db:"max_kv_size" json:"max_kv_size"` + NoValue bool `thrift:"no_value,5" db:"no_value" json:"no_value"` + StartSortkey *base.Blob `thrift:"start_sortkey,6" db:"start_sortkey" json:"start_sortkey"` + StopSortkey *base.Blob `thrift:"stop_sortkey,7" db:"stop_sortkey" json:"stop_sortkey"` + StartInclusive bool `thrift:"start_inclusive,8" db:"start_inclusive" json:"start_inclusive"` + StopInclusive bool `thrift:"stop_inclusive,9" db:"stop_inclusive" json:"stop_inclusive"` + SortKeyFilterType FilterType `thrift:"sort_key_filter_type,10" db:"sort_key_filter_type" json:"sort_key_filter_type"` + SortKeyFilterPattern *base.Blob `thrift:"sort_key_filter_pattern,11" db:"sort_key_filter_pattern" json:"sort_key_filter_pattern"` + Reverse bool `thrift:"reverse,12" db:"reverse" json:"reverse"` +} + +func NewMultiGetRequest() *MultiGetRequest { + return &MultiGetRequest{} +} + +var MultiGetRequest_HashKey_DEFAULT *base.Blob + +func (p *MultiGetRequest) GetHashKey() *base.Blob { + if !p.IsSetHashKey() { + return MultiGetRequest_HashKey_DEFAULT + } + return p.HashKey +} + +func (p *MultiGetRequest) GetSortKeys() []*base.Blob { + return p.SortKeys +} + +func (p *MultiGetRequest) GetMaxKvCount() int32 { + return p.MaxKvCount +} + +func (p *MultiGetRequest) GetMaxKvSize() int32 { + return p.MaxKvSize +} + +func (p *MultiGetRequest) GetNoValue() bool { + return p.NoValue +} + +var MultiGetRequest_StartSortkey_DEFAULT *base.Blob + +func (p *MultiGetRequest) GetStartSortkey() *base.Blob { + if !p.IsSetStartSortkey() { + return MultiGetRequest_StartSortkey_DEFAULT + } + return p.StartSortkey +} + +var MultiGetRequest_StopSortkey_DEFAULT *base.Blob + +func (p *MultiGetRequest) GetStopSortkey() *base.Blob { + if !p.IsSetStopSortkey() { + return MultiGetRequest_StopSortkey_DEFAULT + } + return p.StopSortkey +} + +func (p *MultiGetRequest) GetStartInclusive() bool { + return p.StartInclusive +} + +func (p *MultiGetRequest) GetStopInclusive() bool { + return p.StopInclusive +} + +func (p *MultiGetRequest) GetSortKeyFilterType() FilterType { + return p.SortKeyFilterType +} + +var MultiGetRequest_SortKeyFilterPattern_DEFAULT *base.Blob + +func (p *MultiGetRequest) GetSortKeyFilterPattern() *base.Blob { + if !p.IsSetSortKeyFilterPattern() { + return MultiGetRequest_SortKeyFilterPattern_DEFAULT + } + return p.SortKeyFilterPattern +} + +func (p *MultiGetRequest) GetReverse() bool { + return p.Reverse +} +func (p *MultiGetRequest) IsSetHashKey() bool { + return p.HashKey != nil +} + +func (p *MultiGetRequest) IsSetStartSortkey() bool { + return p.StartSortkey != nil +} + +func (p *MultiGetRequest) IsSetStopSortkey() bool { + return p.StopSortkey != nil +} + +func (p *MultiGetRequest) IsSetSortKeyFilterPattern() bool { + return p.SortKeyFilterPattern != nil +} + +func (p *MultiGetRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.I32 { + if err := p.ReadField10(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField11(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField12(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *MultiGetRequest) ReadField1(iprot thrift.TProtocol) error { + p.HashKey = &base.Blob{} + if err := p.HashKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) + } + return nil +} + +func (p *MultiGetRequest) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*base.Blob, 0, size) + p.SortKeys = tSlice + for i := 0; i < size; i++ { + _elem2 := &base.Blob{} + if err := _elem2.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) + } + p.SortKeys = append(p.SortKeys, _elem2) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *MultiGetRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.MaxKvCount = v + } + return nil +} + +func (p *MultiGetRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.MaxKvSize = v + } + return nil +} + +func (p *MultiGetRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.NoValue = v + } + return nil +} + +func (p *MultiGetRequest) ReadField6(iprot thrift.TProtocol) error { + p.StartSortkey = &base.Blob{} + if err := p.StartSortkey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StartSortkey), err) + } + return nil +} + +func (p *MultiGetRequest) ReadField7(iprot thrift.TProtocol) error { + p.StopSortkey = &base.Blob{} + if err := p.StopSortkey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StopSortkey), err) + } + return nil +} + +func (p *MultiGetRequest) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.StartInclusive = v + } + return nil +} + +func (p *MultiGetRequest) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.StopInclusive = v + } + return nil +} + +func (p *MultiGetRequest) ReadField10(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + temp := FilterType(v) + p.SortKeyFilterType = temp + } + return nil +} + +func (p *MultiGetRequest) ReadField11(iprot thrift.TProtocol) error { + p.SortKeyFilterPattern = &base.Blob{} + if err := p.SortKeyFilterPattern.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SortKeyFilterPattern), err) + } + return nil +} + +func (p *MultiGetRequest) ReadField12(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 12: ", err) + } else { + p.Reverse = v + } + return nil +} + +func (p *MultiGetRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_get_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + if err := p.writeField11(oprot); err != nil { + return err + } + if err := p.writeField12(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *MultiGetRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *MultiGetRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("sort_keys", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sort_keys: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.SortKeys)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.SortKeys { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sort_keys: ", p), err) + } + return err +} + +func (p *MultiGetRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_kv_count", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:max_kv_count: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxKvCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_kv_count (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:max_kv_count: ", p), err) + } + return err +} + +func (p *MultiGetRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("max_kv_size", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:max_kv_size: ", p), err) + } + if err := oprot.WriteI32(int32(p.MaxKvSize)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.max_kv_size (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:max_kv_size: ", p), err) + } + return err +} + +func (p *MultiGetRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("no_value", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:no_value: ", p), err) + } + if err := oprot.WriteBool(bool(p.NoValue)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.no_value (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:no_value: ", p), err) + } + return err +} + +func (p *MultiGetRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_sortkey", thrift.STRUCT, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:start_sortkey: ", p), err) + } + if err := p.StartSortkey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StartSortkey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:start_sortkey: ", p), err) + } + return err +} + +func (p *MultiGetRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("stop_sortkey", thrift.STRUCT, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:stop_sortkey: ", p), err) + } + if err := p.StopSortkey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StopSortkey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:stop_sortkey: ", p), err) + } + return err +} + +func (p *MultiGetRequest) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_inclusive", thrift.BOOL, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:start_inclusive: ", p), err) + } + if err := oprot.WriteBool(bool(p.StartInclusive)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.start_inclusive (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:start_inclusive: ", p), err) + } + return err +} + +func (p *MultiGetRequest) writeField9(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("stop_inclusive", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:stop_inclusive: ", p), err) + } + if err := oprot.WriteBool(bool(p.StopInclusive)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.stop_inclusive (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:stop_inclusive: ", p), err) + } + return err +} + +func (p *MultiGetRequest) writeField10(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("sort_key_filter_type", thrift.I32, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:sort_key_filter_type: ", p), err) + } + if err := oprot.WriteI32(int32(p.SortKeyFilterType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.sort_key_filter_type (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:sort_key_filter_type: ", p), err) + } + return err +} + +func (p *MultiGetRequest) writeField11(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("sort_key_filter_pattern", thrift.STRUCT, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:sort_key_filter_pattern: ", p), err) + } + if err := p.SortKeyFilterPattern.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SortKeyFilterPattern), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:sort_key_filter_pattern: ", p), err) + } + return err +} + +func (p *MultiGetRequest) writeField12(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("reverse", thrift.BOOL, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:reverse: ", p), err) + } + if err := oprot.WriteBool(bool(p.Reverse)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.reverse (12) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:reverse: ", p), err) + } + return err +} + +func (p *MultiGetRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("MultiGetRequest(%+v)", *p) +} + +// Attributes: +// - Error +// - Kvs +// - AppID +// - PartitionIndex +// - Server +type MultiGetResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + Kvs []*KeyValue `thrift:"kvs,2" db:"kvs" json:"kvs"` + AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` + // unused field # 5 + Server string `thrift:"server,6" db:"server" json:"server"` +} + +func NewMultiGetResponse() *MultiGetResponse { + return &MultiGetResponse{} +} + +func (p *MultiGetResponse) GetError() int32 { + return p.Error +} + +func (p *MultiGetResponse) GetKvs() []*KeyValue { + return p.Kvs +} + +func (p *MultiGetResponse) GetAppID() int32 { + return p.AppID +} + +func (p *MultiGetResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *MultiGetResponse) GetServer() string { + return p.Server +} +func (p *MultiGetResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *MultiGetResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *MultiGetResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*KeyValue, 0, size) + p.Kvs = tSlice + for i := 0; i < size; i++ { + _elem3 := &KeyValue{} + if err := _elem3.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err) + } + p.Kvs = append(p.Kvs, _elem3) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *MultiGetResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *MultiGetResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *MultiGetResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *MultiGetResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_get_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *MultiGetResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *MultiGetResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("kvs", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:kvs: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Kvs)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Kvs { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:kvs: ", p), err) + } + return err +} + +func (p *MultiGetResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) + } + return err +} + +func (p *MultiGetResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) + } + return err +} + +func (p *MultiGetResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *MultiGetResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("MultiGetResponse(%+v)", *p) +} + +// Attributes: +// - HashKey +// - SortKey +type FullKey struct { + HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` + SortKey *base.Blob `thrift:"sort_key,2" db:"sort_key" json:"sort_key"` +} + +func NewFullKey() *FullKey { + return &FullKey{} +} + +var FullKey_HashKey_DEFAULT *base.Blob + +func (p *FullKey) GetHashKey() *base.Blob { + if !p.IsSetHashKey() { + return FullKey_HashKey_DEFAULT + } + return p.HashKey +} + +var FullKey_SortKey_DEFAULT *base.Blob + +func (p *FullKey) GetSortKey() *base.Blob { + if !p.IsSetSortKey() { + return FullKey_SortKey_DEFAULT + } + return p.SortKey +} +func (p *FullKey) IsSetHashKey() bool { + return p.HashKey != nil +} + +func (p *FullKey) IsSetSortKey() bool { + return p.SortKey != nil +} + +func (p *FullKey) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *FullKey) ReadField1(iprot thrift.TProtocol) error { + p.HashKey = &base.Blob{} + if err := p.HashKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) + } + return nil +} + +func (p *FullKey) ReadField2(iprot thrift.TProtocol) error { + p.SortKey = &base.Blob{} + if err := p.SortKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SortKey), err) + } + return nil +} + +func (p *FullKey) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("full_key"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *FullKey) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *FullKey) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("sort_key", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sort_key: ", p), err) + } + if err := p.SortKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SortKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sort_key: ", p), err) + } + return err +} + +func (p *FullKey) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("FullKey(%+v)", *p) +} + +// Attributes: +// - Keys +type BatchGetRequest struct { + Keys []*FullKey `thrift:"keys,1" db:"keys" json:"keys"` +} + +func NewBatchGetRequest() *BatchGetRequest { + return &BatchGetRequest{} +} + +func (p *BatchGetRequest) GetKeys() []*FullKey { + return p.Keys +} +func (p *BatchGetRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BatchGetRequest) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*FullKey, 0, size) + p.Keys = tSlice + for i := 0; i < size; i++ { + _elem4 := &FullKey{} + if err := _elem4.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) + } + p.Keys = append(p.Keys, _elem4) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *BatchGetRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("batch_get_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BatchGetRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("keys", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:keys: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Keys)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Keys { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:keys: ", p), err) + } + return err +} + +func (p *BatchGetRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BatchGetRequest(%+v)", *p) +} + +// Attributes: +// - HashKey +// - SortKey +// - Value +type FullData struct { + HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` + SortKey *base.Blob `thrift:"sort_key,2" db:"sort_key" json:"sort_key"` + Value *base.Blob `thrift:"value,3" db:"value" json:"value"` +} + +func NewFullData() *FullData { + return &FullData{} +} + +var FullData_HashKey_DEFAULT *base.Blob + +func (p *FullData) GetHashKey() *base.Blob { + if !p.IsSetHashKey() { + return FullData_HashKey_DEFAULT + } + return p.HashKey +} + +var FullData_SortKey_DEFAULT *base.Blob + +func (p *FullData) GetSortKey() *base.Blob { + if !p.IsSetSortKey() { + return FullData_SortKey_DEFAULT + } + return p.SortKey +} + +var FullData_Value_DEFAULT *base.Blob + +func (p *FullData) GetValue() *base.Blob { + if !p.IsSetValue() { + return FullData_Value_DEFAULT + } + return p.Value +} +func (p *FullData) IsSetHashKey() bool { + return p.HashKey != nil +} + +func (p *FullData) IsSetSortKey() bool { + return p.SortKey != nil +} + +func (p *FullData) IsSetValue() bool { + return p.Value != nil +} + +func (p *FullData) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *FullData) ReadField1(iprot thrift.TProtocol) error { + p.HashKey = &base.Blob{} + if err := p.HashKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) + } + return nil +} + +func (p *FullData) ReadField2(iprot thrift.TProtocol) error { + p.SortKey = &base.Blob{} + if err := p.SortKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SortKey), err) + } + return nil +} + +func (p *FullData) ReadField3(iprot thrift.TProtocol) error { + p.Value = &base.Blob{} + if err := p.Value.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err) + } + return nil +} + +func (p *FullData) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("full_data"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *FullData) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *FullData) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("sort_key", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sort_key: ", p), err) + } + if err := p.SortKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SortKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sort_key: ", p), err) + } + return err +} + +func (p *FullData) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:value: ", p), err) + } + if err := p.Value.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:value: ", p), err) + } + return err +} + +func (p *FullData) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("FullData(%+v)", *p) +} + +// Attributes: +// - Error +// - Data +// - AppID +// - PartitionIndex +// - Server +type BatchGetResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + Data []*FullData `thrift:"data,2" db:"data" json:"data"` + AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` + // unused field # 5 + Server string `thrift:"server,6" db:"server" json:"server"` +} + +func NewBatchGetResponse() *BatchGetResponse { + return &BatchGetResponse{} +} + +func (p *BatchGetResponse) GetError() int32 { + return p.Error +} + +func (p *BatchGetResponse) GetData() []*FullData { + return p.Data +} + +func (p *BatchGetResponse) GetAppID() int32 { + return p.AppID +} + +func (p *BatchGetResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *BatchGetResponse) GetServer() string { + return p.Server +} +func (p *BatchGetResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BatchGetResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *BatchGetResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*FullData, 0, size) + p.Data = tSlice + for i := 0; i < size; i++ { + _elem5 := &FullData{} + if err := _elem5.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) + } + p.Data = append(p.Data, _elem5) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *BatchGetResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *BatchGetResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *BatchGetResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *BatchGetResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("batch_get_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BatchGetResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *BatchGetResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("data", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:data: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Data)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Data { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:data: ", p), err) + } + return err +} + +func (p *BatchGetResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) + } + return err +} + +func (p *BatchGetResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) + } + return err +} + +func (p *BatchGetResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *BatchGetResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BatchGetResponse(%+v)", *p) +} + +// Attributes: +// - Key +// - Increment +// - ExpireTsSeconds +type IncrRequest struct { + Key *base.Blob `thrift:"key,1" db:"key" json:"key"` + Increment int64 `thrift:"increment,2" db:"increment" json:"increment"` + ExpireTsSeconds int32 `thrift:"expire_ts_seconds,3" db:"expire_ts_seconds" json:"expire_ts_seconds"` +} + +func NewIncrRequest() *IncrRequest { + return &IncrRequest{} +} + +var IncrRequest_Key_DEFAULT *base.Blob + +func (p *IncrRequest) GetKey() *base.Blob { + if !p.IsSetKey() { + return IncrRequest_Key_DEFAULT + } + return p.Key +} + +func (p *IncrRequest) GetIncrement() int64 { + return p.Increment +} + +func (p *IncrRequest) GetExpireTsSeconds() int32 { + return p.ExpireTsSeconds +} +func (p *IncrRequest) IsSetKey() bool { + return p.Key != nil +} + +func (p *IncrRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *IncrRequest) ReadField1(iprot thrift.TProtocol) error { + p.Key = &base.Blob{} + if err := p.Key.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) + } + return nil +} + +func (p *IncrRequest) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Increment = v + } + return nil +} + +func (p *IncrRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ExpireTsSeconds = v + } + return nil +} + +func (p *IncrRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("incr_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *IncrRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := p.Key.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *IncrRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("increment", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:increment: ", p), err) + } + if err := oprot.WriteI64(int64(p.Increment)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.increment (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:increment: ", p), err) + } + return err +} + +func (p *IncrRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("expire_ts_seconds", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:expire_ts_seconds: ", p), err) + } + if err := oprot.WriteI32(int32(p.ExpireTsSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.expire_ts_seconds (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:expire_ts_seconds: ", p), err) + } + return err +} + +func (p *IncrRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("IncrRequest(%+v)", *p) +} + +// Attributes: +// - Error +// - NewValue_ +// - AppID +// - PartitionIndex +// - Decree +// - Server +type IncrResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + NewValue_ int64 `thrift:"new_value,2" db:"new_value" json:"new_value"` + AppID int32 `thrift:"app_id,3" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,4" db:"partition_index" json:"partition_index"` + Decree int64 `thrift:"decree,5" db:"decree" json:"decree"` + Server string `thrift:"server,6" db:"server" json:"server"` +} + +func NewIncrResponse() *IncrResponse { + return &IncrResponse{} +} + +func (p *IncrResponse) GetError() int32 { + return p.Error +} + +func (p *IncrResponse) GetNewValue_() int64 { + return p.NewValue_ +} + +func (p *IncrResponse) GetAppID() int32 { + return p.AppID +} + +func (p *IncrResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *IncrResponse) GetDecree() int64 { + return p.Decree +} + +func (p *IncrResponse) GetServer() string { + return p.Server +} +func (p *IncrResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I64 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *IncrResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *IncrResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.NewValue_ = v + } + return nil +} + +func (p *IncrResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *IncrResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *IncrResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.Decree = v + } + return nil +} + +func (p *IncrResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *IncrResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("incr_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *IncrResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *IncrResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("new_value", thrift.I64, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:new_value: ", p), err) + } + if err := oprot.WriteI64(int64(p.NewValue_)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.new_value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:new_value: ", p), err) + } + return err +} + +func (p *IncrResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:app_id: ", p), err) + } + return err +} + +func (p *IncrResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:partition_index: ", p), err) + } + return err +} + +func (p *IncrResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("decree", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.Decree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.decree (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:decree: ", p), err) + } + return err +} + +func (p *IncrResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *IncrResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("IncrResponse(%+v)", *p) +} + +// Attributes: +// - HashKey +// - CheckSortKey +// - CheckType +// - CheckOperand +// - SetDiffSortKey +// - SetSortKey +// - SetValue +// - SetExpireTsSeconds +// - ReturnCheckValue +type CheckAndSetRequest struct { + HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` + CheckSortKey *base.Blob `thrift:"check_sort_key,2" db:"check_sort_key" json:"check_sort_key"` + CheckType CasCheckType `thrift:"check_type,3" db:"check_type" json:"check_type"` + CheckOperand *base.Blob `thrift:"check_operand,4" db:"check_operand" json:"check_operand"` + SetDiffSortKey bool `thrift:"set_diff_sort_key,5" db:"set_diff_sort_key" json:"set_diff_sort_key"` + SetSortKey *base.Blob `thrift:"set_sort_key,6" db:"set_sort_key" json:"set_sort_key"` + SetValue *base.Blob `thrift:"set_value,7" db:"set_value" json:"set_value"` + SetExpireTsSeconds int32 `thrift:"set_expire_ts_seconds,8" db:"set_expire_ts_seconds" json:"set_expire_ts_seconds"` + ReturnCheckValue bool `thrift:"return_check_value,9" db:"return_check_value" json:"return_check_value"` +} + +func NewCheckAndSetRequest() *CheckAndSetRequest { + return &CheckAndSetRequest{} +} + +var CheckAndSetRequest_HashKey_DEFAULT *base.Blob + +func (p *CheckAndSetRequest) GetHashKey() *base.Blob { + if !p.IsSetHashKey() { + return CheckAndSetRequest_HashKey_DEFAULT + } + return p.HashKey +} + +var CheckAndSetRequest_CheckSortKey_DEFAULT *base.Blob + +func (p *CheckAndSetRequest) GetCheckSortKey() *base.Blob { + if !p.IsSetCheckSortKey() { + return CheckAndSetRequest_CheckSortKey_DEFAULT + } + return p.CheckSortKey +} + +func (p *CheckAndSetRequest) GetCheckType() CasCheckType { + return p.CheckType +} + +var CheckAndSetRequest_CheckOperand_DEFAULT *base.Blob + +func (p *CheckAndSetRequest) GetCheckOperand() *base.Blob { + if !p.IsSetCheckOperand() { + return CheckAndSetRequest_CheckOperand_DEFAULT + } + return p.CheckOperand +} + +func (p *CheckAndSetRequest) GetSetDiffSortKey() bool { + return p.SetDiffSortKey +} + +var CheckAndSetRequest_SetSortKey_DEFAULT *base.Blob + +func (p *CheckAndSetRequest) GetSetSortKey() *base.Blob { + if !p.IsSetSetSortKey() { + return CheckAndSetRequest_SetSortKey_DEFAULT + } + return p.SetSortKey +} + +var CheckAndSetRequest_SetValue_DEFAULT *base.Blob + +func (p *CheckAndSetRequest) GetSetValue() *base.Blob { + if !p.IsSetSetValue() { + return CheckAndSetRequest_SetValue_DEFAULT + } + return p.SetValue +} + +func (p *CheckAndSetRequest) GetSetExpireTsSeconds() int32 { + return p.SetExpireTsSeconds +} + +func (p *CheckAndSetRequest) GetReturnCheckValue() bool { + return p.ReturnCheckValue +} +func (p *CheckAndSetRequest) IsSetHashKey() bool { + return p.HashKey != nil +} + +func (p *CheckAndSetRequest) IsSetCheckSortKey() bool { + return p.CheckSortKey != nil +} + +func (p *CheckAndSetRequest) IsSetCheckOperand() bool { + return p.CheckOperand != nil +} + +func (p *CheckAndSetRequest) IsSetSetSortKey() bool { + return p.SetSortKey != nil +} + +func (p *CheckAndSetRequest) IsSetSetValue() bool { + return p.SetValue != nil +} + +func (p *CheckAndSetRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.I32 { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CheckAndSetRequest) ReadField1(iprot thrift.TProtocol) error { + p.HashKey = &base.Blob{} + if err := p.HashKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) + } + return nil +} + +func (p *CheckAndSetRequest) ReadField2(iprot thrift.TProtocol) error { + p.CheckSortKey = &base.Blob{} + if err := p.CheckSortKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckSortKey), err) + } + return nil +} + +func (p *CheckAndSetRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := CasCheckType(v) + p.CheckType = temp + } + return nil +} + +func (p *CheckAndSetRequest) ReadField4(iprot thrift.TProtocol) error { + p.CheckOperand = &base.Blob{} + if err := p.CheckOperand.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckOperand), err) + } + return nil +} + +func (p *CheckAndSetRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.SetDiffSortKey = v + } + return nil +} + +func (p *CheckAndSetRequest) ReadField6(iprot thrift.TProtocol) error { + p.SetSortKey = &base.Blob{} + if err := p.SetSortKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SetSortKey), err) + } + return nil +} + +func (p *CheckAndSetRequest) ReadField7(iprot thrift.TProtocol) error { + p.SetValue = &base.Blob{} + if err := p.SetValue.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SetValue), err) + } + return nil +} + +func (p *CheckAndSetRequest) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.SetExpireTsSeconds = v + } + return nil +} + +func (p *CheckAndSetRequest) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.ReturnCheckValue = v + } + return nil +} + +func (p *CheckAndSetRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_set_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CheckAndSetRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *CheckAndSetRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_sort_key", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:check_sort_key: ", p), err) + } + if err := p.CheckSortKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckSortKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:check_sort_key: ", p), err) + } + return err +} + +func (p *CheckAndSetRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:check_type: ", p), err) + } + if err := oprot.WriteI32(int32(p.CheckType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.check_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:check_type: ", p), err) + } + return err +} + +func (p *CheckAndSetRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_operand", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:check_operand: ", p), err) + } + if err := p.CheckOperand.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckOperand), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:check_operand: ", p), err) + } + return err +} + +func (p *CheckAndSetRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("set_diff_sort_key", thrift.BOOL, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:set_diff_sort_key: ", p), err) + } + if err := oprot.WriteBool(bool(p.SetDiffSortKey)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.set_diff_sort_key (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:set_diff_sort_key: ", p), err) + } + return err +} + +func (p *CheckAndSetRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("set_sort_key", thrift.STRUCT, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:set_sort_key: ", p), err) + } + if err := p.SetSortKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SetSortKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:set_sort_key: ", p), err) + } + return err +} + +func (p *CheckAndSetRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("set_value", thrift.STRUCT, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:set_value: ", p), err) + } + if err := p.SetValue.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SetValue), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:set_value: ", p), err) + } + return err +} + +func (p *CheckAndSetRequest) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("set_expire_ts_seconds", thrift.I32, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:set_expire_ts_seconds: ", p), err) + } + if err := oprot.WriteI32(int32(p.SetExpireTsSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.set_expire_ts_seconds (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:set_expire_ts_seconds: ", p), err) + } + return err +} + +func (p *CheckAndSetRequest) writeField9(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("return_check_value", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:return_check_value: ", p), err) + } + if err := oprot.WriteBool(bool(p.ReturnCheckValue)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.return_check_value (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:return_check_value: ", p), err) + } + return err +} + +func (p *CheckAndSetRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CheckAndSetRequest(%+v)", *p) +} + +// Attributes: +// - Error +// - CheckValueReturned +// - CheckValueExist +// - CheckValue +// - AppID +// - PartitionIndex +// - Decree +// - Server +type CheckAndSetResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + CheckValueReturned bool `thrift:"check_value_returned,2" db:"check_value_returned" json:"check_value_returned"` + CheckValueExist bool `thrift:"check_value_exist,3" db:"check_value_exist" json:"check_value_exist"` + CheckValue *base.Blob `thrift:"check_value,4" db:"check_value" json:"check_value"` + AppID int32 `thrift:"app_id,5" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,6" db:"partition_index" json:"partition_index"` + Decree int64 `thrift:"decree,7" db:"decree" json:"decree"` + Server string `thrift:"server,8" db:"server" json:"server"` +} + +func NewCheckAndSetResponse() *CheckAndSetResponse { + return &CheckAndSetResponse{} +} + +func (p *CheckAndSetResponse) GetError() int32 { + return p.Error +} + +func (p *CheckAndSetResponse) GetCheckValueReturned() bool { + return p.CheckValueReturned +} + +func (p *CheckAndSetResponse) GetCheckValueExist() bool { + return p.CheckValueExist +} + +var CheckAndSetResponse_CheckValue_DEFAULT *base.Blob + +func (p *CheckAndSetResponse) GetCheckValue() *base.Blob { + if !p.IsSetCheckValue() { + return CheckAndSetResponse_CheckValue_DEFAULT + } + return p.CheckValue +} + +func (p *CheckAndSetResponse) GetAppID() int32 { + return p.AppID +} + +func (p *CheckAndSetResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *CheckAndSetResponse) GetDecree() int64 { + return p.Decree +} + +func (p *CheckAndSetResponse) GetServer() string { + return p.Server +} +func (p *CheckAndSetResponse) IsSetCheckValue() bool { + return p.CheckValue != nil +} + +func (p *CheckAndSetResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I32 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I64 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRING { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CheckAndSetResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *CheckAndSetResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.CheckValueReturned = v + } + return nil +} + +func (p *CheckAndSetResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.CheckValueExist = v + } + return nil +} + +func (p *CheckAndSetResponse) ReadField4(iprot thrift.TProtocol) error { + p.CheckValue = &base.Blob{} + if err := p.CheckValue.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckValue), err) + } + return nil +} + +func (p *CheckAndSetResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *CheckAndSetResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *CheckAndSetResponse) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.Decree = v + } + return nil +} + +func (p *CheckAndSetResponse) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *CheckAndSetResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_set_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CheckAndSetResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *CheckAndSetResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_value_returned", thrift.BOOL, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:check_value_returned: ", p), err) + } + if err := oprot.WriteBool(bool(p.CheckValueReturned)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.check_value_returned (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:check_value_returned: ", p), err) + } + return err +} + +func (p *CheckAndSetResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_value_exist", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:check_value_exist: ", p), err) + } + if err := oprot.WriteBool(bool(p.CheckValueExist)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.check_value_exist (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:check_value_exist: ", p), err) + } + return err +} + +func (p *CheckAndSetResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_value", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:check_value: ", p), err) + } + if err := p.CheckValue.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckValue), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:check_value: ", p), err) + } + return err +} + +func (p *CheckAndSetResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:app_id: ", p), err) + } + return err +} + +func (p *CheckAndSetResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:partition_index: ", p), err) + } + return err +} + +func (p *CheckAndSetResponse) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("decree", thrift.I64, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.Decree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.decree (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:decree: ", p), err) + } + return err +} + +func (p *CheckAndSetResponse) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:server: ", p), err) + } + return err +} + +func (p *CheckAndSetResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CheckAndSetResponse(%+v)", *p) +} + +// Attributes: +// - Operation +// - SortKey +// - Value +// - SetExpireTsSeconds +type Mutate struct { + Operation MutateOperation `thrift:"operation,1" db:"operation" json:"operation"` + SortKey *base.Blob `thrift:"sort_key,2" db:"sort_key" json:"sort_key"` + Value *base.Blob `thrift:"value,3" db:"value" json:"value"` + SetExpireTsSeconds int32 `thrift:"set_expire_ts_seconds,4" db:"set_expire_ts_seconds" json:"set_expire_ts_seconds"` +} + +func NewMutate() *Mutate { + return &Mutate{} +} + +func (p *Mutate) GetOperation() MutateOperation { + return p.Operation +} + +var Mutate_SortKey_DEFAULT *base.Blob + +func (p *Mutate) GetSortKey() *base.Blob { + if !p.IsSetSortKey() { + return Mutate_SortKey_DEFAULT + } + return p.SortKey +} + +var Mutate_Value_DEFAULT *base.Blob + +func (p *Mutate) GetValue() *base.Blob { + if !p.IsSetValue() { + return Mutate_Value_DEFAULT + } + return p.Value +} + +func (p *Mutate) GetSetExpireTsSeconds() int32 { + return p.SetExpireTsSeconds +} +func (p *Mutate) IsSetSortKey() bool { + return p.SortKey != nil +} + +func (p *Mutate) IsSetValue() bool { + return p.Value != nil +} + +func (p *Mutate) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Mutate) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + temp := MutateOperation(v) + p.Operation = temp + } + return nil +} + +func (p *Mutate) ReadField2(iprot thrift.TProtocol) error { + p.SortKey = &base.Blob{} + if err := p.SortKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SortKey), err) + } + return nil +} + +func (p *Mutate) ReadField3(iprot thrift.TProtocol) error { + p.Value = &base.Blob{} + if err := p.Value.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Value), err) + } + return nil +} + +func (p *Mutate) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.SetExpireTsSeconds = v + } + return nil +} + +func (p *Mutate) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("mutate"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Mutate) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("operation", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err) + } + if err := oprot.WriteI32(int32(p.Operation)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err) + } + return err +} + +func (p *Mutate) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("sort_key", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:sort_key: ", p), err) + } + if err := p.SortKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SortKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:sort_key: ", p), err) + } + return err +} + +func (p *Mutate) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("value", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:value: ", p), err) + } + if err := p.Value.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Value), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:value: ", p), err) + } + return err +} + +func (p *Mutate) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("set_expire_ts_seconds", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:set_expire_ts_seconds: ", p), err) + } + if err := oprot.WriteI32(int32(p.SetExpireTsSeconds)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.set_expire_ts_seconds (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:set_expire_ts_seconds: ", p), err) + } + return err +} + +func (p *Mutate) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Mutate(%+v)", *p) +} + +// Attributes: +// - HashKey +// - CheckSortKey +// - CheckType +// - CheckOperand +// - MutateList +// - ReturnCheckValue +type CheckAndMutateRequest struct { + HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` + CheckSortKey *base.Blob `thrift:"check_sort_key,2" db:"check_sort_key" json:"check_sort_key"` + CheckType CasCheckType `thrift:"check_type,3" db:"check_type" json:"check_type"` + CheckOperand *base.Blob `thrift:"check_operand,4" db:"check_operand" json:"check_operand"` + MutateList []*Mutate `thrift:"mutate_list,5" db:"mutate_list" json:"mutate_list"` + ReturnCheckValue bool `thrift:"return_check_value,6" db:"return_check_value" json:"return_check_value"` +} + +func NewCheckAndMutateRequest() *CheckAndMutateRequest { + return &CheckAndMutateRequest{} +} + +var CheckAndMutateRequest_HashKey_DEFAULT *base.Blob + +func (p *CheckAndMutateRequest) GetHashKey() *base.Blob { + if !p.IsSetHashKey() { + return CheckAndMutateRequest_HashKey_DEFAULT + } + return p.HashKey +} + +var CheckAndMutateRequest_CheckSortKey_DEFAULT *base.Blob + +func (p *CheckAndMutateRequest) GetCheckSortKey() *base.Blob { + if !p.IsSetCheckSortKey() { + return CheckAndMutateRequest_CheckSortKey_DEFAULT + } + return p.CheckSortKey +} + +func (p *CheckAndMutateRequest) GetCheckType() CasCheckType { + return p.CheckType +} + +var CheckAndMutateRequest_CheckOperand_DEFAULT *base.Blob + +func (p *CheckAndMutateRequest) GetCheckOperand() *base.Blob { + if !p.IsSetCheckOperand() { + return CheckAndMutateRequest_CheckOperand_DEFAULT + } + return p.CheckOperand +} + +func (p *CheckAndMutateRequest) GetMutateList() []*Mutate { + return p.MutateList +} + +func (p *CheckAndMutateRequest) GetReturnCheckValue() bool { + return p.ReturnCheckValue +} +func (p *CheckAndMutateRequest) IsSetHashKey() bool { + return p.HashKey != nil +} + +func (p *CheckAndMutateRequest) IsSetCheckSortKey() bool { + return p.CheckSortKey != nil +} + +func (p *CheckAndMutateRequest) IsSetCheckOperand() bool { + return p.CheckOperand != nil +} + +func (p *CheckAndMutateRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.LIST { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CheckAndMutateRequest) ReadField1(iprot thrift.TProtocol) error { + p.HashKey = &base.Blob{} + if err := p.HashKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) + } + return nil +} + +func (p *CheckAndMutateRequest) ReadField2(iprot thrift.TProtocol) error { + p.CheckSortKey = &base.Blob{} + if err := p.CheckSortKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckSortKey), err) + } + return nil +} + +func (p *CheckAndMutateRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + temp := CasCheckType(v) + p.CheckType = temp + } + return nil +} + +func (p *CheckAndMutateRequest) ReadField4(iprot thrift.TProtocol) error { + p.CheckOperand = &base.Blob{} + if err := p.CheckOperand.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckOperand), err) + } + return nil +} + +func (p *CheckAndMutateRequest) ReadField5(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Mutate, 0, size) + p.MutateList = tSlice + for i := 0; i < size; i++ { + _elem6 := &Mutate{} + if err := _elem6.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem6), err) + } + p.MutateList = append(p.MutateList, _elem6) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *CheckAndMutateRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.ReturnCheckValue = v + } + return nil +} + +func (p *CheckAndMutateRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_mutate_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CheckAndMutateRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *CheckAndMutateRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_sort_key", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:check_sort_key: ", p), err) + } + if err := p.CheckSortKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckSortKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:check_sort_key: ", p), err) + } + return err +} + +func (p *CheckAndMutateRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:check_type: ", p), err) + } + if err := oprot.WriteI32(int32(p.CheckType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.check_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:check_type: ", p), err) + } + return err +} + +func (p *CheckAndMutateRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_operand", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:check_operand: ", p), err) + } + if err := p.CheckOperand.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckOperand), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:check_operand: ", p), err) + } + return err +} + +func (p *CheckAndMutateRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("mutate_list", thrift.LIST, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:mutate_list: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.MutateList)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.MutateList { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:mutate_list: ", p), err) + } + return err +} + +func (p *CheckAndMutateRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("return_check_value", thrift.BOOL, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:return_check_value: ", p), err) + } + if err := oprot.WriteBool(bool(p.ReturnCheckValue)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.return_check_value (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:return_check_value: ", p), err) + } + return err +} + +func (p *CheckAndMutateRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CheckAndMutateRequest(%+v)", *p) +} + +// Attributes: +// - Error +// - CheckValueReturned +// - CheckValueExist +// - CheckValue +// - AppID +// - PartitionIndex +// - Decree +// - Server +type CheckAndMutateResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + CheckValueReturned bool `thrift:"check_value_returned,2" db:"check_value_returned" json:"check_value_returned"` + CheckValueExist bool `thrift:"check_value_exist,3" db:"check_value_exist" json:"check_value_exist"` + CheckValue *base.Blob `thrift:"check_value,4" db:"check_value" json:"check_value"` + AppID int32 `thrift:"app_id,5" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,6" db:"partition_index" json:"partition_index"` + Decree int64 `thrift:"decree,7" db:"decree" json:"decree"` + Server string `thrift:"server,8" db:"server" json:"server"` +} + +func NewCheckAndMutateResponse() *CheckAndMutateResponse { + return &CheckAndMutateResponse{} +} + +func (p *CheckAndMutateResponse) GetError() int32 { + return p.Error +} + +func (p *CheckAndMutateResponse) GetCheckValueReturned() bool { + return p.CheckValueReturned +} + +func (p *CheckAndMutateResponse) GetCheckValueExist() bool { + return p.CheckValueExist +} + +var CheckAndMutateResponse_CheckValue_DEFAULT *base.Blob + +func (p *CheckAndMutateResponse) GetCheckValue() *base.Blob { + if !p.IsSetCheckValue() { + return CheckAndMutateResponse_CheckValue_DEFAULT + } + return p.CheckValue +} + +func (p *CheckAndMutateResponse) GetAppID() int32 { + return p.AppID +} + +func (p *CheckAndMutateResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *CheckAndMutateResponse) GetDecree() int64 { + return p.Decree +} + +func (p *CheckAndMutateResponse) GetServer() string { + return p.Server +} +func (p *CheckAndMutateResponse) IsSetCheckValue() bool { + return p.CheckValue != nil +} + +func (p *CheckAndMutateResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.I32 { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I64 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRING { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.CheckValueReturned = v + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.CheckValueExist = v + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField4(iprot thrift.TProtocol) error { + p.CheckValue = &base.Blob{} + if err := p.CheckValue.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CheckValue), err) + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.Decree = v + } + return nil +} + +func (p *CheckAndMutateResponse) ReadField8(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 8: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *CheckAndMutateResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_mutate_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *CheckAndMutateResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_value_returned", thrift.BOOL, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:check_value_returned: ", p), err) + } + if err := oprot.WriteBool(bool(p.CheckValueReturned)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.check_value_returned (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:check_value_returned: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_value_exist", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:check_value_exist: ", p), err) + } + if err := oprot.WriteBool(bool(p.CheckValueExist)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.check_value_exist (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:check_value_exist: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("check_value", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:check_value: ", p), err) + } + if err := p.CheckValue.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CheckValue), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:check_value: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:app_id: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:partition_index: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("decree", thrift.I64, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:decree: ", p), err) + } + if err := oprot.WriteI64(int64(p.Decree)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.decree (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:decree: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:server: ", p), err) + } + return err +} + +func (p *CheckAndMutateResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CheckAndMutateResponse(%+v)", *p) +} + +// Attributes: +// - StartKey +// - StopKey +// - StartInclusive +// - StopInclusive +// - BatchSize +// - NoValue +// - HashKeyFilterType +// - HashKeyFilterPattern +// - SortKeyFilterType +// - SortKeyFilterPattern +// - ValidatePartitionHash +// - ReturnExpireTs +// - FullScan +// - OnlyReturnCount +type GetScannerRequest struct { + StartKey *base.Blob `thrift:"start_key,1" db:"start_key" json:"start_key"` + StopKey *base.Blob `thrift:"stop_key,2" db:"stop_key" json:"stop_key"` + StartInclusive bool `thrift:"start_inclusive,3" db:"start_inclusive" json:"start_inclusive"` + StopInclusive bool `thrift:"stop_inclusive,4" db:"stop_inclusive" json:"stop_inclusive"` + BatchSize int32 `thrift:"batch_size,5" db:"batch_size" json:"batch_size"` + NoValue bool `thrift:"no_value,6" db:"no_value" json:"no_value"` + HashKeyFilterType FilterType `thrift:"hash_key_filter_type,7" db:"hash_key_filter_type" json:"hash_key_filter_type"` + HashKeyFilterPattern *base.Blob `thrift:"hash_key_filter_pattern,8" db:"hash_key_filter_pattern" json:"hash_key_filter_pattern"` + SortKeyFilterType FilterType `thrift:"sort_key_filter_type,9" db:"sort_key_filter_type" json:"sort_key_filter_type"` + SortKeyFilterPattern *base.Blob `thrift:"sort_key_filter_pattern,10" db:"sort_key_filter_pattern" json:"sort_key_filter_pattern"` + ValidatePartitionHash *bool `thrift:"validate_partition_hash,11" db:"validate_partition_hash" json:"validate_partition_hash,omitempty"` + ReturnExpireTs *bool `thrift:"return_expire_ts,12" db:"return_expire_ts" json:"return_expire_ts,omitempty"` + FullScan *bool `thrift:"full_scan,13" db:"full_scan" json:"full_scan,omitempty"` + OnlyReturnCount bool `thrift:"only_return_count,14" db:"only_return_count" json:"only_return_count"` +} + +func NewGetScannerRequest() *GetScannerRequest { + return &GetScannerRequest{} +} + +var GetScannerRequest_StartKey_DEFAULT *base.Blob + +func (p *GetScannerRequest) GetStartKey() *base.Blob { + if !p.IsSetStartKey() { + return GetScannerRequest_StartKey_DEFAULT + } + return p.StartKey +} + +var GetScannerRequest_StopKey_DEFAULT *base.Blob + +func (p *GetScannerRequest) GetStopKey() *base.Blob { + if !p.IsSetStopKey() { + return GetScannerRequest_StopKey_DEFAULT + } + return p.StopKey +} + +func (p *GetScannerRequest) GetStartInclusive() bool { + return p.StartInclusive +} + +func (p *GetScannerRequest) GetStopInclusive() bool { + return p.StopInclusive +} + +func (p *GetScannerRequest) GetBatchSize() int32 { + return p.BatchSize +} + +func (p *GetScannerRequest) GetNoValue() bool { + return p.NoValue +} + +func (p *GetScannerRequest) GetHashKeyFilterType() FilterType { + return p.HashKeyFilterType +} + +var GetScannerRequest_HashKeyFilterPattern_DEFAULT *base.Blob + +func (p *GetScannerRequest) GetHashKeyFilterPattern() *base.Blob { + if !p.IsSetHashKeyFilterPattern() { + return GetScannerRequest_HashKeyFilterPattern_DEFAULT + } + return p.HashKeyFilterPattern +} + +func (p *GetScannerRequest) GetSortKeyFilterType() FilterType { + return p.SortKeyFilterType +} + +var GetScannerRequest_SortKeyFilterPattern_DEFAULT *base.Blob + +func (p *GetScannerRequest) GetSortKeyFilterPattern() *base.Blob { + if !p.IsSetSortKeyFilterPattern() { + return GetScannerRequest_SortKeyFilterPattern_DEFAULT + } + return p.SortKeyFilterPattern +} + +var GetScannerRequest_ValidatePartitionHash_DEFAULT bool + +func (p *GetScannerRequest) GetValidatePartitionHash() bool { + if !p.IsSetValidatePartitionHash() { + return GetScannerRequest_ValidatePartitionHash_DEFAULT + } + return *p.ValidatePartitionHash +} + +var GetScannerRequest_ReturnExpireTs_DEFAULT bool + +func (p *GetScannerRequest) GetReturnExpireTs() bool { + if !p.IsSetReturnExpireTs() { + return GetScannerRequest_ReturnExpireTs_DEFAULT + } + return *p.ReturnExpireTs +} + +var GetScannerRequest_FullScan_DEFAULT bool + +func (p *GetScannerRequest) GetFullScan() bool { + if !p.IsSetFullScan() { + return GetScannerRequest_FullScan_DEFAULT + } + return *p.FullScan +} + +var GetScannerRequest_OnlyReturnCount_DEFAULT bool = false + +func (p *GetScannerRequest) GetOnlyReturnCount() bool { + return p.OnlyReturnCount +} +func (p *GetScannerRequest) IsSetStartKey() bool { + return p.StartKey != nil +} + +func (p *GetScannerRequest) IsSetStopKey() bool { + return p.StopKey != nil +} + +func (p *GetScannerRequest) IsSetHashKeyFilterPattern() bool { + return p.HashKeyFilterPattern != nil +} + +func (p *GetScannerRequest) IsSetSortKeyFilterPattern() bool { + return p.SortKeyFilterPattern != nil +} + +func (p *GetScannerRequest) IsSetValidatePartitionHash() bool { + return p.ValidatePartitionHash != nil +} + +func (p *GetScannerRequest) IsSetReturnExpireTs() bool { + return p.ReturnExpireTs != nil +} + +func (p *GetScannerRequest) IsSetFullScan() bool { + return p.FullScan != nil +} + +func (p *GetScannerRequest) IsSetOnlyReturnCount() bool { + return p.OnlyReturnCount != GetScannerRequest_OnlyReturnCount_DEFAULT +} + +func (p *GetScannerRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.I32 { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField10(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField11(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField12(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 13: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField13(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 14: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField14(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *GetScannerRequest) ReadField1(iprot thrift.TProtocol) error { + p.StartKey = &base.Blob{} + if err := p.StartKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StartKey), err) + } + return nil +} + +func (p *GetScannerRequest) ReadField2(iprot thrift.TProtocol) error { + p.StopKey = &base.Blob{} + if err := p.StopKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.StopKey), err) + } + return nil +} + +func (p *GetScannerRequest) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.StartInclusive = v + } + return nil +} + +func (p *GetScannerRequest) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.StopInclusive = v + } + return nil +} + +func (p *GetScannerRequest) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.BatchSize = v + } + return nil +} + +func (p *GetScannerRequest) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.NoValue = v + } + return nil +} + +func (p *GetScannerRequest) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + temp := FilterType(v) + p.HashKeyFilterType = temp + } + return nil +} + +func (p *GetScannerRequest) ReadField8(iprot thrift.TProtocol) error { + p.HashKeyFilterPattern = &base.Blob{} + if err := p.HashKeyFilterPattern.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKeyFilterPattern), err) + } + return nil +} + +func (p *GetScannerRequest) ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + temp := FilterType(v) + p.SortKeyFilterType = temp + } + return nil +} + +func (p *GetScannerRequest) ReadField10(iprot thrift.TProtocol) error { + p.SortKeyFilterPattern = &base.Blob{} + if err := p.SortKeyFilterPattern.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.SortKeyFilterPattern), err) + } + return nil +} + +func (p *GetScannerRequest) ReadField11(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 11: ", err) + } else { + p.ValidatePartitionHash = &v + } + return nil +} + +func (p *GetScannerRequest) ReadField12(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 12: ", err) + } else { + p.ReturnExpireTs = &v + } + return nil +} + +func (p *GetScannerRequest) ReadField13(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 13: ", err) + } else { + p.FullScan = &v + } + return nil +} + +func (p *GetScannerRequest) ReadField14(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 14: ", err) + } else { + p.OnlyReturnCount = v + } + return nil +} + +func (p *GetScannerRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("get_scanner_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + if err := p.writeField11(oprot); err != nil { + return err + } + if err := p.writeField12(oprot); err != nil { + return err + } + if err := p.writeField13(oprot); err != nil { + return err + } + if err := p.writeField14(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *GetScannerRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:start_key: ", p), err) + } + if err := p.StartKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StartKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:start_key: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("stop_key", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:stop_key: ", p), err) + } + if err := p.StopKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.StopKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:stop_key: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("start_inclusive", thrift.BOOL, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:start_inclusive: ", p), err) + } + if err := oprot.WriteBool(bool(p.StartInclusive)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.start_inclusive (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:start_inclusive: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("stop_inclusive", thrift.BOOL, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stop_inclusive: ", p), err) + } + if err := oprot.WriteBool(bool(p.StopInclusive)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.stop_inclusive (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stop_inclusive: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("batch_size", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:batch_size: ", p), err) + } + if err := oprot.WriteI32(int32(p.BatchSize)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.batch_size (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:batch_size: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("no_value", thrift.BOOL, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:no_value: ", p), err) + } + if err := oprot.WriteBool(bool(p.NoValue)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.no_value (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:no_value: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField7(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key_filter_type", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:hash_key_filter_type: ", p), err) + } + if err := oprot.WriteI32(int32(p.HashKeyFilterType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.hash_key_filter_type (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:hash_key_filter_type: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key_filter_pattern", thrift.STRUCT, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:hash_key_filter_pattern: ", p), err) + } + if err := p.HashKeyFilterPattern.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKeyFilterPattern), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:hash_key_filter_pattern: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField9(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("sort_key_filter_type", thrift.I32, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:sort_key_filter_type: ", p), err) + } + if err := oprot.WriteI32(int32(p.SortKeyFilterType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.sort_key_filter_type (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:sort_key_filter_type: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField10(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("sort_key_filter_pattern", thrift.STRUCT, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:sort_key_filter_pattern: ", p), err) + } + if err := p.SortKeyFilterPattern.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.SortKeyFilterPattern), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:sort_key_filter_pattern: ", p), err) + } + return err +} + +func (p *GetScannerRequest) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetValidatePartitionHash() { + if err := oprot.WriteFieldBegin("validate_partition_hash", thrift.BOOL, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:validate_partition_hash: ", p), err) + } + if err := oprot.WriteBool(bool(*p.ValidatePartitionHash)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.validate_partition_hash (11) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:validate_partition_hash: ", p), err) + } + } + return err +} + +func (p *GetScannerRequest) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetReturnExpireTs() { + if err := oprot.WriteFieldBegin("return_expire_ts", thrift.BOOL, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:return_expire_ts: ", p), err) + } + if err := oprot.WriteBool(bool(*p.ReturnExpireTs)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.return_expire_ts (12) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:return_expire_ts: ", p), err) + } + } + return err +} + +func (p *GetScannerRequest) writeField13(oprot thrift.TProtocol) (err error) { + if p.IsSetFullScan() { + if err := oprot.WriteFieldBegin("full_scan", thrift.BOOL, 13); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 13:full_scan: ", p), err) + } + if err := oprot.WriteBool(bool(*p.FullScan)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.full_scan (13) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 13:full_scan: ", p), err) + } + } + return err +} + +func (p *GetScannerRequest) writeField14(oprot thrift.TProtocol) (err error) { + if p.IsSetOnlyReturnCount() { + if err := oprot.WriteFieldBegin("only_return_count", thrift.BOOL, 14); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 14:only_return_count: ", p), err) + } + if err := oprot.WriteBool(bool(p.OnlyReturnCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.only_return_count (14) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 14:only_return_count: ", p), err) + } + } + return err +} + +func (p *GetScannerRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("GetScannerRequest(%+v)", *p) +} + +// Attributes: +// - ContextID +type ScanRequest struct { + ContextID int64 `thrift:"context_id,1" db:"context_id" json:"context_id"` +} + +func NewScanRequest() *ScanRequest { + return &ScanRequest{} +} + +func (p *ScanRequest) GetContextID() int64 { + return p.ContextID +} +func (p *ScanRequest) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ScanRequest) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.ContextID = v + } + return nil +} + +func (p *ScanRequest) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("scan_request"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ScanRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("context_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:context_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.ContextID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.context_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:context_id: ", p), err) + } + return err +} + +func (p *ScanRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ScanRequest(%+v)", *p) +} + +// Attributes: +// - Error +// - Kvs +// - ContextID +// - AppID +// - PartitionIndex +// - Server +// - KvCount +type ScanResponse struct { + Error int32 `thrift:"error,1" db:"error" json:"error"` + Kvs []*KeyValue `thrift:"kvs,2" db:"kvs" json:"kvs"` + ContextID int64 `thrift:"context_id,3" db:"context_id" json:"context_id"` + AppID int32 `thrift:"app_id,4" db:"app_id" json:"app_id"` + PartitionIndex int32 `thrift:"partition_index,5" db:"partition_index" json:"partition_index"` + Server string `thrift:"server,6" db:"server" json:"server"` + KvCount *int32 `thrift:"kv_count,7" db:"kv_count" json:"kv_count,omitempty"` +} + +func NewScanResponse() *ScanResponse { + return &ScanResponse{} +} + +func (p *ScanResponse) GetError() int32 { + return p.Error +} + +func (p *ScanResponse) GetKvs() []*KeyValue { + return p.Kvs +} + +func (p *ScanResponse) GetContextID() int64 { + return p.ContextID +} + +func (p *ScanResponse) GetAppID() int32 { + return p.AppID +} + +func (p *ScanResponse) GetPartitionIndex() int32 { + return p.PartitionIndex +} + +func (p *ScanResponse) GetServer() string { + return p.Server +} + +var ScanResponse_KvCount_DEFAULT int32 + +func (p *ScanResponse) GetKvCount() int32 { + if !p.IsSetKvCount() { + return ScanResponse_KvCount_DEFAULT + } + return *p.KvCount +} +func (p *ScanResponse) IsSetKvCount() bool { + return p.KvCount != nil +} + +func (p *ScanResponse) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.LIST { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I64 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I32 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I32 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.STRING { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 7: + if fieldTypeId == thrift.I32 { + if err := p.ReadField7(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *ScanResponse) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Error = v + } + return nil +} + +func (p *ScanResponse) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*KeyValue, 0, size) + p.Kvs = tSlice + for i := 0; i < size; i++ { + _elem7 := &KeyValue{} + if err := _elem7.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err) + } + p.Kvs = append(p.Kvs, _elem7) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *ScanResponse) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ContextID = v + } + return nil +} + +func (p *ScanResponse) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.AppID = v + } + return nil +} + +func (p *ScanResponse) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.PartitionIndex = v + } + return nil +} + +func (p *ScanResponse) ReadField6(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 6: ", err) + } else { + p.Server = v + } + return nil +} + +func (p *ScanResponse) ReadField7(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 7: ", err) + } else { + p.KvCount = &v + } + return nil +} + +func (p *ScanResponse) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("scan_response"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *ScanResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("error", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:error: ", p), err) + } + if err := oprot.WriteI32(int32(p.Error)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.error (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:error: ", p), err) + } + return err +} + +func (p *ScanResponse) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("kvs", thrift.LIST, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:kvs: ", p), err) + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Kvs)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Kvs { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:kvs: ", p), err) + } + return err +} + +func (p *ScanResponse) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("context_id", thrift.I64, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:context_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.ContextID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.context_id (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:context_id: ", p), err) + } + return err +} + +func (p *ScanResponse) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("app_id", thrift.I32, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:app_id: ", p), err) + } + if err := oprot.WriteI32(int32(p.AppID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.app_id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:app_id: ", p), err) + } + return err +} + +func (p *ScanResponse) writeField5(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("partition_index", thrift.I32, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:partition_index: ", p), err) + } + if err := oprot.WriteI32(int32(p.PartitionIndex)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.partition_index (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:partition_index: ", p), err) + } + return err +} + +func (p *ScanResponse) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("server", thrift.STRING, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:server: ", p), err) + } + if err := oprot.WriteString(string(p.Server)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.server (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:server: ", p), err) + } + return err +} + +func (p *ScanResponse) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetKvCount() { + if err := oprot.WriteFieldBegin("kv_count", thrift.I32, 7); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:kv_count: ", p), err) + } + if err := oprot.WriteI32(int32(*p.KvCount)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.kv_count (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 7:kv_count: ", p), err) + } + } + return err +} + +func (p *ScanResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ScanResponse(%+v)", *p) +} + +type Rrdb interface { + // Parameters: + // - Update + Put(ctx context.Context, update *UpdateRequest) (r *UpdateResponse, err error) + // Parameters: + // - Request + MultiPut(ctx context.Context, request *MultiPutRequest) (r *UpdateResponse, err error) + // Parameters: + // - Key + Remove(ctx context.Context, key *base.Blob) (r *UpdateResponse, err error) + // Parameters: + // - Request + MultiRemove(ctx context.Context, request *MultiRemoveRequest) (r *MultiRemoveResponse, err error) + // Parameters: + // - Request + Incr(ctx context.Context, request *IncrRequest) (r *IncrResponse, err error) + // Parameters: + // - Request + CheckAndSet(ctx context.Context, request *CheckAndSetRequest) (r *CheckAndSetResponse, err error) + // Parameters: + // - Request + CheckAndMutate(ctx context.Context, request *CheckAndMutateRequest) (r *CheckAndMutateResponse, err error) + // Parameters: + // - Key + Get(ctx context.Context, key *base.Blob) (r *ReadResponse, err error) + // Parameters: + // - Request + MultiGet(ctx context.Context, request *MultiGetRequest) (r *MultiGetResponse, err error) + // Parameters: + // - Request + BatchGet(ctx context.Context, request *BatchGetRequest) (r *BatchGetResponse, err error) + // Parameters: + // - HashKey + SortkeyCount(ctx context.Context, hash_key *base.Blob) (r *CountResponse, err error) + // Parameters: + // - Key + TTL(ctx context.Context, key *base.Blob) (r *TTLResponse, err error) + // Parameters: + // - Request + GetScanner(ctx context.Context, request *GetScannerRequest) (r *ScanResponse, err error) + // Parameters: + // - Request + Scan(ctx context.Context, request *ScanRequest) (r *ScanResponse, err error) + // Parameters: + // - ContextID + ClearScanner(ctx context.Context, context_id int64) (err error) +} + +type RrdbClient struct { + c thrift.TClient +} + +func NewRrdbClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *RrdbClient { + return &RrdbClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewRrdbClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *RrdbClient { + return &RrdbClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewRrdbClient(c thrift.TClient) *RrdbClient { + return &RrdbClient{ + c: c, + } +} + +func (p *RrdbClient) Client_() thrift.TClient { + return p.c +} + +// Parameters: +// - Update +func (p *RrdbClient) Put(ctx context.Context, update *UpdateRequest) (r *UpdateResponse, err error) { + var _args8 RrdbPutArgs + _args8.Update = update + var _result9 RrdbPutResult + if err = p.Client_().Call(ctx, "put", &_args8, &_result9); err != nil { + return + } + return _result9.GetSuccess(), nil +} + +// Parameters: +// - Request +func (p *RrdbClient) MultiPut(ctx context.Context, request *MultiPutRequest) (r *UpdateResponse, err error) { + var _args10 RrdbMultiPutArgs + _args10.Request = request + var _result11 RrdbMultiPutResult + if err = p.Client_().Call(ctx, "multi_put", &_args10, &_result11); err != nil { + return + } + return _result11.GetSuccess(), nil +} + +// Parameters: +// - Key +func (p *RrdbClient) Remove(ctx context.Context, key *base.Blob) (r *UpdateResponse, err error) { + var _args12 RrdbRemoveArgs + _args12.Key = key + var _result13 RrdbRemoveResult + if err = p.Client_().Call(ctx, "remove", &_args12, &_result13); err != nil { + return + } + return _result13.GetSuccess(), nil +} + +// Parameters: +// - Request +func (p *RrdbClient) MultiRemove(ctx context.Context, request *MultiRemoveRequest) (r *MultiRemoveResponse, err error) { + var _args14 RrdbMultiRemoveArgs + _args14.Request = request + var _result15 RrdbMultiRemoveResult + if err = p.Client_().Call(ctx, "multi_remove", &_args14, &_result15); err != nil { + return + } + return _result15.GetSuccess(), nil +} + +// Parameters: +// - Request +func (p *RrdbClient) Incr(ctx context.Context, request *IncrRequest) (r *IncrResponse, err error) { + var _args16 RrdbIncrArgs + _args16.Request = request + var _result17 RrdbIncrResult + if err = p.Client_().Call(ctx, "incr", &_args16, &_result17); err != nil { + return + } + return _result17.GetSuccess(), nil +} + +// Parameters: +// - Request +func (p *RrdbClient) CheckAndSet(ctx context.Context, request *CheckAndSetRequest) (r *CheckAndSetResponse, err error) { + var _args18 RrdbCheckAndSetArgs + _args18.Request = request + var _result19 RrdbCheckAndSetResult + if err = p.Client_().Call(ctx, "check_and_set", &_args18, &_result19); err != nil { + return + } + return _result19.GetSuccess(), nil +} + +// Parameters: +// - Request +func (p *RrdbClient) CheckAndMutate(ctx context.Context, request *CheckAndMutateRequest) (r *CheckAndMutateResponse, err error) { + var _args20 RrdbCheckAndMutateArgs + _args20.Request = request + var _result21 RrdbCheckAndMutateResult + if err = p.Client_().Call(ctx, "check_and_mutate", &_args20, &_result21); err != nil { + return + } + return _result21.GetSuccess(), nil +} + +// Parameters: +// - Key +func (p *RrdbClient) Get(ctx context.Context, key *base.Blob) (r *ReadResponse, err error) { + var _args22 RrdbGetArgs + _args22.Key = key + var _result23 RrdbGetResult + if err = p.Client_().Call(ctx, "get", &_args22, &_result23); err != nil { + return + } + return _result23.GetSuccess(), nil +} + +// Parameters: +// - Request +func (p *RrdbClient) MultiGet(ctx context.Context, request *MultiGetRequest) (r *MultiGetResponse, err error) { + var _args24 RrdbMultiGetArgs + _args24.Request = request + var _result25 RrdbMultiGetResult + if err = p.Client_().Call(ctx, "multi_get", &_args24, &_result25); err != nil { + return + } + return _result25.GetSuccess(), nil +} + +// Parameters: +// - Request +func (p *RrdbClient) BatchGet(ctx context.Context, request *BatchGetRequest) (r *BatchGetResponse, err error) { + var _args26 RrdbBatchGetArgs + _args26.Request = request + var _result27 RrdbBatchGetResult + if err = p.Client_().Call(ctx, "batch_get", &_args26, &_result27); err != nil { + return + } + return _result27.GetSuccess(), nil +} + +// Parameters: +// - HashKey +func (p *RrdbClient) SortkeyCount(ctx context.Context, hash_key *base.Blob) (r *CountResponse, err error) { + var _args28 RrdbSortkeyCountArgs + _args28.HashKey = hash_key + var _result29 RrdbSortkeyCountResult + if err = p.Client_().Call(ctx, "sortkey_count", &_args28, &_result29); err != nil { + return + } + return _result29.GetSuccess(), nil +} + +// Parameters: +// - Key +func (p *RrdbClient) TTL(ctx context.Context, key *base.Blob) (r *TTLResponse, err error) { + var _args30 RrdbTTLArgs + _args30.Key = key + var _result31 RrdbTTLResult + if err = p.Client_().Call(ctx, "ttl", &_args30, &_result31); err != nil { + return + } + return _result31.GetSuccess(), nil +} + +// Parameters: +// - Request +func (p *RrdbClient) GetScanner(ctx context.Context, request *GetScannerRequest) (r *ScanResponse, err error) { + var _args32 RrdbGetScannerArgs + _args32.Request = request + var _result33 RrdbGetScannerResult + if err = p.Client_().Call(ctx, "get_scanner", &_args32, &_result33); err != nil { + return + } + return _result33.GetSuccess(), nil +} + +// Parameters: +// - Request +func (p *RrdbClient) Scan(ctx context.Context, request *ScanRequest) (r *ScanResponse, err error) { + var _args34 RrdbScanArgs + _args34.Request = request + var _result35 RrdbScanResult + if err = p.Client_().Call(ctx, "scan", &_args34, &_result35); err != nil { + return + } + return _result35.GetSuccess(), nil +} + +// Parameters: +// - ContextID +func (p *RrdbClient) ClearScanner(ctx context.Context, context_id int64) (err error) { + var _args36 RrdbClearScannerArgs + _args36.ContextID = context_id + if err := p.Client_().Call(ctx, "clear_scanner", &_args36, nil); err != nil { + return err + } + return nil +} + +type RrdbProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler Rrdb +} + +func (p *RrdbProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *RrdbProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *RrdbProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewRrdbProcessor(handler Rrdb) *RrdbProcessor { + + self37 := &RrdbProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self37.processorMap["put"] = &rrdbProcessorPut{handler: handler} + self37.processorMap["multi_put"] = &rrdbProcessorMultiPut{handler: handler} + self37.processorMap["remove"] = &rrdbProcessorRemove{handler: handler} + self37.processorMap["multi_remove"] = &rrdbProcessorMultiRemove{handler: handler} + self37.processorMap["incr"] = &rrdbProcessorIncr{handler: handler} + self37.processorMap["check_and_set"] = &rrdbProcessorCheckAndSet{handler: handler} + self37.processorMap["check_and_mutate"] = &rrdbProcessorCheckAndMutate{handler: handler} + self37.processorMap["get"] = &rrdbProcessorGet{handler: handler} + self37.processorMap["multi_get"] = &rrdbProcessorMultiGet{handler: handler} + self37.processorMap["batch_get"] = &rrdbProcessorBatchGet{handler: handler} + self37.processorMap["sortkey_count"] = &rrdbProcessorSortkeyCount{handler: handler} + self37.processorMap["ttl"] = &rrdbProcessorTTL{handler: handler} + self37.processorMap["get_scanner"] = &rrdbProcessorGetScanner{handler: handler} + self37.processorMap["scan"] = &rrdbProcessorScan{handler: handler} + self37.processorMap["clear_scanner"] = &rrdbProcessorClearScanner{handler: handler} + return self37 +} + +func (p *RrdbProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x38 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x38.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x38 + +} + +type rrdbProcessorPut struct { + handler Rrdb +} + +func (p *rrdbProcessorPut) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbPutArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("put", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbPutResult{} + var retval *UpdateResponse + var err2 error + if retval, err2 = p.handler.Put(ctx, args.Update); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing put: "+err2.Error()) + oprot.WriteMessageBegin("put", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("put", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorMultiPut struct { + handler Rrdb +} + +func (p *rrdbProcessorMultiPut) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbMultiPutArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("multi_put", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbMultiPutResult{} + var retval *UpdateResponse + var err2 error + if retval, err2 = p.handler.MultiPut(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing multi_put: "+err2.Error()) + oprot.WriteMessageBegin("multi_put", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("multi_put", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorRemove struct { + handler Rrdb +} + +func (p *rrdbProcessorRemove) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbRemoveArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("remove", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbRemoveResult{} + var retval *UpdateResponse + var err2 error + if retval, err2 = p.handler.Remove(ctx, args.Key); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing remove: "+err2.Error()) + oprot.WriteMessageBegin("remove", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("remove", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorMultiRemove struct { + handler Rrdb +} + +func (p *rrdbProcessorMultiRemove) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbMultiRemoveArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("multi_remove", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbMultiRemoveResult{} + var retval *MultiRemoveResponse + var err2 error + if retval, err2 = p.handler.MultiRemove(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing multi_remove: "+err2.Error()) + oprot.WriteMessageBegin("multi_remove", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("multi_remove", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorIncr struct { + handler Rrdb +} + +func (p *rrdbProcessorIncr) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbIncrArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("incr", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbIncrResult{} + var retval *IncrResponse + var err2 error + if retval, err2 = p.handler.Incr(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing incr: "+err2.Error()) + oprot.WriteMessageBegin("incr", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("incr", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorCheckAndSet struct { + handler Rrdb +} + +func (p *rrdbProcessorCheckAndSet) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbCheckAndSetArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("check_and_set", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbCheckAndSetResult{} + var retval *CheckAndSetResponse + var err2 error + if retval, err2 = p.handler.CheckAndSet(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing check_and_set: "+err2.Error()) + oprot.WriteMessageBegin("check_and_set", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("check_and_set", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorCheckAndMutate struct { + handler Rrdb +} + +func (p *rrdbProcessorCheckAndMutate) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbCheckAndMutateArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("check_and_mutate", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbCheckAndMutateResult{} + var retval *CheckAndMutateResponse + var err2 error + if retval, err2 = p.handler.CheckAndMutate(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing check_and_mutate: "+err2.Error()) + oprot.WriteMessageBegin("check_and_mutate", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("check_and_mutate", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorGet struct { + handler Rrdb +} + +func (p *rrdbProcessorGet) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbGetArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("get", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbGetResult{} + var retval *ReadResponse + var err2 error + if retval, err2 = p.handler.Get(ctx, args.Key); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get: "+err2.Error()) + oprot.WriteMessageBegin("get", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("get", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorMultiGet struct { + handler Rrdb +} + +func (p *rrdbProcessorMultiGet) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbMultiGetArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("multi_get", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbMultiGetResult{} + var retval *MultiGetResponse + var err2 error + if retval, err2 = p.handler.MultiGet(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing multi_get: "+err2.Error()) + oprot.WriteMessageBegin("multi_get", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("multi_get", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorBatchGet struct { + handler Rrdb +} + +func (p *rrdbProcessorBatchGet) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbBatchGetArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("batch_get", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbBatchGetResult{} + var retval *BatchGetResponse + var err2 error + if retval, err2 = p.handler.BatchGet(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing batch_get: "+err2.Error()) + oprot.WriteMessageBegin("batch_get", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("batch_get", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorSortkeyCount struct { + handler Rrdb +} + +func (p *rrdbProcessorSortkeyCount) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbSortkeyCountArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("sortkey_count", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbSortkeyCountResult{} + var retval *CountResponse + var err2 error + if retval, err2 = p.handler.SortkeyCount(ctx, args.HashKey); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing sortkey_count: "+err2.Error()) + oprot.WriteMessageBegin("sortkey_count", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("sortkey_count", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorTTL struct { + handler Rrdb +} + +func (p *rrdbProcessorTTL) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbTTLArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("ttl", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbTTLResult{} + var retval *TTLResponse + var err2 error + if retval, err2 = p.handler.TTL(ctx, args.Key); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ttl: "+err2.Error()) + oprot.WriteMessageBegin("ttl", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("ttl", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorGetScanner struct { + handler Rrdb +} + +func (p *rrdbProcessorGetScanner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbGetScannerArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("get_scanner", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbGetScannerResult{} + var retval *ScanResponse + var err2 error + if retval, err2 = p.handler.GetScanner(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_scanner: "+err2.Error()) + oprot.WriteMessageBegin("get_scanner", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("get_scanner", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorScan struct { + handler Rrdb +} + +func (p *rrdbProcessorScan) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbScanArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("scan", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := RrdbScanResult{} + var retval *ScanResponse + var err2 error + if retval, err2 = p.handler.Scan(ctx, args.Request); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing scan: "+err2.Error()) + oprot.WriteMessageBegin("scan", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("scan", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type rrdbProcessorClearScanner struct { + handler Rrdb +} + +func (p *rrdbProcessorClearScanner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := RrdbClearScannerArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + if err2 = p.handler.ClearScanner(ctx, args.ContextID); err2 != nil { + return true, err2 + } + return true, nil +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Update +type RrdbPutArgs struct { + Update *UpdateRequest `thrift:"update,1" db:"update" json:"update"` +} + +func NewRrdbPutArgs() *RrdbPutArgs { + return &RrdbPutArgs{} +} + +var RrdbPutArgs_Update_DEFAULT *UpdateRequest + +func (p *RrdbPutArgs) GetUpdate() *UpdateRequest { + if !p.IsSetUpdate() { + return RrdbPutArgs_Update_DEFAULT + } + return p.Update +} +func (p *RrdbPutArgs) IsSetUpdate() bool { + return p.Update != nil +} + +func (p *RrdbPutArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbPutArgs) ReadField1(iprot thrift.TProtocol) error { + p.Update = &UpdateRequest{} + if err := p.Update.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Update), err) + } + return nil +} + +func (p *RrdbPutArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("put_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbPutArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("update", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:update: ", p), err) + } + if err := p.Update.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Update), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:update: ", p), err) + } + return err +} + +func (p *RrdbPutArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbPutArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbPutResult struct { + Success *UpdateResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbPutResult() *RrdbPutResult { + return &RrdbPutResult{} +} + +var RrdbPutResult_Success_DEFAULT *UpdateResponse + +func (p *RrdbPutResult) GetSuccess() *UpdateResponse { + if !p.IsSetSuccess() { + return RrdbPutResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbPutResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbPutResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbPutResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &UpdateResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbPutResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("put_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbPutResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbPutResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbPutResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbMultiPutArgs struct { + Request *MultiPutRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbMultiPutArgs() *RrdbMultiPutArgs { + return &RrdbMultiPutArgs{} +} + +var RrdbMultiPutArgs_Request_DEFAULT *MultiPutRequest + +func (p *RrdbMultiPutArgs) GetRequest() *MultiPutRequest { + if !p.IsSetRequest() { + return RrdbMultiPutArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbMultiPutArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbMultiPutArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbMultiPutArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &MultiPutRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbMultiPutArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_put_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbMultiPutArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbMultiPutArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbMultiPutArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbMultiPutResult struct { + Success *UpdateResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbMultiPutResult() *RrdbMultiPutResult { + return &RrdbMultiPutResult{} +} + +var RrdbMultiPutResult_Success_DEFAULT *UpdateResponse + +func (p *RrdbMultiPutResult) GetSuccess() *UpdateResponse { + if !p.IsSetSuccess() { + return RrdbMultiPutResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbMultiPutResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbMultiPutResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbMultiPutResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &UpdateResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbMultiPutResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_put_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbMultiPutResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbMultiPutResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbMultiPutResult(%+v)", *p) +} + +// Attributes: +// - Key +type RrdbRemoveArgs struct { + Key *base.Blob `thrift:"key,1" db:"key" json:"key"` +} + +func NewRrdbRemoveArgs() *RrdbRemoveArgs { + return &RrdbRemoveArgs{} +} + +var RrdbRemoveArgs_Key_DEFAULT *base.Blob + +func (p *RrdbRemoveArgs) GetKey() *base.Blob { + if !p.IsSetKey() { + return RrdbRemoveArgs_Key_DEFAULT + } + return p.Key +} +func (p *RrdbRemoveArgs) IsSetKey() bool { + return p.Key != nil +} + +func (p *RrdbRemoveArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbRemoveArgs) ReadField1(iprot thrift.TProtocol) error { + p.Key = &base.Blob{} + if err := p.Key.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) + } + return nil +} + +func (p *RrdbRemoveArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("remove_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbRemoveArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := p.Key.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *RrdbRemoveArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbRemoveArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbRemoveResult struct { + Success *UpdateResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbRemoveResult() *RrdbRemoveResult { + return &RrdbRemoveResult{} +} + +var RrdbRemoveResult_Success_DEFAULT *UpdateResponse + +func (p *RrdbRemoveResult) GetSuccess() *UpdateResponse { + if !p.IsSetSuccess() { + return RrdbRemoveResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbRemoveResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbRemoveResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbRemoveResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &UpdateResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbRemoveResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("remove_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbRemoveResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbRemoveResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbRemoveResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbMultiRemoveArgs struct { + Request *MultiRemoveRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbMultiRemoveArgs() *RrdbMultiRemoveArgs { + return &RrdbMultiRemoveArgs{} +} + +var RrdbMultiRemoveArgs_Request_DEFAULT *MultiRemoveRequest + +func (p *RrdbMultiRemoveArgs) GetRequest() *MultiRemoveRequest { + if !p.IsSetRequest() { + return RrdbMultiRemoveArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbMultiRemoveArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbMultiRemoveArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbMultiRemoveArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &MultiRemoveRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbMultiRemoveArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_remove_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbMultiRemoveArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbMultiRemoveArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbMultiRemoveArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbMultiRemoveResult struct { + Success *MultiRemoveResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbMultiRemoveResult() *RrdbMultiRemoveResult { + return &RrdbMultiRemoveResult{} +} + +var RrdbMultiRemoveResult_Success_DEFAULT *MultiRemoveResponse + +func (p *RrdbMultiRemoveResult) GetSuccess() *MultiRemoveResponse { + if !p.IsSetSuccess() { + return RrdbMultiRemoveResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbMultiRemoveResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbMultiRemoveResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbMultiRemoveResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &MultiRemoveResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbMultiRemoveResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_remove_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbMultiRemoveResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbMultiRemoveResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbMultiRemoveResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbIncrArgs struct { + Request *IncrRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbIncrArgs() *RrdbIncrArgs { + return &RrdbIncrArgs{} +} + +var RrdbIncrArgs_Request_DEFAULT *IncrRequest + +func (p *RrdbIncrArgs) GetRequest() *IncrRequest { + if !p.IsSetRequest() { + return RrdbIncrArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbIncrArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbIncrArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbIncrArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &IncrRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbIncrArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("incr_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbIncrArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbIncrArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbIncrArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbIncrResult struct { + Success *IncrResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbIncrResult() *RrdbIncrResult { + return &RrdbIncrResult{} +} + +var RrdbIncrResult_Success_DEFAULT *IncrResponse + +func (p *RrdbIncrResult) GetSuccess() *IncrResponse { + if !p.IsSetSuccess() { + return RrdbIncrResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbIncrResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbIncrResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbIncrResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &IncrResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbIncrResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("incr_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbIncrResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbIncrResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbIncrResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbCheckAndSetArgs struct { + Request *CheckAndSetRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbCheckAndSetArgs() *RrdbCheckAndSetArgs { + return &RrdbCheckAndSetArgs{} +} + +var RrdbCheckAndSetArgs_Request_DEFAULT *CheckAndSetRequest + +func (p *RrdbCheckAndSetArgs) GetRequest() *CheckAndSetRequest { + if !p.IsSetRequest() { + return RrdbCheckAndSetArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbCheckAndSetArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbCheckAndSetArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbCheckAndSetArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &CheckAndSetRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbCheckAndSetArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_set_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbCheckAndSetArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbCheckAndSetArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbCheckAndSetArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbCheckAndSetResult struct { + Success *CheckAndSetResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbCheckAndSetResult() *RrdbCheckAndSetResult { + return &RrdbCheckAndSetResult{} +} + +var RrdbCheckAndSetResult_Success_DEFAULT *CheckAndSetResponse + +func (p *RrdbCheckAndSetResult) GetSuccess() *CheckAndSetResponse { + if !p.IsSetSuccess() { + return RrdbCheckAndSetResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbCheckAndSetResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbCheckAndSetResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbCheckAndSetResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &CheckAndSetResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbCheckAndSetResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_set_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbCheckAndSetResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbCheckAndSetResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbCheckAndSetResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbCheckAndMutateArgs struct { + Request *CheckAndMutateRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbCheckAndMutateArgs() *RrdbCheckAndMutateArgs { + return &RrdbCheckAndMutateArgs{} +} + +var RrdbCheckAndMutateArgs_Request_DEFAULT *CheckAndMutateRequest + +func (p *RrdbCheckAndMutateArgs) GetRequest() *CheckAndMutateRequest { + if !p.IsSetRequest() { + return RrdbCheckAndMutateArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbCheckAndMutateArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbCheckAndMutateArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbCheckAndMutateArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &CheckAndMutateRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbCheckAndMutateArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_mutate_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbCheckAndMutateArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbCheckAndMutateArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbCheckAndMutateArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbCheckAndMutateResult struct { + Success *CheckAndMutateResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbCheckAndMutateResult() *RrdbCheckAndMutateResult { + return &RrdbCheckAndMutateResult{} +} + +var RrdbCheckAndMutateResult_Success_DEFAULT *CheckAndMutateResponse + +func (p *RrdbCheckAndMutateResult) GetSuccess() *CheckAndMutateResponse { + if !p.IsSetSuccess() { + return RrdbCheckAndMutateResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbCheckAndMutateResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbCheckAndMutateResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbCheckAndMutateResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &CheckAndMutateResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbCheckAndMutateResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("check_and_mutate_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbCheckAndMutateResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbCheckAndMutateResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbCheckAndMutateResult(%+v)", *p) +} + +// Attributes: +// - Key +type RrdbGetArgs struct { + Key *base.Blob `thrift:"key,1" db:"key" json:"key"` +} + +func NewRrdbGetArgs() *RrdbGetArgs { + return &RrdbGetArgs{} +} + +var RrdbGetArgs_Key_DEFAULT *base.Blob + +func (p *RrdbGetArgs) GetKey() *base.Blob { + if !p.IsSetKey() { + return RrdbGetArgs_Key_DEFAULT + } + return p.Key +} +func (p *RrdbGetArgs) IsSetKey() bool { + return p.Key != nil +} + +func (p *RrdbGetArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbGetArgs) ReadField1(iprot thrift.TProtocol) error { + p.Key = &base.Blob{} + if err := p.Key.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) + } + return nil +} + +func (p *RrdbGetArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("get_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbGetArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := p.Key.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *RrdbGetArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbGetArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbGetResult struct { + Success *ReadResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbGetResult() *RrdbGetResult { + return &RrdbGetResult{} +} + +var RrdbGetResult_Success_DEFAULT *ReadResponse + +func (p *RrdbGetResult) GetSuccess() *ReadResponse { + if !p.IsSetSuccess() { + return RrdbGetResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbGetResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbGetResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbGetResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ReadResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbGetResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("get_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbGetResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbGetResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbGetResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbMultiGetArgs struct { + Request *MultiGetRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbMultiGetArgs() *RrdbMultiGetArgs { + return &RrdbMultiGetArgs{} +} + +var RrdbMultiGetArgs_Request_DEFAULT *MultiGetRequest + +func (p *RrdbMultiGetArgs) GetRequest() *MultiGetRequest { + if !p.IsSetRequest() { + return RrdbMultiGetArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbMultiGetArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbMultiGetArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbMultiGetArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &MultiGetRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbMultiGetArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_get_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbMultiGetArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbMultiGetArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbMultiGetArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbMultiGetResult struct { + Success *MultiGetResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbMultiGetResult() *RrdbMultiGetResult { + return &RrdbMultiGetResult{} +} + +var RrdbMultiGetResult_Success_DEFAULT *MultiGetResponse + +func (p *RrdbMultiGetResult) GetSuccess() *MultiGetResponse { + if !p.IsSetSuccess() { + return RrdbMultiGetResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbMultiGetResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbMultiGetResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbMultiGetResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &MultiGetResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbMultiGetResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("multi_get_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbMultiGetResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbMultiGetResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbMultiGetResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbBatchGetArgs struct { + Request *BatchGetRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbBatchGetArgs() *RrdbBatchGetArgs { + return &RrdbBatchGetArgs{} +} + +var RrdbBatchGetArgs_Request_DEFAULT *BatchGetRequest + +func (p *RrdbBatchGetArgs) GetRequest() *BatchGetRequest { + if !p.IsSetRequest() { + return RrdbBatchGetArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbBatchGetArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbBatchGetArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbBatchGetArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &BatchGetRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbBatchGetArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("batch_get_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbBatchGetArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbBatchGetArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbBatchGetArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbBatchGetResult struct { + Success *BatchGetResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbBatchGetResult() *RrdbBatchGetResult { + return &RrdbBatchGetResult{} +} + +var RrdbBatchGetResult_Success_DEFAULT *BatchGetResponse + +func (p *RrdbBatchGetResult) GetSuccess() *BatchGetResponse { + if !p.IsSetSuccess() { + return RrdbBatchGetResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbBatchGetResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbBatchGetResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbBatchGetResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &BatchGetResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbBatchGetResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("batch_get_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbBatchGetResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbBatchGetResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbBatchGetResult(%+v)", *p) +} + +// Attributes: +// - HashKey +type RrdbSortkeyCountArgs struct { + HashKey *base.Blob `thrift:"hash_key,1" db:"hash_key" json:"hash_key"` +} + +func NewRrdbSortkeyCountArgs() *RrdbSortkeyCountArgs { + return &RrdbSortkeyCountArgs{} +} + +var RrdbSortkeyCountArgs_HashKey_DEFAULT *base.Blob + +func (p *RrdbSortkeyCountArgs) GetHashKey() *base.Blob { + if !p.IsSetHashKey() { + return RrdbSortkeyCountArgs_HashKey_DEFAULT + } + return p.HashKey +} +func (p *RrdbSortkeyCountArgs) IsSetHashKey() bool { + return p.HashKey != nil +} + +func (p *RrdbSortkeyCountArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbSortkeyCountArgs) ReadField1(iprot thrift.TProtocol) error { + p.HashKey = &base.Blob{} + if err := p.HashKey.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.HashKey), err) + } + return nil +} + +func (p *RrdbSortkeyCountArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("sortkey_count_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbSortkeyCountArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("hash_key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:hash_key: ", p), err) + } + if err := p.HashKey.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.HashKey), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:hash_key: ", p), err) + } + return err +} + +func (p *RrdbSortkeyCountArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbSortkeyCountArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbSortkeyCountResult struct { + Success *CountResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbSortkeyCountResult() *RrdbSortkeyCountResult { + return &RrdbSortkeyCountResult{} +} + +var RrdbSortkeyCountResult_Success_DEFAULT *CountResponse + +func (p *RrdbSortkeyCountResult) GetSuccess() *CountResponse { + if !p.IsSetSuccess() { + return RrdbSortkeyCountResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbSortkeyCountResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbSortkeyCountResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbSortkeyCountResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &CountResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbSortkeyCountResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("sortkey_count_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbSortkeyCountResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbSortkeyCountResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbSortkeyCountResult(%+v)", *p) +} + +// Attributes: +// - Key +type RrdbTTLArgs struct { + Key *base.Blob `thrift:"key,1" db:"key" json:"key"` +} + +func NewRrdbTTLArgs() *RrdbTTLArgs { + return &RrdbTTLArgs{} +} + +var RrdbTTLArgs_Key_DEFAULT *base.Blob + +func (p *RrdbTTLArgs) GetKey() *base.Blob { + if !p.IsSetKey() { + return RrdbTTLArgs_Key_DEFAULT + } + return p.Key +} +func (p *RrdbTTLArgs) IsSetKey() bool { + return p.Key != nil +} + +func (p *RrdbTTLArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbTTLArgs) ReadField1(iprot thrift.TProtocol) error { + p.Key = &base.Blob{} + if err := p.Key.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Key), err) + } + return nil +} + +func (p *RrdbTTLArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ttl_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbTTLArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("key", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := p.Key.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Key), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *RrdbTTLArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbTTLArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbTTLResult struct { + Success *TTLResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbTTLResult() *RrdbTTLResult { + return &RrdbTTLResult{} +} + +var RrdbTTLResult_Success_DEFAULT *TTLResponse + +func (p *RrdbTTLResult) GetSuccess() *TTLResponse { + if !p.IsSetSuccess() { + return RrdbTTLResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbTTLResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbTTLResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbTTLResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &TTLResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbTTLResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("ttl_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbTTLResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbTTLResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbTTLResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbGetScannerArgs struct { + Request *GetScannerRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbGetScannerArgs() *RrdbGetScannerArgs { + return &RrdbGetScannerArgs{} +} + +var RrdbGetScannerArgs_Request_DEFAULT *GetScannerRequest + +func (p *RrdbGetScannerArgs) GetRequest() *GetScannerRequest { + if !p.IsSetRequest() { + return RrdbGetScannerArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbGetScannerArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbGetScannerArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbGetScannerArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &GetScannerRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbGetScannerArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("get_scanner_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbGetScannerArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbGetScannerArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbGetScannerArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbGetScannerResult struct { + Success *ScanResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbGetScannerResult() *RrdbGetScannerResult { + return &RrdbGetScannerResult{} +} + +var RrdbGetScannerResult_Success_DEFAULT *ScanResponse + +func (p *RrdbGetScannerResult) GetSuccess() *ScanResponse { + if !p.IsSetSuccess() { + return RrdbGetScannerResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbGetScannerResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbGetScannerResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbGetScannerResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ScanResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbGetScannerResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("get_scanner_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbGetScannerResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbGetScannerResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbGetScannerResult(%+v)", *p) +} + +// Attributes: +// - Request +type RrdbScanArgs struct { + Request *ScanRequest `thrift:"request,1" db:"request" json:"request"` +} + +func NewRrdbScanArgs() *RrdbScanArgs { + return &RrdbScanArgs{} +} + +var RrdbScanArgs_Request_DEFAULT *ScanRequest + +func (p *RrdbScanArgs) GetRequest() *ScanRequest { + if !p.IsSetRequest() { + return RrdbScanArgs_Request_DEFAULT + } + return p.Request +} +func (p *RrdbScanArgs) IsSetRequest() bool { + return p.Request != nil +} + +func (p *RrdbScanArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbScanArgs) ReadField1(iprot thrift.TProtocol) error { + p.Request = &ScanRequest{} + if err := p.Request.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Request), err) + } + return nil +} + +func (p *RrdbScanArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("scan_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbScanArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:request: ", p), err) + } + if err := p.Request.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Request), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:request: ", p), err) + } + return err +} + +func (p *RrdbScanArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbScanArgs(%+v)", *p) +} + +// Attributes: +// - Success +type RrdbScanResult struct { + Success *ScanResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewRrdbScanResult() *RrdbScanResult { + return &RrdbScanResult{} +} + +var RrdbScanResult_Success_DEFAULT *ScanResponse + +func (p *RrdbScanResult) GetSuccess() *ScanResponse { + if !p.IsSetSuccess() { + return RrdbScanResult_Success_DEFAULT + } + return p.Success +} +func (p *RrdbScanResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *RrdbScanResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbScanResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &ScanResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *RrdbScanResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("scan_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbScanResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *RrdbScanResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbScanResult(%+v)", *p) +} + +// Attributes: +// - ContextID +type RrdbClearScannerArgs struct { + ContextID int64 `thrift:"context_id,1" db:"context_id" json:"context_id"` +} + +func NewRrdbClearScannerArgs() *RrdbClearScannerArgs { + return &RrdbClearScannerArgs{} +} + +func (p *RrdbClearScannerArgs) GetContextID() int64 { + return p.ContextID +} +func (p *RrdbClearScannerArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *RrdbClearScannerArgs) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.ContextID = v + } + return nil +} + +func (p *RrdbClearScannerArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("clear_scanner_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *RrdbClearScannerArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("context_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:context_id: ", p), err) + } + if err := oprot.WriteI64(int64(p.ContextID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.context_id (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:context_id: ", p), err) + } + return err +} + +func (p *RrdbClearScannerArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RrdbClearScannerArgs(%+v)", *p) +} + +type Meta interface { + // Parameters: + // - Query + QueryCfg(ctx context.Context, query *replication.QueryCfgRequest) (r *replication.QueryCfgResponse, err error) +} + +type MetaClient struct { + c thrift.TClient +} + +func NewMetaClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *MetaClient { + return &MetaClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewMetaClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *MetaClient { + return &MetaClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewMetaClient(c thrift.TClient) *MetaClient { + return &MetaClient{ + c: c, + } +} + +func (p *MetaClient) Client_() thrift.TClient { + return p.c +} + +// Parameters: +// - Query +func (p *MetaClient) QueryCfg(ctx context.Context, query *replication.QueryCfgRequest) (r *replication.QueryCfgResponse, err error) { + var _args124 MetaQueryCfgArgs + _args124.Query = query + var _result125 MetaQueryCfgResult + if err = p.Client_().Call(ctx, "query_cfg", &_args124, &_result125); err != nil { + return + } + return _result125.GetSuccess(), nil +} + +type MetaProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler Meta +} + +func (p *MetaProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *MetaProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *MetaProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewMetaProcessor(handler Meta) *MetaProcessor { + + self126 := &MetaProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self126.processorMap["query_cfg"] = &metaProcessorQueryCfg{handler: handler} + return self126 +} + +func (p *MetaProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x127 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x127.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x127 + +} + +type metaProcessorQueryCfg struct { + handler Meta +} + +func (p *metaProcessorQueryCfg) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := MetaQueryCfgArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("query_cfg", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + result := MetaQueryCfgResult{} + var retval *replication.QueryCfgResponse + var err2 error + if retval, err2 = p.handler.QueryCfg(ctx, args.Query); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_cfg: "+err2.Error()) + oprot.WriteMessageBegin("query_cfg", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("query_cfg", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Query +type MetaQueryCfgArgs struct { + Query *replication.QueryCfgRequest `thrift:"query,1" db:"query" json:"query"` +} + +func NewMetaQueryCfgArgs() *MetaQueryCfgArgs { + return &MetaQueryCfgArgs{} +} + +var MetaQueryCfgArgs_Query_DEFAULT *replication.QueryCfgRequest + +func (p *MetaQueryCfgArgs) GetQuery() *replication.QueryCfgRequest { + if !p.IsSetQuery() { + return MetaQueryCfgArgs_Query_DEFAULT + } + return p.Query +} +func (p *MetaQueryCfgArgs) IsSetQuery() bool { + return p.Query != nil +} + +func (p *MetaQueryCfgArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *MetaQueryCfgArgs) ReadField1(iprot thrift.TProtocol) error { + p.Query = &replication.QueryCfgRequest{} + if err := p.Query.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Query), err) + } + return nil +} + +func (p *MetaQueryCfgArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_cfg_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *MetaQueryCfgArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("query", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:query: ", p), err) + } + if err := p.Query.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Query), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:query: ", p), err) + } + return err +} + +func (p *MetaQueryCfgArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("MetaQueryCfgArgs(%+v)", *p) +} + +// Attributes: +// - Success +type MetaQueryCfgResult struct { + Success *replication.QueryCfgResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +} + +func NewMetaQueryCfgResult() *MetaQueryCfgResult { + return &MetaQueryCfgResult{} +} + +var MetaQueryCfgResult_Success_DEFAULT *replication.QueryCfgResponse + +func (p *MetaQueryCfgResult) GetSuccess() *replication.QueryCfgResponse { + if !p.IsSetSuccess() { + return MetaQueryCfgResult_Success_DEFAULT + } + return p.Success +} +func (p *MetaQueryCfgResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *MetaQueryCfgResult) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField0(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *MetaQueryCfgResult) ReadField0(iprot thrift.TProtocol) error { + p.Success = &replication.QueryCfgResponse{} + if err := p.Success.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err) + } + return nil +} + +func (p *MetaQueryCfgResult) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("query_cfg_result"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField0(oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *MetaQueryCfgResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err := oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) + } + if err := p.Success.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) + } + } + return err +} + +func (p *MetaQueryCfgResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("MetaQueryCfgResult(%+v)", *p) +} diff --git a/go-client/pegasus/table_connector.go b/go-client/pegasus/table_connector.go index d1074a7bf3..c83461e393 100644 --- a/go-client/pegasus/table_connector.go +++ b/go-client/pegasus/table_connector.go @@ -703,6 +703,7 @@ func (p *pegasusTableConnector) handleReplicaError(err error, replica *session.R case base.ERR_TIMEOUT: case context.DeadlineExceeded: + confUpdate = true case context.Canceled: // timeout will not trigger a configuration update diff --git a/go-client/pegasus/table_connector_test.go b/go-client/pegasus/table_connector_test.go index 1b28747655..b4016748ea 100644 --- a/go-client/pegasus/table_connector_test.go +++ b/go-client/pegasus/table_connector_test.go @@ -269,8 +269,14 @@ func TestPegasusTableConnector_TriggerSelfUpdate(t *testing.T) { assert.True(t, confUpdate) assert.False(t, retry) + confUpdate, retry, err = ptb.handleReplicaError(context.DeadlineExceeded, nil) + <-ptb.confUpdateCh + assert.Error(t, err) + assert.True(t, confUpdate) + assert.False(t, retry) + { // Ensure: The following errors should not trigger configuration update - errorTypes := []error{base.ERR_TIMEOUT, context.DeadlineExceeded, base.ERR_CAPACITY_EXCEEDED, base.ERR_NOT_ENOUGH_MEMBER, base.ERR_BUSY, base.ERR_SPLITTING, base.ERR_DISK_INSUFFICIENT} + errorTypes := []error{base.ERR_TIMEOUT, base.ERR_CAPACITY_EXCEEDED, base.ERR_NOT_ENOUGH_MEMBER, base.ERR_BUSY, base.ERR_SPLITTING, base.ERR_DISK_INSUFFICIENT} for _, err := range errorTypes { channelEmpty := false diff --git a/go-client/session/meta_call.go b/go-client/session/meta_call.go index 2db6179ab1..d846aa09b8 100644 --- a/go-client/session/meta_call.go +++ b/go-client/session/meta_call.go @@ -26,6 +26,8 @@ import ( "time" "github.com/apache/incubator-pegasus/go-client/idl/base" + "github.com/apache/incubator-pegasus/go-client/idl/replication" + "github.com/apache/incubator-pegasus/go-client/pegalog" ) type metaCallFunc func(context.Context, *metaSession) (metaResponse, error) @@ -42,21 +44,24 @@ type metaCall struct { backupCh chan interface{} callFunc metaCallFunc - metas []*metaSession - lead int + metaIPAddrs []string + metas []*metaSession + lead int // After a Run successfully ends, the current leader will be set in this field. // If there is no meta failover, `newLead` equals to `lead`. newLead uint32 + lock sync.RWMutex } -func newMetaCall(lead int, metas []*metaSession, callFunc metaCallFunc) *metaCall { +func newMetaCall(lead int, metas []*metaSession, callFunc metaCallFunc, meatIPAddr []string) *metaCall { return &metaCall{ - metas: metas, - lead: lead, - newLead: uint32(lead), - respCh: make(chan metaResponse), - callFunc: callFunc, - backupCh: make(chan interface{}), + metas: metas, + metaIPAddrs: meatIPAddr, + lead: lead, + newLead: uint32(lead), + respCh: make(chan metaResponse), + callFunc: callFunc, + backupCh: make(chan interface{}), } } @@ -106,14 +111,44 @@ func (c *metaCall) Run(ctx context.Context) (metaResponse, error) { } // issueSingleMeta returns false if we should try another meta -func (c *metaCall) issueSingleMeta(ctx context.Context, i int) bool { - meta := c.metas[i] +func (c *metaCall) issueSingleMeta(ctx context.Context, curLeader int) bool { + meta := c.metas[curLeader] resp, err := c.callFunc(ctx, meta) + + if err == nil && resp.GetErr().Errno == base.ERR_FORWARD_TO_OTHERS.String() { + forwardAddr := c.getMetaServiceForwardAddress(resp) + if forwardAddr == nil { + return false + } + addr := forwardAddr.GetAddress() + found := false + c.lock.Lock() + for i := range c.metaIPAddrs { + if addr == c.metaIPAddrs[i] { + found = true + break + } + } + c.lock.Unlock() + if !found { + c.lock.Lock() + c.metaIPAddrs = append(c.metaIPAddrs, addr) + c.metas = append(c.metas, &metaSession{ + NodeSession: newNodeSession(addr, NodeTypeMeta), + logger: pegalog.GetLogger(), + }) + c.lock.Unlock() + curLeader = len(c.metas) - 1 + c.metas[curLeader].logger.Printf("add forward address %s as meta server", addr) + resp, err = c.callFunc(ctx, c.metas[curLeader]) + } + } + if err != nil || resp.GetErr().Errno == base.ERR_FORWARD_TO_OTHERS.String() { return false } // the RPC succeeds, this meta becomes the new leader now. - atomic.StoreUint32(&c.newLead, uint32(i)) + atomic.StoreUint32(&c.newLead, uint32(curLeader)) select { case <-ctx.Done(): case c.respCh <- resp: @@ -133,3 +168,14 @@ func (c *metaCall) issueBackupMetas(ctx context.Context) { }(i) } } + +func (c *metaCall) getMetaServiceForwardAddress(resp metaResponse) *base.RPCAddress { + rep, ok := resp.(*replication.QueryCfgResponse) + if !ok || rep.GetErr().Errno != base.ERR_FORWARD_TO_OTHERS.String() { + return nil + } else if rep.GetPartitions() == nil || len(rep.GetPartitions()) == 0 { + return nil + } else { + return rep.Partitions[0].Primary + } +} diff --git a/go-client/session/meta_session.go b/go-client/session/meta_session.go index c209cb8488..b0e962d1d9 100644 --- a/go-client/session/meta_session.go +++ b/go-client/session/meta_session.go @@ -94,10 +94,12 @@ func NewMetaManager(addrs []string, creator NodeSessionCreator) *MetaManager { func (m *MetaManager) call(ctx context.Context, callFunc metaCallFunc) (metaResponse, error) { lead := m.getCurrentLeader() - call := newMetaCall(lead, m.metas, callFunc) + call := newMetaCall(lead, m.metas, callFunc, m.metaIPAddrs) resp, err := call.Run(ctx) if err == nil { m.setCurrentLeader(int(call.newLead)) + m.setNewMetas(call.metas) + m.setMetaIPAddrs(call.metaIPAddrs) } return resp, err } @@ -131,6 +133,20 @@ func (m *MetaManager) setCurrentLeader(lead int) { m.currentLeader = lead } +func (m *MetaManager) setNewMetas(metas []*metaSession) { + m.mu.Lock() + defer m.mu.Unlock() + + m.metas = metas +} + +func (m *MetaManager) setMetaIPAddrs(metaIPAddrs []string) { + m.mu.Lock() + defer m.mu.Unlock() + + m.metaIPAddrs = metaIPAddrs +} + // Close the sessions. func (m *MetaManager) Close() error { funcs := make([]func() error, len(m.metas)) diff --git a/go-client/session/meta_session_test.go b/go-client/session/meta_session_test.go index d2cbf6cc3d..5014a4680e 100644 --- a/go-client/session/meta_session_test.go +++ b/go-client/session/meta_session_test.go @@ -118,7 +118,7 @@ func TestMetaManager_FirstMetaDead(t *testing.T) { for i := 0; i < 3; i++ { call := newMetaCall(mm.currentLeader, mm.metas, func(rpcCtx context.Context, ms *metaSession) (metaResponse, error) { return ms.queryConfig(rpcCtx, "temp") - }) + }, []string{"0.0.0.0:12345", "0.0.0.0:34603", "0.0.0.0:34602", "0.0.0.0:34601"}) // This a trick for testing. If metaCall issue to other meta, not only to the leader, this nil channel will cause panic. call.backupCh = nil metaResp, err := call.Run(context.Background()) @@ -126,3 +126,19 @@ func TestMetaManager_FirstMetaDead(t *testing.T) { assert.Equal(t, metaResp.GetErr().Errno, base.ERR_OK.String()) } } + +// This case mocks the case that the server primary meta is not in the client metalist. +// And the client will forward to the primary meta automatically. +func TestNodeSession_ForwardToPrimaryMeta(t *testing.T) { + defer leaktest.Check(t)() + + metaList := []string{"0.0.0.0:34601", "0.0.0.0:34602", "0.0.0.0:34603"} + + for i := 0; i < 3; i++ { + mm := NewMetaManager(metaList[i:i+1], NewNodeSession) + defer mm.Close() + resp, err := mm.QueryConfig(context.Background(), "temp") + assert.Nil(t, err) + assert.Equal(t, resp.Err.Errno, base.ERR_OK.String()) + } +} diff --git a/idl/backup.thrift b/idl/backup.thrift index 2fdeded174..a73fa12e12 100644 --- a/idl/backup.thrift +++ b/idl/backup.thrift @@ -77,7 +77,12 @@ struct configuration_modify_backup_policy_request 4:optional i64 new_backup_interval_sec; 5:optional i32 backup_history_count_to_keep; 6:optional bool is_disable; - 7:optional string start_time; // restrict the start time of each backup, hour:minute + + // Restrict the start time of each backup, in the form of 'hh:mm', for example '02:05'. + 7:optional string start_time; + + // Force disable the policy, even if the policy is in during backup. + 8:optional bool force_disable; } struct configuration_modify_backup_policy_response diff --git a/java-client/README.md b/java-client/README.md index 24229cbb83..c03a93541c 100644 --- a/java-client/README.md +++ b/java-client/README.md @@ -24,7 +24,7 @@ under the License. ### 1. Prepare ``` -cd scripts && bash recompile_thrift.sh +cd scripts && bash download_thrift.sh ``` ### 2. Format the code @@ -60,7 +60,7 @@ mvn clean package -Dtest=TestPing ### Install ``` -cd scripts && bash recompile_thrift.sh && cd - +cd scripts && bash download_thrift.sh && cd - mvn clean install -DskipTests ``` @@ -108,7 +108,7 @@ For each type of request(get, set, multiset, etc.), we collect 8 metrics: We use io.dropwizard.metrics library to calculate the request count. -Currently, metrics are integrated with open-falcon(https://open-falcon.org/), +Currently, metrics are integrated with open-falcon(https://github.com/open-falcon), which push counters to local http agent http://127.0.0.1:1988/push/v1. diff --git a/java-client/pom.xml b/java-client/pom.xml index 2a23c3e9a8..14afe22508 100644 --- a/java-client/pom.xml +++ b/java-client/pom.xml @@ -69,6 +69,7 @@ 4.1.85.Final 1.3.7-1 0.11.0 + 0.10.0 1.3.2 3.7.2 none @@ -470,6 +471,36 @@ + + org.apache.thrift + thrift-maven-plugin + ${thrift-maven-plugin.version} + + ${project.basedir}/../idl + + backup.thrift + bulk_load.thrift + dsn.layer2.thrift + duplication.thrift + metadata.thrift + meta_admin.thrift + partition_split.thrift + rrdb.thrift + security.thrift + + thrift + ${project.basedir}/src/main/java + + + + thrift-sources + + compile + + generate-sources + + + ${project.basedir}/src/main/java ${project.basedir}/src/test/java diff --git a/java-client/scripts/recompile_thrift.sh b/java-client/scripts/download_thrift.sh similarity index 73% rename from java-client/scripts/recompile_thrift.sh rename to java-client/scripts/download_thrift.sh index 6efcba8fae..ea5e4f6ae7 100755 --- a/java-client/scripts/recompile_thrift.sh +++ b/java-client/scripts/download_thrift.sh @@ -48,21 +48,4 @@ if ! $thrift -version | grep "0.11.0" ; then exit 1 fi -TMP_DIR=./gen-java -rm -rf $TMP_DIR - -mkdir -p $TMP_DIR -$thrift --gen java ../../idl/backup.thrift -$thrift --gen java ../../idl/bulk_load.thrift -$thrift --gen java ../../idl/dsn.layer2.thrift -$thrift --gen java ../../idl/duplication.thrift -$thrift --gen java ../../idl/metadata.thrift -$thrift --gen java ../../idl/meta_admin.thrift -$thrift --gen java ../../idl/partition_split.thrift -$thrift --gen java ../../idl/rrdb.thrift -$thrift --gen java ../../idl/security.thrift - -cp -v -r $TMP_DIR/* ../src/main/java/ -rm -rf $TMP_DIR - echo "done" diff --git a/java-client/src/main/java/org/apache/pegasus/client/PegasusTable.java b/java-client/src/main/java/org/apache/pegasus/client/PegasusTable.java index ef27a706c9..33a6cfc392 100644 --- a/java-client/src/main/java/org/apache/pegasus/client/PegasusTable.java +++ b/java-client/src/main/java/org/apache/pegasus/client/PegasusTable.java @@ -23,12 +23,7 @@ import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; +import java.util.*; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -89,6 +84,24 @@ public PegasusTable(PegasusClient client, Table table) { this.metaList = client.getMetaList(); } + /** + * Check if the given hash key is valid. + * + * @param hashKey is the hash key to be checked. + * @return the error message once the validation failed; otherwise return an empty Optional. + */ + private Optional validateHashKey(byte[] hashKey) { + if (hashKey == null || hashKey.length == 0) { + return Optional.of("Invalid parameter: hashKey should not be null or empty"); + } + + if (hashKey.length >= 0xFFFF) { + return Optional.of("Invalid parameter: hashKey length should be less than UINT16_MAX"); + } + + return Optional.empty(); + } + @Override public Future asyncExist(byte[] hashKey, byte[] sortKey, int timeout) { final DefaultPromise promise = table.newPromise(); @@ -110,13 +123,10 @@ public void operationComplete(Future future) throws Exception { @Override public Future asyncSortKeyCount(byte[] hashKey, int timeout) { final DefaultPromise promise = table.newPromise(); - if (hashKey == null || hashKey.length == 0) { - promise.setFailure(new PException("Invalid parameter: hashKey should not be null or empty")); - return promise; - } - if (hashKey.length >= 0xFFFF) { - promise.setFailure( - new PException("Invalid parameter: hashKey length should be less than UINT16_MAX")); + + Optional hashKeyValidationError = validateHashKey(hashKey); + if (hashKeyValidationError.isPresent()) { + promise.setFailure(new PException(hashKeyValidationError.get())); return promise; } @@ -246,13 +256,10 @@ private Future asyncMultiGet( boolean noValue, int timeout) { final DefaultPromise promise = table.newPromise(); - if (hashKey == null || hashKey.length == 0) { - promise.setFailure(new PException("Invalid parameter: hashKey should not be null or empty")); - return promise; - } - if (hashKey.length >= 0xFFFF) { - promise.setFailure( - new PException("Invalid parameter: hashKey length should be less than UINT16_MAX")); + + Optional hashKeyValidationError = validateHashKey(hashKey); + if (hashKeyValidationError.isPresent()) { + promise.setFailure(new PException(hashKeyValidationError.get())); return promise; } @@ -260,7 +267,7 @@ private Future asyncMultiGet( List sortKeyBlobs = new ArrayList(); Map setKeyMap = null; - if (sortKeys != null && sortKeys.size() > 0) { + if (sortKeys != null && !sortKeys.isEmpty()) { setKeyMap = new TreeMap(); for (int i = 0; i < sortKeys.size(); i++) { byte[] sortKey = sortKeys.get(i); @@ -365,13 +372,10 @@ public Future asyncMultiGet( int maxFetchSize, int timeout /* ms */) { final DefaultPromise promise = table.newPromise(); - if (hashKey == null || hashKey.length == 0) { - promise.setFailure(new PException("Invalid parameter: hashKey should not be null or empty")); - return promise; - } - if (hashKey.length >= 0xFFFF) { - promise.setFailure( - new PException("Invalid parameter: hashKey length should be less than UINT16_MAX")); + + Optional hashKeyValidationError = validateHashKey(hashKey); + if (hashKeyValidationError.isPresent()) { + promise.setFailure(new PException(hashKeyValidationError.get())); return promise; } @@ -483,13 +487,13 @@ public Future asyncMultiGetSortKeys(byte[] hashKey, int @Override public Future asyncBatchGet(batch_get_request request, int timeout) { final DefaultPromise promise = table.newPromise(); - if (request.keys.isEmpty()) { - promise.setFailure(new PException("Invalid parameter: hashKey should not be null or empty")); + if (request.keys == null || request.keys.isEmpty()) { + promise.setFailure( + new PException("Invalid parameter: full_key list should not be null or empty")); return promise; } for (full_key fullKey : request.keys) { - blob key = fullKey.hash_key; - if (key.data.length >= 0xFFFF) { + if (fullKey.hash_key.data.length >= 0xFFFF) { promise.setFailure( new PException("Invalid parameter: hashKey length should be less than UINT16_MAX")); return promise; @@ -543,16 +547,14 @@ public void onCompletion(client_operator clientOP) { public Future asyncMultiSet( byte[] hashKey, List> values, int ttlSeconds, int timeout) { final DefaultPromise promise = table.newPromise(); - if (hashKey == null || hashKey.length == 0) { - promise.setFailure(new PException("Invalid parameter: hashKey should not be null or empty")); - return promise; - } - if (hashKey.length >= 0xFFFF) { - promise.setFailure( - new PException("Invalid parameter: hashKey length should be less than UINT16_MAX")); + + Optional hashKeyValidationError = validateHashKey(hashKey); + if (hashKeyValidationError.isPresent()) { + promise.setFailure(new PException(hashKeyValidationError.get())); return promise; } - if (values == null || values.size() == 0) { + + if (values == null || values.isEmpty()) { promise.setFailure(new PException("Invalid parameter: values should not be null or empty")); return promise; } @@ -666,15 +668,13 @@ public void onCompletion(client_operator clientOP) { @Override public Future asyncMultiDel(byte[] hashKey, final List sortKeys, int timeout) { final DefaultPromise promise = table.newPromise(); - if (hashKey == null || hashKey.length == 0) { - promise.setFailure(new PException("Invalid parameter: hashKey should not be null or empty")); - return promise; - } - if (hashKey.length >= 0xFFFF) { - promise.setFailure( - new PException("Invalid parameter: hashKey length should be less than UINT16_MAX")); + + Optional hashKeyValidationError = validateHashKey(hashKey); + if (hashKeyValidationError.isPresent()) { + promise.setFailure(new PException(hashKeyValidationError.get())); return promise; } + if (sortKeys == null || sortKeys.isEmpty()) { promise.setFailure(new PException("Invalid parameter: sortKeys size should be at lease 1")); return promise; @@ -781,15 +781,13 @@ public Future asyncCheckAndSet( CheckAndSetOptions options, int timeout) { final DefaultPromise promise = table.newPromise(); - if (hashKey == null || hashKey.length == 0) { - promise.setFailure(new PException("Invalid parameter: hashKey should not be null or empty")); - return promise; - } - if (hashKey.length >= 0xFFFF) { - promise.setFailure( - new PException("Invalid parameter: hashKey length should be less than UINT16_MAX")); + + Optional hashKeyValidationError = validateHashKey(hashKey); + if (hashKeyValidationError.isPresent()) { + promise.setFailure(new PException(hashKeyValidationError.get())); return promise; } + if (options.setValueTTLSeconds < 0) { promise.setFailure(new PException("Invalid parameter: ttlSeconds should be no less than 0")); return promise; @@ -889,17 +887,14 @@ public Future asyncCheckAndMutate( Mutations mutations, CheckAndMutateOptions options, int timeout) { - final DefaultPromise promise = table.newPromise(); - if (hashKey == null || hashKey.length == 0) { - promise.setFailure(new PException("Invalid parameter: hashKey should not be null or empty")); - return promise; - } - if (hashKey.length >= 0xFFFF) { - promise.setFailure( - new PException("Invalid parameter: hashKey length should be less than UINT16_MAX")); + + Optional hashKeyValidationError = validateHashKey(hashKey); + if (hashKeyValidationError.isPresent()) { + promise.setFailure(new PException(hashKeyValidationError.get())); return promise; } + if (mutations == null || mutations.isEmpty()) { promise.setFailure( new PException("Invalid parameter: mutations should not be null or empty")); @@ -992,15 +987,13 @@ public Future asyncCompareExchange( int ttlSeconds, int timeout) { final DefaultPromise promise = table.newPromise(); - if (hashKey == null || hashKey.length == 0) { - promise.setFailure(new PException("Invalid parameter: hashKey should not be null or empty")); - return promise; - } - if (hashKey.length >= 0xFFFF) { - promise.setFailure( - new PException("Invalid parameter: hashKey length should be less than UINT16_MAX")); + + Optional hashKeyValidationError = validateHashKey(hashKey); + if (hashKeyValidationError.isPresent()) { + promise.setFailure(new PException(hashKeyValidationError.get())); return promise; } + if (ttlSeconds < 0) { promise.setFailure(new PException("Invalid parameter: ttlSeconds should be no less than 0")); return promise; @@ -1147,7 +1140,7 @@ public byte[] get(byte[] hashKey, byte[] sortKey, int timeout) throws PException @Override public void batchGet(List> keys, List values, int timeout) throws PException { - if (keys == null || keys.size() == 0) { + if (keys == null || keys.isEmpty()) { throw new PException("Invalid parameter: keys should not be null or empty"); } if (values == null) { @@ -1175,7 +1168,7 @@ public void batchGet(List> keys, List values, int t public int batchGetByPartitions( List> keys, List> results, int timeout) throws PException { - if (keys == null || keys.size() == 0) { + if (keys == null || keys.isEmpty()) { throw new PException("Invalid parameter: keys should not be null or empty"); } if (results == null) { @@ -1265,7 +1258,7 @@ public int batchGetByPartitions( public int batchGet2( List> keys, List> results, int timeout) throws PException { - if (keys == null || keys.size() == 0) { + if (keys == null || keys.isEmpty()) { throw new PException("Invalid parameter: keys should not be null or empty"); } if (results == null) { @@ -1392,7 +1385,7 @@ public MultiGetResult multiGet( public void batchMultiGet( List>> keys, List values, int timeout) throws PException { - if (keys == null || keys.size() == 0) { + if (keys == null || keys.isEmpty()) { throw new PException("Invalid parameter: keys should not be null or empty"); } if (values == null) { @@ -1426,7 +1419,7 @@ public int batchMultiGet2( List> results, int timeout) throws PException { - if (keys == null || keys.size() == 0) { + if (keys == null || keys.isEmpty()) { throw new PException("Invalid parameter: keys should not be null or empty"); } if (results == null) { @@ -1610,7 +1603,7 @@ public void multiSet(byte[] hashKey, List> values, int time @Override public void batchMultiSet(List items, int ttlSeconds, int timeout) throws PException { - if (items == null || items.size() == 0) { + if (items == null || items.isEmpty()) { throw new PException("Invalid parameter: items should not be null or empty"); } if (ttlSeconds < 0) { @@ -1683,7 +1676,7 @@ public void del(byte[] hashKey, byte[] sortKey, int timeout) throws PException { @Override public void batchDel(List> keys, int timeout) throws PException { - if (keys == null || keys.size() == 0) { + if (keys == null || keys.isEmpty()) { throw new PException("Invalid parameter: keys should not be null or empty"); } List> futures = new ArrayList>(); @@ -1850,7 +1843,7 @@ public void delRange( @Override public void batchMultiDel(List>> keys, int timeout) throws PException { - if (keys == null || keys.size() == 0) { + if (keys == null || keys.isEmpty()) { throw new PException("Invalid parameter: keys should not be null or empty"); } List> futures = new ArrayList>(); diff --git a/nodejs-client/src/dsn/dsn_types.js b/nodejs-client/src/dsn/dsn_types.js index 46eb508ffd..4f2ef0a6f0 100644 --- a/nodejs-client/src/dsn/dsn_types.js +++ b/nodejs-client/src/dsn/dsn_types.js @@ -256,6 +256,41 @@ rpc_address.prototype.equals = function(other){ return false; }; +// TODO(yingchun): host_port is now just a place holder and not well implemented, need improve it +var host_port_type = { + HOST_TYPE_INVALID : 0, + HOST_TYPE_IPV4 : 1, + HOST_TYPE_GROUP : 2 +}; + +var host_port = function(args) { + this.host = null; + this.port = 0; + this.type = host_port_type.HOST_TYPE_INVALID; + if(args && args.host){ + this.host = args.host; + } + if(args && args.port){ + this.port = args.port; + } + if(args && args.type){ + this.type = args.type; + } +}; + +host_port.prototype = {}; +host_port.prototype.read = function(input){ + this.host = input.readBinary(); + this.port = input.readI16(); + this.type = input.readByte(); +}; + +host_port.prototype.write = function(output){ + output.writeBinary(this.host); + output.writeI16(this.port); + output.writeByte(this.type); +}; + //value, calculate by app_id and partition index var gpid = function(args) { this.value = 0; @@ -298,6 +333,7 @@ module.exports = { error_code : error_code, task_code : task_code, rpc_address : rpc_address, + host_port : host_port, gpid : gpid, }; diff --git a/pegic/go.mod b/pegic/go.mod index 099f53deb4..c6ae27a6f4 100644 --- a/pegic/go.mod +++ b/pegic/go.mod @@ -43,9 +43,9 @@ require ( github.com/mattn/go-colorable v0.1.8 // indirect github.com/mattn/go-isatty v0.0.12 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect gopkg.in/yaml.v3 v3.0.0 // indirect - k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3 // indirect + k8s.io/apimachinery v0.16.13 // indirect ) diff --git a/pegic/go.sum b/pegic/go.sum index cb59e06972..1291249fe7 100644 --- a/pegic/go.sum +++ b/pegic/go.sum @@ -157,6 +157,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -204,8 +205,10 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96d github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -309,8 +312,8 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191105084925-a882066a44e0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -336,6 +339,7 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -343,8 +347,8 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -410,6 +414,7 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -419,12 +424,14 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3 h1:FErmbNIJruD5GT2oVEjtPn5Ar5+rcWJsC8/PPUkR0s4= k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apimachinery v0.16.13 h1:E40YK/NhqhUubG44ZHQULa4Pn+8NnXMAE6awvQ97Pyg= +k8s.io/apimachinery v0.16.13/go.mod h1:4HMHS3mDHtVttspuuhrJ1GGr/0S9B6iWYWZ57KnnZqQ= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-openapi v0.0.0-20200410163147-594e756bea31/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/python-client/pypegasus/base/ttypes.py b/python-client/pypegasus/base/ttypes.py index def909a0d6..6ed2e88dee 100644 --- a/python-client/pypegasus/base/ttypes.py +++ b/python-client/pypegasus/base/ttypes.py @@ -265,13 +265,13 @@ def __init__(self): def is_valid(self): return self.address == 0 - def from_string(self, host_port): - host, port = host_port.split(':') - self.address = socket.ntohl(struct.unpack("I", socket.inet_aton(host))[0]) + def from_string(self, ip_port): + ip, port = ip_port.split(':') + self.address = socket.ntohl(struct.unpack("I", socket.inet_aton(ip))[0]) self.address = (self.address << 32) + (int(port) << 16) + 1 # TODO why + 1? return True - def to_host_port(self): + def to_ip_port(self): s = [] address = self.address port = (address >> 16) & 0xFFFF @@ -305,6 +305,73 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) + +# TODO(yingchun): host_port is now just a place holder and not well implemented, need improve it +class host_port_types(Enum): + kHostTypeInvalid = 0 + kHostTypeIpv4 = 1 + kHostTypeGroup = 2 + + +class host_port: + + thrift_spec = ( + (1, TType.STRING, 'host', None, None, ), # 1 + (2, TType.I16, 'port', None, None, ), # 2 + (3, TType.I08, 'type', None, None, ), # 3 + ) + + def __init__(self): + self.host = "" + self.port = 0 + self.type = host_port_types.kHostTypeInvalid + + def is_valid(self): + return self.type != host_port_types.kHostTypeInvalid + + def from_string(self, host_port_str): + host_and_port = host_port_str.split(':') + if len(host_and_port) != 2: + return False + self.host = host_and_port[0] + self.port = int(host_and_port[1]) + # TODO(yingchun): Maybe it's not true, improve it + self.type = host_port_types.kHostTypeIpv4 + return True + + def to_host_port(self): + if not self.is_valid(): + return None, None + return self.host, self.port + + def read(self, iprot): + self.host = iprot.readString() + self.port = iprot.readI16() + self.type = iprot.readByte() + + def write(self, oprot): + oprot.writeString(self.host) + oprot.writeI16(self.port) + oprot.writeByte(self.type) + + def validate(self): + return + + def __hash__(self): + return hash(self.host) ^ self.port ^ self.type + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.items()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return other.__class__.__name__ == "host_port" and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + + class gpid: thrift_spec = ( diff --git a/python-client/pypegasus/pgclient.py b/python-client/pypegasus/pgclient.py index 457e99c8a1..2438483ba0 100644 --- a/python-client/pypegasus/pgclient.py +++ b/python-client/pypegasus/pgclient.py @@ -249,12 +249,12 @@ def __init__(self, table_name, timeout): def add_meta_server(self, meta_addr): rpc_addr = rpc_address() if rpc_addr.from_string(meta_addr): - host_port_list = meta_addr.split(':') - if not len(host_port_list) == 2: + ip_port = meta_addr.split(':') + if not len(ip_port) == 2: return False - host, port = host_port_list[0], int(host_port_list[1]) - self.addr_list.append((host, port)) + ip, port = ip_port[0], int(ip_port[1]) + self.addr_list.append((ip, port)) return True else: @@ -281,9 +281,9 @@ def got_results(self, res): def query(self): ds = [] - for (host, port) in self.addr_list: + for (ip, port) in self.addr_list: rpc_addr = rpc_address() - rpc_addr.from_string(host + ':' + str(port)) + rpc_addr.from_string(ip + ':' + str(port)) if rpc_addr in self.session_dict: self.session_dict[rpc_addr].close() @@ -294,7 +294,7 @@ def query(self): None, self, self.timeout - ).connectTCP(host, port, self.timeout) + ).connectTCP(ip, port, self.timeout) d.addCallbacks(self.got_conn, self.got_err) d.addCallbacks(self.query_one, self.got_err) ds.append(d) @@ -345,7 +345,7 @@ def update_cfg(self, resp): if rpc_addr in connected_rpc_addrs or rpc_addr.address == 0: continue - host, port = rpc_addr.to_host_port() + ip, port = rpc_addr.to_ip_port() if rpc_addr in self.session_dict: self.session_dict[rpc_addr].close() @@ -356,7 +356,7 @@ def update_cfg(self, resp): None, self.container, self.timeout - ).connectTCP(host, port, self.timeout) + ).connectTCP(ip, port, self.timeout) connected_rpc_addrs[rpc_addr] = 1 d.addCallbacks(self.got_conn, self.got_err) ds.append(d) @@ -642,8 +642,8 @@ def __init__(self, meta_addrs=None, table_name='', self.table = Table(table_name, self, timeout) self.meta_session_manager = MetaSessionManager(table_name, timeout) if isinstance(meta_addrs, list): - for host_port in meta_addrs: - self.meta_session_manager.add_meta_server(host_port) + for meta_addr in meta_addrs: + self.meta_session_manager.add_meta_server(meta_addr) PegasusHash.populate_table() self.timeout_times = 0 self.update_partition = False diff --git a/python-client/requirement.txt b/python-client/requirement.txt index 52e1707316..e785a8c169 100644 --- a/python-client/requirement.txt +++ b/python-client/requirement.txt @@ -1,5 +1,5 @@ Twisted==21.2.0 aenum==3.0.0 thrift==0.13.0 -pyOpenSSL==20.0.1 -cryptography==3.2 +pyOpenSSL==24.2.1 +cryptography==43.0.1 diff --git a/python-client/setup.py b/python-client/setup.py index 9b9d7c21aa..44553ce8a4 100644 --- a/python-client/setup.py +++ b/python-client/setup.py @@ -21,7 +21,7 @@ setup( name='pypegasus3', version=pypegasus.__version__, - install_requires=['Twisted==21.2.0', 'aenum==3.0.0', 'thrift==0.13.0', 'pyOpenSSL==20.0.1','cryptography==3.2'], + install_requires=['Twisted==21.2.0', 'aenum==3.0.0', 'thrift==0.13.0', 'pyOpenSSL==24.2.1','cryptography==43.0.1'], packages=find_packages(), package_data={'': ['logger.conf']}, platforms='any', diff --git a/rfcs/2020-08-27-metric-api.md b/rfcs/2020-08-27-metric-api.md index f09577fc51..2eb8543f96 100644 --- a/rfcs/2020-08-27-metric-api.md +++ b/rfcs/2020-08-27-metric-api.md @@ -25,7 +25,7 @@ This RFC proposes a new metric API in replace of the old perf-counter API. ## Motivation -The perf-counter API has bad naming convention to be parsed and queried over the external monitoring system like [Prometheus](https://prometheus.io/), or [open-falcon](http://open-falcon.org/). +The perf-counter API has bad naming convention to be parsed and queried over the external monitoring system like [Prometheus](https://prometheus.io/), or [open-falcon](https://github.com/open-falcon). Here are some examples of the perf-counter it exposes: diff --git a/run.sh b/run.sh index abfd90d1ad..f01b9c022f 100755 --- a/run.sh +++ b/run.sh @@ -24,7 +24,8 @@ ROOT="$(cd "$(dirname "$0")" && pwd)" export BUILD_ROOT_DIR=${ROOT}/build export BUILD_LATEST_DIR=${BUILD_ROOT_DIR}/latest export REPORT_DIR="$ROOT/test_report" -export THIRDPARTY_ROOT=$ROOT/thirdparty +# It's possible to specify THIRDPARTY_ROOT by setting the environment variable PEGASUS_THIRDPARTY_ROOT. +export THIRDPARTY_ROOT=${PEGASUS_THIRDPARTY_ROOT:-"$ROOT/thirdparty"} ARCH_TYPE='' arch_output=$(arch) if [ "$arch_output"x == "x86_64"x ]; then @@ -113,6 +114,7 @@ function usage_build() echo " --enable_rocksdb_portable build a portable rocksdb binary" echo " --test whether to build test binaries" echo " --iwyu specify the binary path of 'include-what-you-use' when build with IWYU" + echo " --cmake_only whether to run cmake only, default no" } function exit_if_fail() { @@ -130,6 +132,7 @@ function run_build() C_COMPILER="gcc" CXX_COMPILER="g++" BUILD_TYPE="release" + # TODO(yingchun): some boolean variables are using YES/NO, some are using ON/OFF, should be unified. CLEAR=NO CLEAR_THIRDPARTY=NO JOB_NUM=8 @@ -144,6 +147,7 @@ function run_build() BUILD_TEST=OFF IWYU="" BUILD_MODULES="" + CMAKE_ONLY=NO while [[ $# > 0 ]]; do key="$1" case $key in @@ -219,6 +223,9 @@ function run_build() IWYU="$2" shift ;; + --cmake_only) + CMAKE_ONLY=YES + ;; *) echo "ERROR: unknown option \"$key\"" echo @@ -317,8 +324,8 @@ function run_build() if [ ! -f "${ROOT}/src/common/serialization_helper/dsn.layer2_types.h" ]; then echo "Gen thrift" # TODO(yingchun): should be optimized - python3 $ROOT/scripts/compile_thrift.py - sh ${ROOT}/scripts/recompile_thrift.sh + python3 $ROOT/build_tools/compile_thrift.py + sh ${ROOT}/build_tools/recompile_thrift.sh fi if [ ! -d "$BUILD_DIR" ]; then @@ -352,6 +359,11 @@ function run_build() rm -f ${BUILD_LATEST_DIR} ln -s ${BUILD_DIR} ${BUILD_LATEST_DIR} + if [ "$CMAKE_ONLY" == "YES" ]; then + echo "CMake only, exit" + return + fi + echo "[$(date)] Building Pegasus ..." pushd $BUILD_DIR if [ ! -z "${IWYU}" ]; then @@ -644,7 +656,7 @@ function run_start_zk() fi fi - INSTALL_DIR="$INSTALL_DIR" PORT="$PORT" $ROOT/scripts/start_zk.sh + INSTALL_DIR="$INSTALL_DIR" PORT="$PORT" $ROOT/build_tools/start_zk.sh } ##################### @@ -681,7 +693,7 @@ function run_stop_zk() esac shift done - INSTALL_DIR="$INSTALL_DIR" $ROOT/scripts/stop_zk.sh + INSTALL_DIR="$INSTALL_DIR" $ROOT/build_tools/stop_zk.sh } ##################### @@ -718,7 +730,7 @@ function run_clear_zk() esac shift done - INSTALL_DIR="$INSTALL_DIR" $ROOT/scripts/clear_zk.sh + INSTALL_DIR="$INSTALL_DIR" $ROOT/build_tools/clear_zk.sh } ##################### @@ -841,7 +853,7 @@ function run_start_onebox() exit 1 fi - source "${ROOT}"/scripts/config_hdfs.sh + source "${ROOT}"/admin_tools/config_hdfs.sh if [ $USE_PRODUCT_CONFIG == "true" ]; then [ -z "${CONFIG_FILE}" ] && CONFIG_FILE=${ROOT}/src/server/config.ini [ ! -f "${CONFIG_FILE}" ] && { echo "${CONFIG_FILE} is not exist"; exit 1; } @@ -1085,7 +1097,7 @@ function run_start_onebox_instance() esac shift done - source "${ROOT}"/scripts/config_hdfs.sh + source "${ROOT}"/admin_tools/config_hdfs.sh if [ $META_ID = "0" -a $REPLICA_ID = "0" -a $COLLECTOR_ID = "0" ]; then echo "ERROR: no meta_id or replica_id or collector set" exit 1 @@ -1765,7 +1777,9 @@ function run_shell() cd ${ROOT} if [ -f ${ROOT}/bin/pegasus_shell/pegasus_shell ]; then # The pegasus_shell was packaged by pack_tools, to be used on production environment. - ln -s -f ${ROOT}/bin/pegasus_shell/pegasus_shell + if test ! -f ./pegasus_shell; then + ln -s -f ${ROOT}/bin/pegasus_shell/pegasus_shell + fi elif [ -f ${BUILD_LATEST_DIR}/output/bin/pegasus_shell/pegasus_shell ]; then # The pegasus_shell was built locally, to be used for test on development environment. ln -s -f ${BUILD_LATEST_DIR}/output/bin/pegasus_shell/pegasus_shell @@ -1873,9 +1887,9 @@ function run_migrate_node() cd ${ROOT} echo "------------------------------" if [ "$CLUSTER" != "" ]; then - ./scripts/migrate_node.sh $CLUSTER $NODE "$APP" $TYPE + ./admin_tools/migrate_node.sh $CLUSTER $NODE "$APP" $TYPE else - ./scripts/migrate_node.sh $CONFIG $NODE "$APP" $TYPE -f + ./admin_tools/migrate_node.sh $CONFIG $NODE "$APP" $TYPE -f fi echo "------------------------------" echo @@ -1981,9 +1995,9 @@ function run_downgrade_node() cd ${ROOT} echo "------------------------------" if [ "$CLUSTER" != "" ]; then - ./scripts/downgrade_node.sh $CLUSTER $NODE "$APP" $TYPE + ./admin_tools/downgrade_node.sh $CLUSTER $NODE "$APP" $TYPE else - ./scripts/downgrade_node.sh $CONFIG $NODE "$APP" $TYPE -f + ./admin_tools/downgrade_node.sh $CONFIG $NODE "$APP" $TYPE -f fi echo "------------------------------" echo @@ -2091,19 +2105,19 @@ case $cmd in ;; pack_server) shift - PEGASUS_ROOT=$ROOT ./scripts/pack_server.sh $* + PEGASUS_ROOT=$ROOT ./build_tools/pack_server.sh $* ;; pack_client) shift - PEGASUS_ROOT=$ROOT ./scripts/pack_client.sh $* + PEGASUS_ROOT=$ROOT ./build_tools/pack_client.sh $* ;; pack_tools) shift - PEGASUS_ROOT=$ROOT ./scripts/pack_tools.sh $* + PEGASUS_ROOT=$ROOT ./build_tools/pack_tools.sh $* ;; bump_version) shift - ./scripts/bump_version.sh $* + ./build_tools/bump_version.sh $* ;; *) echo "ERROR: unknown command $cmd" diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 1a90098773..55197062e2 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -41,6 +41,7 @@ add_subdirectory(client_lib) add_subdirectory(common) add_subdirectory(failure_detector) add_subdirectory(geo) +add_subdirectory(gutil) add_subdirectory(http) add_subdirectory(meta) add_subdirectory(nfs) @@ -49,12 +50,14 @@ add_subdirectory(ranger) add_subdirectory(redis_protocol) add_subdirectory(remote_cmd) add_subdirectory(replica) +add_subdirectory(rpc) add_subdirectory(runtime) add_subdirectory(sample) add_subdirectory(security) add_subdirectory(server) add_subdirectory(server/test) add_subdirectory(shell) +add_subdirectory(task) add_subdirectory(test_util) add_subdirectory(test/bench_test) add_subdirectory(test/function_test) diff --git a/src/aio/aio_task.cpp b/src/aio/aio_task.cpp index 112cb79eb9..e2026040da 100644 --- a/src/aio/aio_task.cpp +++ b/src/aio/aio_task.cpp @@ -26,10 +26,10 @@ #include "aio/file_io.h" #include "runtime/api_task.h" #include "runtime/service_engine.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_engine.h" -#include "runtime/task/task_spec.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_engine.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/error_code.h" diff --git a/src/aio/aio_task.h b/src/aio/aio_task.h index f8bf2ca5cf..cef55d7fa2 100644 --- a/src/aio/aio_task.h +++ b/src/aio/aio_task.h @@ -33,8 +33,8 @@ #include #include "runtime/api_task.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" diff --git a/src/aio/disk_engine.cpp b/src/aio/disk_engine.cpp index 1b104be301..ddc669515f 100644 --- a/src/aio/disk_engine.cpp +++ b/src/aio/disk_engine.cpp @@ -34,9 +34,9 @@ #include "aio/aio_provider.h" #include "aio/aio_task.h" #include "native_linux_aio_provider.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "runtime/tool_api.h" #include "utils/error_code.h" #include "utils/factory_store.h" diff --git a/src/aio/file_io.h b/src/aio/file_io.h index 0cef3f1b80..12f1479afe 100644 --- a/src/aio/file_io.h +++ b/src/aio/file_io.h @@ -33,9 +33,9 @@ #include "aio/aio_task.h" #include "runtime/api_task.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/join_point.h" diff --git a/src/aio/native_linux_aio_provider.cpp b/src/aio/native_linux_aio_provider.cpp index 7270dff733..597fddf835 100644 --- a/src/aio/native_linux_aio_provider.cpp +++ b/src/aio/native_linux_aio_provider.cpp @@ -32,7 +32,7 @@ #include "rocksdb/slice.h" #include "rocksdb/status.h" #include "runtime/service_engine.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" #include "utils/env.h" #include "utils/fmt_logging.h" #include "utils/latency_tracer.h" diff --git a/src/aio/test/aio.cpp b/src/aio/test/aio.cpp index 3e3c53d3e3..7dbff2d7d6 100644 --- a/src/aio/test/aio.cpp +++ b/src/aio/test/aio.cpp @@ -38,7 +38,7 @@ #include "aio/aio_task.h" #include "aio/file_io.h" #include "gtest/gtest.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "runtime/tool_api.h" #include "test_util/test_util.h" #include "utils/autoref_ptr.h" @@ -388,17 +388,18 @@ TEST_P(aio_test, dsn_file) uint64_t offset = 0; while (true) { aio_result rin; - aio_task_ptr tin = file::read(fin, - kUnitBuffer, - 1024, - offset, - LPC_AIO_TEST_READ, - nullptr, - [&rin](dsn::error_code err, size_t sz) { - rin.err = err; - rin.sz = sz; - }, - 0); + aio_task_ptr tin = file::read( + fin, + kUnitBuffer, + 1024, + offset, + LPC_AIO_TEST_READ, + nullptr, + [&rin](dsn::error_code err, size_t sz) { + rin.err = err; + rin.sz = sz; + }, + 0); ASSERT_NE(nullptr, tin); if (dsn::tools::get_current_tool()->name() != "simulator") { @@ -420,17 +421,18 @@ TEST_P(aio_test, dsn_file) } aio_result rout; - aio_task_ptr tout = file::write(fout, - kUnitBuffer, - rin.sz, - offset, - LPC_AIO_TEST_WRITE, - nullptr, - [&rout](dsn::error_code err, size_t sz) { - rout.err = err; - rout.sz = sz; - }, - 0); + aio_task_ptr tout = file::write( + fout, + kUnitBuffer, + rin.sz, + offset, + LPC_AIO_TEST_WRITE, + nullptr, + [&rout](dsn::error_code err, size_t sz) { + rout.err = err; + rout.sz = sz; + }, + 0); ASSERT_NE(nullptr, tout); tout->wait(); ASSERT_EQ(ERR_OK, rout.err); diff --git a/src/base/idl_utils.h b/src/base/idl_utils.h index bb04018102..88324929b0 100644 --- a/src/base/idl_utils.h +++ b/src/base/idl_utils.h @@ -23,16 +23,17 @@ #include "rrdb/rrdb_types.h" #include "utils/fmt_utils.h" +#include "gutil/map_util.h" namespace pegasus { inline std::string cas_check_type_to_string(dsn::apps::cas_check_type::type type) { - auto it = dsn::apps::_cas_check_type_VALUES_TO_NAMES.find(type); - if (it == dsn::apps::_cas_check_type_VALUES_TO_NAMES.end()) { - return std::string("INVALID=") + std::to_string(int(type)); + const auto *name = gutil::FindOrNull(dsn::apps::_cas_check_type_VALUES_TO_NAMES, type); + if (dsn_unlikely(name == nullptr)) { + return fmt::format("INVALID={}", type); } - return it->second; + return *name; } inline bool cas_is_check_operand_needed(dsn::apps::cas_check_type::type type) diff --git a/src/base/pegasus_rpc_types.h b/src/base/pegasus_rpc_types.h index 8a680d5b6f..5a34524447 100644 --- a/src/base/pegasus_rpc_types.h +++ b/src/base/pegasus_rpc_types.h @@ -17,7 +17,7 @@ * under the License. */ -#include "runtime/rpc/rpc_holder.h" +#include "rpc/rpc_holder.h" #include "meta_admin_types.h" #include "partition_split_types.h" #include "duplication_types.h" diff --git a/src/base/pegasus_utils.cpp b/src/base/pegasus_utils.cpp index e0859fe3d1..d0cd360342 100644 --- a/src/base/pegasus_utils.cpp +++ b/src/base/pegasus_utils.cpp @@ -19,35 +19,14 @@ #include "pegasus_utils.h" -#include -#include -#include #include -#include #include -#include "runtime/rpc/rpc_address.h" #include "utils/fmt_logging.h" namespace pegasus { namespace utils { -void addr2host(const ::dsn::rpc_address &addr, char *str, int len /* = 100*/) -{ - struct sockaddr_in addr2; - addr2.sin_addr.s_addr = htonl(addr.ip()); - addr2.sin_family = AF_INET; - if (getnameinfo((struct sockaddr *)&addr2, - sizeof(sockaddr), - str, - sizeof(char *) * len, - nullptr, - 0, - NI_NAMEREQD)) { - inet_ntop(AF_INET, &(addr2.sin_addr), str, 100); - } -} - size_t c_escape_string(const char *src, size_t src_len, char *dest, size_t dest_len, bool always_escape) { diff --git a/src/base/pegasus_utils.h b/src/base/pegasus_utils.h index 0df0bee3e4..adb0788884 100644 --- a/src/base/pegasus_utils.h +++ b/src/base/pegasus_utils.h @@ -28,15 +28,11 @@ #include #include +#include #include "utils/flags.h" -#include "absl/strings/string_view.h" DSN_DECLARE_bool(encrypt_data_at_rest); -namespace dsn { -class rpc_address; -} // namespace dsn - namespace pegasus { namespace utils { @@ -45,9 +41,6 @@ const uint32_t epoch_begin = 1451606400; inline uint32_t epoch_now() { return time(nullptr) - epoch_begin; } const static std::string kRedactedString = ""; -// extract "host" from rpc_address -void addr2host(const ::dsn::rpc_address &addr, char *str, int len); - template > class top_n { @@ -129,9 +122,9 @@ const std::string &redact_sensitive_string(const T &src) } } -inline absl::string_view to_string_view(rocksdb::Slice s) { return {s.data(), s.size()}; } +inline std::string_view to_string_view(rocksdb::Slice s) { return {s.data(), s.size()}; } -inline rocksdb::Slice to_rocksdb_slice(absl::string_view s) { return {s.data(), s.size()}; } +inline rocksdb::Slice to_rocksdb_slice(std::string_view s) { return {s.data(), s.size()}; } } // namespace utils } // namespace pegasus diff --git a/src/base/pegasus_value_schema.h b/src/base/pegasus_value_schema.h index d8f651b163..c95ceedbd3 100644 --- a/src/base/pegasus_value_schema.h +++ b/src/base/pegasus_value_schema.h @@ -32,7 +32,7 @@ #include "utils/blob.h" #include "utils/endians.h" #include "utils/fmt_logging.h" -#include "absl/strings/string_view.h" +#include #include "value_field.h" namespace pegasus { @@ -55,7 +55,7 @@ inline uint64_t extract_timestamp_from_timetag(uint64_t timetag) /// Extracts expire_ts from rocksdb value with given version. /// The value schema must be in v0 or v1. /// \return expire_ts in host endian -inline uint32_t pegasus_extract_expire_ts(uint32_t version, absl::string_view value) +inline uint32_t pegasus_extract_expire_ts(uint32_t version, std::string_view value) { CHECK_LE(version, PEGASUS_DATA_VERSION_MAX); return dsn::data_input(value).read_u32(); @@ -76,7 +76,7 @@ pegasus_extract_user_data(uint32_t version, std::string &&raw_value, ::dsn::blob if (version == 1) { input.skip(sizeof(uint64_t)); } - absl::string_view view = input.read_str(); + std::string_view view = input.read_str(); // tricky code to avoid memory copy std::shared_ptr buf(const_cast(view.data()), [s](char *) { delete s; }); @@ -84,7 +84,7 @@ pegasus_extract_user_data(uint32_t version, std::string &&raw_value, ::dsn::blob } /// Extracts timetag from a v1 value. -inline uint64_t pegasus_extract_timetag(int version, absl::string_view value) +inline uint64_t pegasus_extract_timetag(int version, std::string_view value) { CHECK_EQ(version, 1); @@ -118,7 +118,7 @@ inline bool check_if_ts_expired(uint32_t epoch_now, uint32_t expire_ts) /// \return true if expired inline bool check_if_record_expired(uint32_t value_schema_version, uint32_t epoch_now, - absl::string_view raw_value) + std::string_view raw_value) { return check_if_ts_expired(epoch_now, pegasus_extract_expire_ts(value_schema_version, raw_value)); @@ -136,7 +136,7 @@ class pegasus_value_generator /// A higher level utility for generating value with given version. /// The value schema must be in v0 or v1. rocksdb::SliceParts generate_value(uint32_t value_schema_version, - absl::string_view user_data, + std::string_view user_data, uint32_t expire_ts, uint64_t timetag) { @@ -157,7 +157,7 @@ class pegasus_value_generator /// /// rocksdb value (ver 0) = [expire_ts(uint32_t)] [user_data(bytes)] /// \internal - rocksdb::SliceParts generate_value_v0(uint32_t expire_ts, absl::string_view user_data) + rocksdb::SliceParts generate_value_v0(uint32_t expire_ts, std::string_view user_data) { _write_buf.resize(sizeof(uint32_t)); _write_slices.clear(); @@ -210,7 +210,7 @@ class pegasus_value_generator /// /// \internal rocksdb::SliceParts - generate_value_v1(uint32_t expire_ts, uint64_t timetag, absl::string_view user_data) + generate_value_v1(uint32_t expire_ts, uint64_t timetag, std::string_view user_data) { _write_buf.resize(sizeof(uint32_t) + sizeof(uint64_t)); _write_slices.clear(); @@ -258,7 +258,7 @@ class value_schema public: virtual ~value_schema() = default; - virtual std::unique_ptr extract_field(absl::string_view value, + virtual std::unique_ptr extract_field(std::string_view value, value_field_type type) = 0; /// Extracts user value from the raw rocksdb value. /// In order to avoid data copy, the ownership of `raw_value` will be transferred diff --git a/src/base/test/value_manager_test.cpp b/src/base/test/value_manager_test.cpp index 35c1ce488c..f9e8a8dfd0 100644 --- a/src/base/test/value_manager_test.cpp +++ b/src/base/test/value_manager_test.cpp @@ -23,7 +23,7 @@ #include "base/value_schema_manager.h" #include "gtest/gtest.h" #include "pegasus_value_schema.h" -#include "absl/strings/string_view.h" +#include #include "value_field.h" using namespace pegasus; @@ -31,7 +31,7 @@ using namespace pegasus; extern std::string generate_value(value_schema *schema, uint32_t expire_ts, uint64_t time_tag, - absl::string_view user_data); + std::string_view user_data); TEST(value_schema_manager, get_latest_value_schema) { diff --git a/src/base/test/value_schema_test.cpp b/src/base/test/value_schema_test.cpp index 07ecf5f547..aa39bffccd 100644 --- a/src/base/test/value_schema_test.cpp +++ b/src/base/test/value_schema_test.cpp @@ -30,7 +30,7 @@ #include "base/value_schema_manager.h" #include "gtest/gtest.h" #include "utils/blob.h" -#include "absl/strings/string_view.h" +#include #include "value_field.h" using namespace pegasus; @@ -52,7 +52,7 @@ uint64_t extract_time_tag(value_schema *schema, const std::string &raw_value) std::string generate_value(value_schema *schema, uint32_t expire_ts, uint64_t time_tag, - absl::string_view user_data) + std::string_view user_data) { std::string write_buf; std::vector write_slices; @@ -117,7 +117,9 @@ TEST(value_schema, update_expire_ts) uint32_t expire_ts; uint32_t update_expire_ts; } tests[] = { - {0, 1000, 10086}, {1, 1000, 10086}, {2, 1000, 10086}, + {0, 1000, 10086}, + {1, 1000, 10086}, + {2, 1000, 10086}, }; for (const auto &t : tests) { diff --git a/src/base/value_field.h b/src/base/value_field.h index 11e99b8939..f5319a2261 100644 --- a/src/base/value_field.h +++ b/src/base/value_field.h @@ -56,9 +56,9 @@ struct time_tag_field : public value_field struct user_data_field : public value_field { - explicit user_data_field(absl::string_view data) : user_data(data) {} + explicit user_data_field(std::string_view data) : user_data(data) {} value_field_type type() { return value_field_type::USER_DATA; } - absl::string_view user_data; + std::string_view user_data; }; } // namespace pegasus diff --git a/src/base/value_schema_manager.cpp b/src/base/value_schema_manager.cpp index 2ce2f1db66..491230bdfa 100644 --- a/src/base/value_schema_manager.cpp +++ b/src/base/value_schema_manager.cpp @@ -19,7 +19,7 @@ #include "value_schema_manager.h" -#include +#include #include #include @@ -47,7 +47,7 @@ void value_schema_manager::register_schema(std::unique_ptr schema) } value_schema *value_schema_manager::get_value_schema(uint32_t meta_cf_data_version, - absl::string_view value) const + std::string_view value) const { dsn::data_input input(value); uint8_t first_byte = input.read_u8(); diff --git a/src/base/value_schema_manager.h b/src/base/value_schema_manager.h index 5afeef5740..86b834bfd6 100644 --- a/src/base/value_schema_manager.h +++ b/src/base/value_schema_manager.h @@ -25,7 +25,7 @@ #include "pegasus_value_schema.h" #include "utils/singleton.h" -#include "absl/strings/string_view.h" +#include namespace pegasus { @@ -35,7 +35,7 @@ class value_schema_manager : public dsn::utils::singleton void register_schema(std::unique_ptr schema); /// using the raw value in rocksdb and data version stored in meta column family to get data /// version - value_schema *get_value_schema(uint32_t meta_cf_data_version, absl::string_view value) const; + value_schema *get_value_schema(uint32_t meta_cf_data_version, std::string_view value) const; value_schema *get_value_schema(uint32_t version) const; value_schema *get_latest_value_schema() const; diff --git a/src/base/value_schema_v0.cpp b/src/base/value_schema_v0.cpp index b7cc30b5dc..1d9d0433bc 100644 --- a/src/base/value_schema_v0.cpp +++ b/src/base/value_schema_v0.cpp @@ -19,7 +19,7 @@ #include "value_schema_v0.h" -#include +#include #include #include #include @@ -32,7 +32,7 @@ #include "utils/ports.h" namespace pegasus { -std::unique_ptr value_schema_v0::extract_field(absl::string_view value, +std::unique_ptr value_schema_v0::extract_field(std::string_view value, value_field_type type) { std::unique_ptr field = nullptr; @@ -80,14 +80,14 @@ rocksdb::SliceParts value_schema_v0::generate_value(const value_params ¶ms) params.write_slices.clear(); params.write_slices.emplace_back(params.write_buf.data(), params.write_buf.size()); - absl::string_view user_data = data_field->user_data; + std::string_view user_data = data_field->user_data; if (user_data.length() > 0) { params.write_slices.emplace_back(user_data.data(), user_data.length()); } return {¶ms.write_slices[0], static_cast(params.write_slices.size())}; } -std::unique_ptr value_schema_v0::extract_timestamp(absl::string_view value) +std::unique_ptr value_schema_v0::extract_timestamp(std::string_view value) { uint32_t expire_ts = dsn::data_input(value).read_u32(); return std::make_unique(expire_ts); diff --git a/src/base/value_schema_v0.h b/src/base/value_schema_v0.h index 683afe8f92..750136916d 100644 --- a/src/base/value_schema_v0.h +++ b/src/base/value_schema_v0.h @@ -25,7 +25,7 @@ #include "pegasus_value_schema.h" #include "utils/blob.h" -#include "absl/strings/string_view.h" +#include #include "value_field.h" namespace pegasus { @@ -37,7 +37,7 @@ class value_schema_v0 : public value_schema public: value_schema_v0() = default; - std::unique_ptr extract_field(absl::string_view value, + std::unique_ptr extract_field(std::string_view value, value_field_type type) override; dsn::blob extract_user_data(std::string &&value) override; void update_field(std::string &value, std::unique_ptr field) override; @@ -45,7 +45,7 @@ class value_schema_v0 : public value_schema data_version version() const override { return data_version::VERSION_0; } private: - std::unique_ptr extract_timestamp(absl::string_view value); + std::unique_ptr extract_timestamp(std::string_view value); void update_expire_ts(std::string &value, std::unique_ptr field); }; } // namespace pegasus diff --git a/src/base/value_schema_v1.cpp b/src/base/value_schema_v1.cpp index 9e00369176..50ac2ba5bd 100644 --- a/src/base/value_schema_v1.cpp +++ b/src/base/value_schema_v1.cpp @@ -19,7 +19,7 @@ #include "value_schema_v1.h" -#include +#include #include #include #include @@ -32,7 +32,7 @@ #include "utils/ports.h" namespace pegasus { -std::unique_ptr value_schema_v1::extract_field(absl::string_view value, +std::unique_ptr value_schema_v1::extract_field(std::string_view value, value_field_type type) { std::unique_ptr field = nullptr; @@ -88,20 +88,20 @@ rocksdb::SliceParts value_schema_v1::generate_value(const value_params ¶ms) params.write_slices.clear(); params.write_slices.emplace_back(params.write_buf.data(), params.write_buf.size()); - absl::string_view user_data = data_field->user_data; + std::string_view user_data = data_field->user_data; if (user_data.length() > 0) { params.write_slices.emplace_back(user_data.data(), user_data.length()); } return {¶ms.write_slices[0], static_cast(params.write_slices.size())}; } -std::unique_ptr value_schema_v1::extract_timestamp(absl::string_view value) +std::unique_ptr value_schema_v1::extract_timestamp(std::string_view value) { uint32_t expire_ts = dsn::data_input(value).read_u32(); return std::make_unique(expire_ts); } -std::unique_ptr value_schema_v1::extract_time_tag(absl::string_view value) +std::unique_ptr value_schema_v1::extract_time_tag(std::string_view value) { dsn::data_input input(value); input.skip(sizeof(uint32_t)); diff --git a/src/base/value_schema_v1.h b/src/base/value_schema_v1.h index 7811b49f6b..88e8a03373 100644 --- a/src/base/value_schema_v1.h +++ b/src/base/value_schema_v1.h @@ -25,7 +25,7 @@ #include "pegasus_value_schema.h" #include "utils/blob.h" -#include "absl/strings/string_view.h" +#include #include "value_field.h" namespace pegasus { @@ -37,7 +37,7 @@ class value_schema_v1 : public value_schema public: value_schema_v1() = default; - std::unique_ptr extract_field(absl::string_view value, + std::unique_ptr extract_field(std::string_view value, value_field_type type) override; dsn::blob extract_user_data(std::string &&value) override; void update_field(std::string &value, std::unique_ptr field) override; @@ -45,8 +45,8 @@ class value_schema_v1 : public value_schema data_version version() const override { return data_version::VERSION_1; } private: - std::unique_ptr extract_timestamp(absl::string_view value); - std::unique_ptr extract_time_tag(absl::string_view value); + std::unique_ptr extract_timestamp(std::string_view value); + std::unique_ptr extract_time_tag(std::string_view value); void update_expire_ts(std::string &value, std::unique_ptr field); }; diff --git a/src/base/value_schema_v2.cpp b/src/base/value_schema_v2.cpp index 400cad75e7..82f4942a74 100644 --- a/src/base/value_schema_v2.cpp +++ b/src/base/value_schema_v2.cpp @@ -19,7 +19,7 @@ #include "value_schema_v2.h" -#include +#include #include #include #include @@ -33,7 +33,7 @@ namespace pegasus { -std::unique_ptr value_schema_v2::extract_field(absl::string_view value, +std::unique_ptr value_schema_v2::extract_field(std::string_view value, value_field_type type) { std::unique_ptr field = nullptr; @@ -91,21 +91,21 @@ rocksdb::SliceParts value_schema_v2::generate_value(const value_params ¶ms) params.write_slices.clear(); params.write_slices.emplace_back(params.write_buf.data(), params.write_buf.size()); - absl::string_view user_data = data_field->user_data; + std::string_view user_data = data_field->user_data; if (user_data.length() > 0) { params.write_slices.emplace_back(user_data.data(), user_data.length()); } return {¶ms.write_slices[0], static_cast(params.write_slices.size())}; } -std::unique_ptr value_schema_v2::extract_timestamp(absl::string_view value) +std::unique_ptr value_schema_v2::extract_timestamp(std::string_view value) { dsn::data_input input(value); input.skip(sizeof(uint8_t)); return std::make_unique(input.read_u32()); } -std::unique_ptr value_schema_v2::extract_time_tag(absl::string_view value) +std::unique_ptr value_schema_v2::extract_time_tag(std::string_view value) { dsn::data_input input(value); input.skip(sizeof(uint8_t)); diff --git a/src/base/value_schema_v2.h b/src/base/value_schema_v2.h index c3101baf3a..5415c5d648 100644 --- a/src/base/value_schema_v2.h +++ b/src/base/value_schema_v2.h @@ -25,7 +25,7 @@ #include "pegasus_value_schema.h" #include "utils/blob.h" -#include "absl/strings/string_view.h" +#include #include "value_field.h" namespace pegasus { @@ -38,7 +38,7 @@ class value_schema_v2 : public value_schema public: value_schema_v2() = default; - std::unique_ptr extract_field(absl::string_view value, + std::unique_ptr extract_field(std::string_view value, value_field_type type) override; dsn::blob extract_user_data(std::string &&value) override; void update_field(std::string &value, std::unique_ptr field) override; @@ -46,8 +46,8 @@ class value_schema_v2 : public value_schema data_version version() const override { return data_version::VERSION_2; } private: - std::unique_ptr extract_timestamp(absl::string_view value); - std::unique_ptr extract_time_tag(absl::string_view value); + std::unique_ptr extract_timestamp(std::string_view value); + std::unique_ptr extract_time_tag(std::string_view value); void update_expire_ts(std::string &value, std::unique_ptr field); }; } // namespace pegasus diff --git a/src/block_service/block_service.h b/src/block_service/block_service.h index d351dcf44a..aa6b4b26cb 100644 --- a/src/block_service/block_service.h +++ b/src/block_service/block_service.h @@ -19,21 +19,21 @@ #pragma once -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "runtime/api_task.h" #include "runtime/api_layer1.h" #include "runtime/app_model.h" #include "utils/api_utilities.h" #include "utils/error_code.h" #include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "common/gpid.h" -#include "runtime/rpc/serialization.h" -#include "runtime/rpc/rpc_stream.h" +#include "rpc/serialization.h" +#include "rpc/rpc_stream.h" #include "runtime/serverlet.h" #include "runtime/service_app.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" #include "common/replication_other_types.h" #include "common/replication.codes.h" #include @@ -422,6 +422,6 @@ class block_file : public dsn::ref_counter protected: std::string _name; }; -} -} -} +} // namespace block_service +} // namespace dist +} // namespace dsn diff --git a/src/block_service/block_service_manager.cpp b/src/block_service/block_service_manager.cpp index f77361cac7..9ca1ed9561 100644 --- a/src/block_service/block_service_manager.cpp +++ b/src/block_service/block_service_manager.cpp @@ -25,8 +25,8 @@ #include "block_service/hdfs/hdfs_service.h" #include "block_service/local/local_service.h" #include "fmt/core.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/config_api.h" #include "utils/factory_store.h" #include "utils/filesystem.h" @@ -108,10 +108,11 @@ static create_file_response create_block_file_sync(const std::string &remote_fil task_tracker *tracker) { create_file_response ret; - fs->create_file(create_file_request{remote_file_path, ignore_meta}, - TASK_CODE_EXEC_INLINED, - [&ret](const create_file_response &resp) { ret = resp; }, - tracker); + fs->create_file( + create_file_request{remote_file_path, ignore_meta}, + TASK_CODE_EXEC_INLINED, + [&ret](const create_file_response &resp) { ret = resp; }, + tracker); tracker->wait_outstanding_tasks(); return ret; } @@ -120,10 +121,11 @@ static download_response download_block_file_sync(const std::string &local_file_path, block_file *bf, task_tracker *tracker) { download_response ret; - bf->download(download_request{local_file_path, 0, -1}, - TASK_CODE_EXEC_INLINED, - [&ret](const download_response &resp) { ret = resp; }, - tracker); + bf->download( + download_request{local_file_path, 0, -1}, + TASK_CODE_EXEC_INLINED, + [&ret](const download_response &resp) { ret = resp; }, + tracker); tracker->wait_outstanding_tasks(); return ret; } diff --git a/src/block_service/hdfs/hdfs_service.cpp b/src/block_service/hdfs/hdfs_service.cpp index e303710496..987195314b 100644 --- a/src/block_service/hdfs/hdfs_service.cpp +++ b/src/block_service/hdfs/hdfs_service.cpp @@ -29,8 +29,8 @@ #include "hdfs_service.h" #include "rocksdb/slice.h" #include "rocksdb/status.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" +#include "task/async_calls.h" +#include "task/task.h" #include "utils/TokenBucket.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" @@ -41,6 +41,7 @@ #include "utils/fmt_logging.h" #include "utils/safe_strerror_posix.h" #include "utils/strings.h" +#include "utils/utils.h" DSN_DEFINE_uint64(replication, hdfs_read_batch_size_bytes, @@ -435,8 +436,8 @@ dsn::task_ptr hdfs_file_object::upload(const upload_request &req, } rocksdb::Slice result; - char scratch[file_size]; - s = rfile->Read(file_size, &result, scratch); + auto scratch = dsn::utils::make_shared_array(file_size); + s = rfile->Read(file_size, &result, scratch.get()); if (!s.ok()) { LOG_ERROR( "read local file '{}' failed, err = {}", req.input_local_name, s.ToString()); diff --git a/src/block_service/hdfs/hdfs_service.h b/src/block_service/hdfs/hdfs_service.h index bc3da5b519..b8a7bf7392 100644 --- a/src/block_service/hdfs/hdfs_service.h +++ b/src/block_service/hdfs/hdfs_service.h @@ -26,8 +26,8 @@ #include #include "block_service/block_service.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/TokenBucket.h" #include "utils/error_code.h" diff --git a/src/block_service/local/local_service.cpp b/src/block_service/local/local_service.cpp index f0dc35f141..d7aa4e9ca9 100644 --- a/src/block_service/local/local_service.cpp +++ b/src/block_service/local/local_service.cpp @@ -21,12 +21,12 @@ #include #include -#include "absl/strings/string_view.h" +#include #include "local_service.h" #include "nlohmann/json.hpp" #include "rocksdb/slice.h" #include "rocksdb/status.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/env.h" @@ -279,7 +279,7 @@ dsn::task_ptr local_file_object::write(const write_request &req, write_future_ptr tsk(new write_future(code, cb, 0)); tsk->set_tracker(tracker); - FAIL_POINT_INJECT_F("mock_local_service_write_failed", [=](absl::string_view) { + FAIL_POINT_INJECT_F("mock_local_service_write_failed", [=](std::string_view) { auto write_failed = [=]() { write_response resp; resp.err = ERR_FS_INTERNAL; diff --git a/src/block_service/local/local_service.h b/src/block_service/local/local_service.h index c67b9913e3..a505bea4e9 100644 --- a/src/block_service/local/local_service.h +++ b/src/block_service/local/local_service.h @@ -25,8 +25,8 @@ #include #include "block_service/block_service.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/error_code.h" namespace dsn { @@ -113,6 +113,6 @@ class local_file_object : public block_file std::string _md5_value; bool _has_meta_synced; }; -} -} -} +} // namespace block_service +} // namespace dist +} // namespace dsn diff --git a/src/block_service/test/hdfs_service_test.cpp b/src/block_service/test/hdfs_service_test.cpp index f96549ec98..66333c5f9d 100644 --- a/src/block_service/test/hdfs_service_test.cpp +++ b/src/block_service/test/hdfs_service_test.cpp @@ -32,10 +32,10 @@ #include "block_service/hdfs/hdfs_service.h" #include "gtest/gtest.h" #include "runtime/api_layer1.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "test_util/test_util.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" @@ -144,20 +144,22 @@ TEST_P(HDFSClientTest, test_hdfs_read_write) // 1. clean up all old file in remote test directory. printf("clean up all old files.\n"); remove_path_response rem_resp; - s->remove_path(remove_path_request{kRemoteTestPath, true}, - LPC_TEST_HDFS, - [&rem_resp](const remove_path_response &resp) { rem_resp = resp; }, - nullptr) + s->remove_path( + remove_path_request{kRemoteTestPath, true}, + LPC_TEST_HDFS, + [&rem_resp](const remove_path_response &resp) { rem_resp = resp; }, + nullptr) ->wait(); ASSERT_TRUE(dsn::ERR_OK == rem_resp.err || dsn::ERR_OBJECT_NOT_FOUND == rem_resp.err); // 2. create file. printf("test write operation.\n"); create_file_response cf_resp; - s->create_file(create_file_request{kRemoteTestRWFile, false}, - LPC_TEST_HDFS, - [&cf_resp](const create_file_response &r) { cf_resp = r; }, - nullptr) + s->create_file( + create_file_request{kRemoteTestRWFile, false}, + LPC_TEST_HDFS, + [&cf_resp](const create_file_response &r) { cf_resp = r; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, cf_resp.err); @@ -165,10 +167,11 @@ TEST_P(HDFSClientTest, test_hdfs_read_write) dsn::blob bb(kTestBuffer.c_str(), 0, kTestBufferLength); write_response w_resp; cf_resp.file_handle - ->write(write_request{bb}, - LPC_TEST_HDFS, - [&w_resp](const write_response &w) { w_resp = w; }, - nullptr) + ->write( + write_request{bb}, + LPC_TEST_HDFS, + [&w_resp](const write_response &w) { w_resp = w; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, w_resp.err); ASSERT_EQ(kTestBufferLength, w_resp.written_size); @@ -178,10 +181,11 @@ TEST_P(HDFSClientTest, test_hdfs_read_write) printf("test read just written contents.\n"); read_response r_resp; cf_resp.file_handle - ->read(read_request{0, -1}, - LPC_TEST_HDFS, - [&r_resp](const read_response &r) { r_resp = r; }, - nullptr) + ->read( + read_request{0, -1}, + LPC_TEST_HDFS, + [&r_resp](const read_response &r) { r_resp = r; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, r_resp.err); ASSERT_EQ(kTestBufferLength, r_resp.buffer.length()); @@ -191,10 +195,11 @@ TEST_P(HDFSClientTest, test_hdfs_read_write) const uint64_t kOffset = 5; const int64_t kSize = 10; cf_resp.file_handle - ->read(read_request{kOffset, kSize}, - LPC_TEST_HDFS, - [&r_resp](const read_response &r) { r_resp = r; }, - nullptr) + ->read( + read_request{kOffset, kSize}, + LPC_TEST_HDFS, + [&r_resp](const read_response &r) { r_resp = r; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, r_resp.err); ASSERT_EQ(kSize, r_resp.buffer.length()); @@ -225,40 +230,44 @@ TEST_P(HDFSClientTest, test_upload_and_download) // 1. clean up all old file in remote test directory. printf("clean up all old files.\n"); remove_path_response rem_resp; - s->remove_path(remove_path_request{kRemoteTestPath, true}, - LPC_TEST_HDFS, - [&rem_resp](const remove_path_response &resp) { rem_resp = resp; }, - nullptr) + s->remove_path( + remove_path_request{kRemoteTestPath, true}, + LPC_TEST_HDFS, + [&rem_resp](const remove_path_response &resp) { rem_resp = resp; }, + nullptr) ->wait(); ASSERT_TRUE(dsn::ERR_OK == rem_resp.err || dsn::ERR_OBJECT_NOT_FOUND == rem_resp.err); // 2. create file. fmt::printf("create and upload: {}.\n", kRemoteTestFile); create_file_response cf_resp; - s->create_file(create_file_request{kRemoteTestFile, true}, - LPC_TEST_HDFS, - [&cf_resp](const create_file_response &r) { cf_resp = r; }, - nullptr) + s->create_file( + create_file_request{kRemoteTestFile, true}, + LPC_TEST_HDFS, + [&cf_resp](const create_file_response &r) { cf_resp = r; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, cf_resp.err); // 3. upload file. upload_response u_resp; cf_resp.file_handle - ->upload(upload_request{kLocalFile}, - LPC_TEST_HDFS, - [&u_resp](const upload_response &r) { u_resp = r; }, - nullptr) + ->upload( + upload_request{kLocalFile}, + LPC_TEST_HDFS, + [&u_resp](const upload_response &r) { u_resp = r; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, u_resp.err); ASSERT_EQ(local_file_size, cf_resp.file_handle->get_size()); // 4. list directory. ls_response l_resp; - s->list_dir(ls_request{kRemoteTestPath}, - LPC_TEST_HDFS, - [&l_resp](const ls_response &resp) { l_resp = resp; }, - nullptr) + s->list_dir( + ls_request{kRemoteTestPath}, + LPC_TEST_HDFS, + [&l_resp](const ls_response &resp) { l_resp = resp; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, l_resp.err); ASSERT_EQ(1, l_resp.entries->size()); @@ -268,19 +277,21 @@ TEST_P(HDFSClientTest, test_upload_and_download) // 5. download file. download_response d_resp; fmt::printf("test download {}.\n", kRemoteTestFile); - s->create_file(create_file_request{kRemoteTestFile, false}, - LPC_TEST_HDFS, - [&cf_resp](const create_file_response &resp) { cf_resp = resp; }, - nullptr) + s->create_file( + create_file_request{kRemoteTestFile, false}, + LPC_TEST_HDFS, + [&cf_resp](const create_file_response &resp) { cf_resp = resp; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, cf_resp.err); ASSERT_EQ(local_file_size, cf_resp.file_handle->get_size()); std::string kLocalDownloadFile = "test_file_d"; cf_resp.file_handle - ->download(download_request{kLocalDownloadFile, 0, -1}, - LPC_TEST_HDFS, - [&d_resp](const download_response &resp) { d_resp = resp; }, - nullptr) + ->download( + download_request{kLocalDownloadFile, 0, -1}, + LPC_TEST_HDFS, + [&d_resp](const download_response &resp) { d_resp = resp; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, d_resp.err); ASSERT_EQ(local_file_size, d_resp.downloaded_size); @@ -342,10 +353,11 @@ TEST_P(HDFSClientTest, test_concurrent_upload_download) printf("clean up all old files.\n"); remove_path_response rem_resp; - s->remove_path(remove_path_request{"hdfs_concurrent_test", true}, - LPC_TEST_HDFS, - [&rem_resp](const remove_path_response &resp) { rem_resp = resp; }, - nullptr) + s->remove_path( + remove_path_request{"hdfs_concurrent_test", true}, + LPC_TEST_HDFS, + [&rem_resp](const remove_path_response &resp) { rem_resp = resp; }, + nullptr) ->wait(); ASSERT_TRUE(dsn::ERR_OK == rem_resp.err || dsn::ERR_OBJECT_NOT_FOUND == rem_resp.err); @@ -354,10 +366,11 @@ TEST_P(HDFSClientTest, test_concurrent_upload_download) std::vector block_files; for (int i = 0; i < total_files; ++i) { create_file_response cf_resp; - s->create_file(create_file_request{remote_file_names[i], true}, - LPC_TEST_HDFS, - [&cf_resp](const create_file_response &resp) { cf_resp = resp; }, - nullptr) + s->create_file( + create_file_request{remote_file_names[i], true}, + LPC_TEST_HDFS, + [&cf_resp](const create_file_response &resp) { cf_resp = resp; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, cf_resp.err); ASSERT_NE(nullptr, cf_resp.file_handle.get()); @@ -389,10 +402,11 @@ TEST_P(HDFSClientTest, test_concurrent_upload_download) std::vector block_files; for (int i = 0; i < total_files; ++i) { create_file_response cf_resp; - s->create_file(create_file_request{remote_file_names[i], true}, - LPC_TEST_HDFS, - [&cf_resp](const create_file_response &r) { cf_resp = r; }, - nullptr) + s->create_file( + create_file_request{remote_file_names[i], true}, + LPC_TEST_HDFS, + [&cf_resp](const create_file_response &r) { cf_resp = r; }, + nullptr) ->wait(); ASSERT_EQ(dsn::ERR_OK, cf_resp.err); ASSERT_NE(nullptr, cf_resp.file_handle.get()); diff --git a/src/client/partition_resolver.cpp b/src/client/partition_resolver.cpp index 96d505e924..6267180fb5 100644 --- a/src/client/partition_resolver.cpp +++ b/src/client/partition_resolver.cpp @@ -31,8 +31,8 @@ #include "partition_resolver_manager.h" #include "runtime/api_layer1.h" #include "runtime/api_task.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/task/task_spec.h" +#include "rpc/dns_resolver.h" +#include "task/task_spec.h" #include "utils/fmt_logging.h" #include "utils/threadpool_code.h" @@ -67,9 +67,8 @@ void partition_resolver::call_task(const rpc_response_task_ptr &t) rpc_response_handler old_callback; t->fetch_current_handler(old_callback); - auto new_callback = [ this, deadline_ms, oc = std::move(old_callback) ]( - dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + auto new_callback = [this, deadline_ms, oc = std::move(old_callback)]( + dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (req->header->gpid.value() != 0 && err != ERR_OK && error_retry(err)) { on_access_failure(req->header->gpid.get_partition_index(), err); // still got time, retry @@ -92,11 +91,12 @@ void partition_resolver::call_task(const rpc_response_task_ptr &t) enum_to_string(ctask->state())); // sleep gap milliseconds before retry - tasking::enqueue(LPC_RPC_DELAY_CALL, - nullptr, - [r, ctask]() { r->call_task(ctask); }, - 0, - std::chrono::milliseconds(gap)); + tasking::enqueue( + LPC_RPC_DELAY_CALL, + nullptr, + [r, ctask]() { r->call_task(ctask); }, + 0, + std::chrono::milliseconds(gap)); return; } else { LOG_ERROR("service access failed ({}), no more time for further tries, set error " @@ -112,27 +112,27 @@ void partition_resolver::call_task(const rpc_response_task_ptr &t) }; t->replace_callback(std::move(new_callback)); - resolve(hdr.client.partition_hash, - [t](resolve_result &&result) mutable { - if (result.err != ERR_OK) { - t->enqueue(result.err, nullptr); - return; - } + resolve( + hdr.client.partition_hash, + [t](resolve_result &&result) mutable { + if (result.err != ERR_OK) { + t->enqueue(result.err, nullptr); + return; + } - // update gpid when necessary - auto &hdr = *(t->get_request()->header); - if (hdr.gpid.value() != result.pid.value()) { - if (hdr.client.thread_hash == 0 // thread_hash is not assigned by applications - || - hdr.gpid.value() != 0 // requests set to child redirect to parent - ) { - hdr.client.thread_hash = result.pid.thread_hash(); - } - hdr.gpid = result.pid; + // update gpid when necessary + auto &hdr = *(t->get_request()->header); + if (hdr.gpid.value() != result.pid.value()) { + if (hdr.client.thread_hash == 0 // thread_hash is not assigned by applications + || hdr.gpid.value() != 0 // requests set to child redirect to parent + ) { + hdr.client.thread_hash = result.pid.thread_hash(); } - dsn_rpc_call(dns_resolver::instance().resolve_address(result.hp), t.get()); - }, - hdr.client.timeout_ms); + hdr.gpid = result.pid; + } + dsn_rpc_call(dns_resolver::instance().resolve_address(result.hp), t.get()); + }, + hdr.client.timeout_ms); } } // namespace replication } // namespace dsn diff --git a/src/client/partition_resolver.h b/src/client/partition_resolver.h index d4105a26bc..d1aa4f0cf2 100644 --- a/src/client/partition_resolver.h +++ b/src/client/partition_resolver.h @@ -34,12 +34,12 @@ #include #include "common/gpid.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" diff --git a/src/client/partition_resolver_manager.cpp b/src/client/partition_resolver_manager.cpp index 0ec0a7aa32..9fccb30456 100644 --- a/src/client/partition_resolver_manager.cpp +++ b/src/client/partition_resolver_manager.cpp @@ -29,8 +29,8 @@ #include "client/partition_resolver.h" #include "partition_resolver_manager.h" #include "partition_resolver_simple.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/group_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/autoref_ptr.h" #include "utils/fmt_logging.h" #include "utils/utils.h" diff --git a/src/client/partition_resolver_simple.cpp b/src/client/partition_resolver_simple.cpp index d577425633..db1d8e543d 100644 --- a/src/client/partition_resolver_simple.cpp +++ b/src/client/partition_resolver_simple.cpp @@ -34,13 +34,13 @@ #include "common/gpid.h" #include "dsn.layer2_types.h" #include "partition_resolver_simple.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/async_calls.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/fmt_logging.h" #include "utils/ports.h" #include "utils/rand.h" @@ -187,11 +187,12 @@ void partition_resolver_simple::call(request_context_ptr &&request, bool from_me // delay 1 second for further config query if (from_meta_ack) { - tasking::enqueue(LPC_REPLICATION_DELAY_QUERY_CONFIG, - &_tracker, - [ =, req2 = request ]() mutable { call(std::move(req2), false); }, - 0, - std::chrono::seconds(1)); + tasking::enqueue( + LPC_REPLICATION_DELAY_QUERY_CONFIG, + &_tracker, + [=, req2 = request]() mutable { call(std::move(req2), false); }, + 0, + std::chrono::seconds(1)); return; } @@ -206,12 +207,12 @@ void partition_resolver_simple::call(request_context_ptr &&request, bool from_me { zauto_lock l(request->lock); if (request->timeout_timer == nullptr) { - request->timeout_timer = - tasking::enqueue(LPC_REPLICATION_CLIENT_REQUEST_TIMEOUT, - &_tracker, - [ =, req2 = request ]() mutable { on_timeout(std::move(req2)); }, - 0, - std::chrono::milliseconds(timeout_ms)); + request->timeout_timer = tasking::enqueue( + LPC_REPLICATION_CLIENT_REQUEST_TIMEOUT, + &_tracker, + [=, req2 = request]() mutable { on_timeout(std::move(req2)); }, + 0, + std::chrono::milliseconds(timeout_ms)); } } @@ -301,26 +302,24 @@ void partition_resolver_simple::query_config_reply(error_code err, _app_partition_count = resp.partition_count; _app_is_stateful = resp.is_stateful; - for (auto it = resp.partitions.begin(); it != resp.partitions.end(); ++it) { - auto &new_config = *it; - + for (const auto &new_pc : resp.partitions) { LOG_DEBUG_PREFIX("query config reply, gpid = {}, ballot = {}, primary = {}", - new_config.pid, - new_config.ballot, - FMT_HOST_PORT_AND_IP(new_config, primary)); + new_pc.pid, + new_pc.ballot, + FMT_HOST_PORT_AND_IP(new_pc, primary)); - auto it2 = _config_cache.find(new_config.pid.get_partition_index()); + auto it2 = _config_cache.find(new_pc.pid.get_partition_index()); if (it2 == _config_cache.end()) { - std::unique_ptr pi(new partition_info); + auto pi = std::make_unique(); pi->timeout_count = 0; - pi->config = new_config; - _config_cache.emplace(new_config.pid.get_partition_index(), std::move(pi)); - } else if (_app_is_stateful && it2->second->config.ballot < new_config.ballot) { + pi->pc = new_pc; + _config_cache.emplace(new_pc.pid.get_partition_index(), std::move(pi)); + } else if (_app_is_stateful && it2->second->pc.ballot < new_pc.ballot) { it2->second->timeout_count = 0; - it2->second->config = new_config; + it2->second->pc = new_pc; } else if (!_app_is_stateful) { it2->second->timeout_count = 0; - it2->second->config = new_config; + it2->second->pc = new_pc; } else { // nothing to do } @@ -412,32 +411,30 @@ void partition_resolver_simple::handle_pending_requests(std::dequesecond->config; - if (it->second->config.ballot < 0) { + if (it->second->pc.ballot < 0) { // client query config for splitting app, child partition is not ready return ERR_CHILD_NOT_READY; } - hp = get_host_port(it->second->config); + hp = get_host_port(it->second->pc); if (!hp) { return ERR_IO_PENDING; } else { diff --git a/src/client/partition_resolver_simple.h b/src/client/partition_resolver_simple.h index 41ec74e791..9038e5b418 100644 --- a/src/client/partition_resolver_simple.h +++ b/src/client/partition_resolver_simple.h @@ -34,9 +34,9 @@ #include "client/partition_resolver.h" #include "common/serialization_helper/dsn.layer2_types.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_host_port.h" +#include "task/task.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/zlocks.h" @@ -65,7 +65,7 @@ class partition_resolver_simple : public partition_resolver struct partition_info { int timeout_count; - ::dsn::partition_configuration config; + ::dsn::partition_configuration pc; }; mutable dsn::zrwlock_nr _config_lock; std::unordered_map> _config_cache; @@ -107,7 +107,7 @@ class partition_resolver_simple : public partition_resolver private: // local routines - host_port get_host_port(const partition_configuration &config) const; + host_port get_host_port(const partition_configuration &pc) const; error_code get_host_port(int partition_index, /*out*/ host_port &hp); void handle_pending_requests(std::deque &reqs, error_code err); void clear_all_pending_requests(); diff --git a/src/client/replication_ddl_client.cpp b/src/client/replication_ddl_client.cpp index a71241362e..3a0fc3a9c5 100644 --- a/src/client/replication_ddl_client.cpp +++ b/src/client/replication_ddl_client.cpp @@ -38,6 +38,7 @@ #include "backup_types.h" #include "common//duplication_common.h" +#include "common/backup_common.h" #include "common/bulk_load_common.h" #include "common/gpid.h" #include "common/manual_compact.h" @@ -48,9 +49,9 @@ #include "fmt/core.h" #include "fmt/format.h" #include "meta/meta_rpc_types.h" +#include "rpc/group_host_port.h" +#include "rpc/rpc_address.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/rpc/rpc_address.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" #include "utils/output_utils.h" @@ -92,7 +93,7 @@ replication_ddl_client::replication_ddl_client(const std::vector _meta_server.assign_group("meta-servers"); for (const auto &m : meta_servers) { if (!_meta_server.group_host_port()->add(m)) { - LOG_WARNING("duplicate adress {}", m); + LOG_WARNING("duplicate address {}", m); } } } @@ -165,7 +166,7 @@ dsn::error_code replication_ddl_client::wait_app_ready(const std::string &app_na CHECK_EQ(partition_count, query_resp.partition_count); int ready_count = 0; for (int i = 0; i < partition_count; i++) { - const partition_configuration &pc = query_resp.partitions[i]; + const auto &pc = query_resp.partitions[i]; if (pc.hp_primary && (pc.hp_secondaries.size() + 1 >= max_replica_count)) { ready_count++; } @@ -421,8 +422,8 @@ dsn::error_code replication_ddl_client::list_apps(const dsn::app_status::type st } int32_t app_id; int32_t partition_count; - std::vector partitions; - r = list_app(info.app_name, app_id, partition_count, partitions); + std::vector pcs; + r = list_app(info.app_name, app_id, partition_count, pcs); if (r != dsn::ERR_OK) { LOG_ERROR("list app({}) failed, err = {}", info.app_name, r); return r; @@ -432,18 +433,18 @@ dsn::error_code replication_ddl_client::list_apps(const dsn::app_status::type st int fully_healthy = 0; int write_unhealthy = 0; int read_unhealthy = 0; - for (int i = 0; i < partitions.size(); i++) { - const dsn::partition_configuration &p = partitions[i]; + for (const auto &pc : pcs) { int replica_count = 0; - if (p.hp_primary) { + if (pc.hp_primary) { replica_count++; } - replica_count += p.hp_secondaries.size(); - if (p.hp_primary) { - if (replica_count >= p.max_replica_count) + replica_count += pc.hp_secondaries.size(); + if (pc.hp_primary) { + if (replica_count >= pc.max_replica_count) { fully_healthy++; - else if (replica_count < 2) + } else if (replica_count < 2) { write_unhealthy++; + } } else { write_unhealthy++; read_unhealthy++; @@ -546,7 +547,7 @@ dsn::error_code replication_ddl_client::list_nodes(const dsn::replication::node_ std::map tmp_map; int alive_node_count = 0; - for (const auto & [ hp, type ] : nodes) { + for (const auto &[hp, type] : nodes) { if (type == dsn::replication::node_status::NS_ALIVE) { alive_node_count++; } @@ -565,22 +566,21 @@ dsn::error_code replication_ddl_client::list_nodes(const dsn::replication::node_ for (auto &app : apps) { int32_t app_id; int32_t partition_count; - std::vector partitions; - r = list_app(app.app_name, app_id, partition_count, partitions); + std::vector pcs; + r = list_app(app.app_name, app_id, partition_count, pcs); if (r != dsn::ERR_OK) { return r; } - for (int i = 0; i < partitions.size(); i++) { - const dsn::partition_configuration &p = partitions[i]; - if (p.hp_primary) { - auto find = tmp_map.find(p.hp_primary); + for (const auto &pc : pcs) { + if (pc.hp_primary) { + auto find = tmp_map.find(pc.hp_primary); if (find != tmp_map.end()) { find->second.primary_count++; } } - for (int j = 0; j < p.hp_secondaries.size(); j++) { - auto find = tmp_map.find(p.hp_secondaries[j]); + for (const auto &secondary : pc.hp_secondaries) { + auto find = tmp_map.find(secondary); if (find != tmp_map.end()) { find->second.secondary_count++; } @@ -722,13 +722,13 @@ dsn::error_code replication_ddl_client::list_app(const std::string &app_name, int32_t app_id = 0; int32_t partition_count = 0; int32_t max_replica_count = 0; - std::vector partitions; - dsn::error_code err = list_app(app_name, app_id, partition_count, partitions); + std::vector pcs; + dsn::error_code err = list_app(app_name, app_id, partition_count, pcs); if (err != dsn::ERR_OK) { return err; } - if (!partitions.empty()) { - max_replica_count = partitions[0].max_replica_count; + if (!pcs.empty()) { + max_replica_count = pcs[0].max_replica_count; } // print query_cfg_response @@ -764,41 +764,33 @@ dsn::error_code replication_ddl_client::list_app(const std::string &app_name, int fully_healthy = 0; int write_unhealthy = 0; int read_unhealthy = 0; - for (const auto &p : partitions) { + for (const auto &pc : pcs) { int replica_count = 0; - if (p.hp_primary) { + if (pc.hp_primary) { replica_count++; - node_stat[p.hp_primary].first++; + node_stat[pc.hp_primary].first++; total_prim_count++; } - replica_count += p.hp_secondaries.size(); - total_sec_count += p.hp_secondaries.size(); - if (p.hp_primary) { - if (replica_count >= p.max_replica_count) + replica_count += pc.hp_secondaries.size(); + total_sec_count += pc.hp_secondaries.size(); + if (pc.hp_primary) { + if (replica_count >= pc.max_replica_count) { fully_healthy++; - else if (replica_count < 2) + } else if (replica_count < 2) { write_unhealthy++; + } } else { write_unhealthy++; read_unhealthy++; } - tp_details.add_row(p.pid.get_partition_index()); - tp_details.append_data(p.ballot); - std::stringstream oss; - oss << replica_count << "/" << p.max_replica_count; - tp_details.append_data(oss.str()); - tp_details.append_data(p.hp_primary ? p.hp_primary.to_string() : "-"); - oss.str(""); - oss << "["; - // TODO (yingchun) join - for (int j = 0; j < p.hp_secondaries.size(); j++) { - if (j != 0) - oss << ","; - oss << p.hp_secondaries[j]; - node_stat[p.hp_secondaries[j]].second++; + for (const auto &secondary : pc.hp_secondaries) { + node_stat[secondary].second++; } - oss << "]"; - tp_details.append_data(oss.str()); + tp_details.add_row(pc.pid.get_partition_index()); + tp_details.append_data(pc.ballot); + tp_details.append_data(fmt::format("{}/{}", replica_count, pc.max_replica_count)); + tp_details.append_data(pc.hp_primary ? pc.hp_primary.to_string() : "-"); + tp_details.append_data(fmt::format("[{}]", fmt::join(pc.hp_secondaries, ","))); } mtp.add(std::move(tp_details)); @@ -808,7 +800,7 @@ dsn::error_code replication_ddl_client::list_app(const std::string &app_name, tp_nodes.add_column("primary"); tp_nodes.add_column("secondary"); tp_nodes.add_column("total"); - for (const auto & [ hp, pri_and_sec_rep_cnts ] : node_stat) { + for (const auto &[hp, pri_and_sec_rep_cnts] : node_stat) { tp_nodes.add_row(node_name(hp, resolve_ip)); tp_nodes.append_data(pri_and_sec_rep_cnts.first); tp_nodes.append_data(pri_and_sec_rep_cnts.second); @@ -836,7 +828,7 @@ dsn::error_code replication_ddl_client::list_app(const std::string &app_name, dsn::error_code replication_ddl_client::list_app(const std::string &app_name, int32_t &app_id, int32_t &partition_count, - std::vector &partitions) + std::vector &pcs) { RETURN_EC_NOT_OK_MSG(validate_app_name(app_name), "invalid app_name: '{}'", app_name); @@ -858,7 +850,7 @@ dsn::error_code replication_ddl_client::list_app(const std::string &app_name, app_id = resp.app_id; partition_count = resp.partition_count; - partitions = resp.partitions; + pcs = resp.partitions; return dsn::ERR_OK; } @@ -1078,11 +1070,13 @@ error_with replication_ddl_client::query_backup(in return call_rpc_sync(query_backup_status_rpc(std::move(req), RPC_CM_QUERY_BACKUP_STATUS)); } -dsn::error_code replication_ddl_client::disable_backup_policy(const std::string &policy_name) +dsn::error_code replication_ddl_client::disable_backup_policy(const std::string &policy_name, + bool force) { auto req = std::make_shared(); req->policy_name = policy_name; req->__set_is_disable(true); + req->__set_force_disable(force); auto resp_task = request_meta(RPC_CM_MODIFY_BACKUP_POLICY, req); @@ -1139,20 +1133,19 @@ dsn::error_code replication_ddl_client::enable_backup_policy(const std::string & } } -static void print_policy_entry(const policy_entry &entry) +static dsn::utils::table_printer print_policy_entry(const policy_entry &entry) { - dsn::utils::table_printer tp; - tp.add_row_name_and_data(" name", entry.policy_name); - tp.add_row_name_and_data(" backup_provider_type", entry.backup_provider_type); - tp.add_row_name_and_data(" backup_interval", entry.backup_interval_seconds + "s"); - tp.add_row_name_and_data(" app_ids", fmt::format("{{{}}}", fmt::join(entry.app_ids, ", "))); - tp.add_row_name_and_data(" start_time", entry.start_time); - tp.add_row_name_and_data(" status", entry.is_disable ? "disabled" : "enabled"); - tp.add_row_name_and_data(" backup_history_count", entry.backup_history_count_to_keep); - tp.output(std::cout); + dsn::utils::table_printer tp(entry.policy_name); + tp.add_row_name_and_data("backup_provider_type", entry.backup_provider_type); + tp.add_row_name_and_data("backup_interval", entry.backup_interval_seconds + "s"); + tp.add_row_name_and_data("app_ids", fmt::format("{{{}}}", fmt::join(entry.app_ids, ", "))); + tp.add_row_name_and_data("start_time", entry.start_time); + tp.add_row_name_and_data("status", entry.is_disable ? "disabled" : "enabled"); + tp.add_row_name_and_data("backup_history_count", entry.backup_history_count_to_keep); + return tp; } -static void print_backup_entry(const backup_entry &bentry) +static void print_backup_entry(dsn::utils::table_printer &tp, const backup_entry &bentry) { char start_time[30] = {'\0'}; char end_time[30] = {'\0'}; @@ -1164,15 +1157,13 @@ static void print_backup_entry(const backup_entry &bentry) ::dsn::utils::time_ms_to_date_time(bentry.end_time_ms, end_time, 30); } - dsn::utils::table_printer tp; - tp.add_row_name_and_data(" id", bentry.backup_id); - tp.add_row_name_and_data(" start_time", start_time); - tp.add_row_name_and_data(" end_time", end_time); - tp.add_row_name_and_data(" app_ids", fmt::format("{{{}}}", fmt::join(bentry.app_ids, ", "))); - tp.output(std::cout); + tp.add_row(bentry.backup_id); + tp.append_data(start_time); + tp.append_data(end_time); + tp.append_data(fmt::format("{{{}}}", fmt::join(bentry.app_ids, ", "))); } -dsn::error_code replication_ddl_client::ls_backup_policy() +dsn::error_code replication_ddl_client::ls_backup_policy(bool json) { auto req = std::make_shared(); req->policy_names.clear(); @@ -1187,21 +1178,26 @@ dsn::error_code replication_ddl_client::ls_backup_policy() configuration_query_backup_policy_response resp; ::dsn::unmarshall(resp_task->get_response(), resp); + std::streambuf *buf; + std::ofstream of; + buf = std::cout.rdbuf(); + std::ostream out(buf); + if (resp.err != ERR_OK) { return resp.err; } else { + dsn::utils::multi_table_printer mtp; for (int32_t idx = 0; idx < resp.policys.size(); idx++) { - std::cout << "[" << idx + 1 << "]" << std::endl; - print_policy_entry(resp.policys[idx]); - std::cout << std::endl; + dsn::utils::table_printer tp = print_policy_entry(resp.policys[idx]); + mtp.add(std::move(tp)); } + mtp.output(out, json ? tp_output_format::kJsonPretty : tp_output_format::kTabular); } return ERR_OK; } -dsn::error_code -replication_ddl_client::query_backup_policy(const std::vector &policy_names, - int backup_info_cnt) +dsn::error_code replication_ddl_client::query_backup_policy( + const std::vector &policy_names, int backup_info_cnt, bool json) { auto req = std::make_shared(); req->policy_names = policy_names; @@ -1217,23 +1213,32 @@ replication_ddl_client::query_backup_policy(const std::vector &poli configuration_query_backup_policy_response resp; ::dsn::unmarshall(resp_task->get_response(), resp); + std::streambuf *buf; + std::ofstream of; + buf = std::cout.rdbuf(); + std::ostream out(buf); + if (resp.err != ERR_OK) { return resp.err; } else { + dsn::utils::multi_table_printer mtp; for (int32_t idx = 0; idx < resp.policys.size(); idx++) { - if (idx != 0) { - std::cout << "************************" << std::endl; - } const policy_entry &pentry = resp.policys[idx]; - std::cout << "policy_info:" << std::endl; - print_policy_entry(pentry); - std::cout << std::endl << "backup_infos:" << std::endl; + dsn::utils::table_printer tp_policy = print_policy_entry(pentry); + mtp.add(std::move(tp_policy)); const std::vector &backup_infos = resp.backup_infos[idx]; + dsn::utils::table_printer tp_backup(pentry.policy_name + "_" + + cold_backup_constant::BACKUP_INFO); + tp_backup.add_title("id"); + tp_backup.add_column("start_time"); + tp_backup.add_column("end_time"); + tp_backup.add_column("app_ids"); for (int bi_idx = 0; bi_idx < backup_infos.size(); bi_idx++) { - std::cout << "[" << (bi_idx + 1) << "]" << std::endl; - print_backup_entry(backup_infos[bi_idx]); + print_backup_entry(tp_backup, backup_infos[bi_idx]); } + mtp.add(std::move(tp_backup)); } + mtp.output(out, json ? tp_output_format::kJsonPretty : tp_output_format::kTabular); } return ERR_OK; } @@ -1308,8 +1313,8 @@ dsn::error_code replication_ddl_client::query_restore(int32_t restore_app_id, bo ::dsn::unmarshall(resp_task->get_response(), response); if (response.err == ERR_OK) { int overall_progress = 0; - for (const auto &p : response.restore_progress) { - overall_progress += p; + for (const auto &progress : response.restore_progress) { + overall_progress += progress; } overall_progress = overall_progress / response.restore_progress.size(); overall_progress = overall_progress / 10; @@ -1450,10 +1455,9 @@ void replication_ddl_client::end_meta_request(const rpc_response_task_ptr &callb &_tracker, [this, attempt_count, callback]( error_code err, dsn::message_ex *request, dsn::message_ex *response) mutable { - FAIL_POINT_INJECT_NOT_RETURN_F( "ddl_client_request_meta", - [&err, this](absl::string_view str) { err = pop_mock_error(); }); + [&err, this](std::string_view str) { err = pop_mock_error(); }); end_meta_request(callback, attempt_count + 1, err, request, response); }); diff --git a/src/client/replication_ddl_client.h b/src/client/replication_ddl_client.h index 69b3ae5718..2f891da2ec 100644 --- a/src/client/replication_ddl_client.h +++ b/src/client/replication_ddl_client.h @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -43,15 +44,15 @@ #include "meta_admin_types.h" #include "partition_split_types.h" #include "replica_admin_types.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/errors.h" @@ -59,7 +60,6 @@ #include "utils/flags.h" #include "utils/fmt_logging.h" #include "utils/ports.h" -#include "absl/strings/string_view.h" DSN_DECLARE_uint32(ddl_client_max_attempt_count); DSN_DECLARE_uint32(ddl_client_retry_interval_ms); @@ -125,7 +125,7 @@ class replication_ddl_client dsn::error_code list_app(const std::string &app_name, int32_t &app_id, int32_t &partition_count, - std::vector &partitions); + std::vector &pcs); dsn::replication::configuration_meta_control_response control_meta_function_level(meta_function_level::type level); @@ -162,7 +162,7 @@ class replication_ddl_client int32_t old_app_id, const std::string &new_app_name, bool skip_bad_partition, - const std::string &restore_path = ""); + const std::string &restore_path); dsn::error_code query_restore(int32_t restore_app_id, bool detailed); @@ -179,14 +179,15 @@ class replication_ddl_client error_with query_backup(int32_t app_id, int64_t backup_id); - dsn::error_code ls_backup_policy(); + dsn::error_code ls_backup_policy(bool json); - dsn::error_code disable_backup_policy(const std::string &policy_name); + dsn::error_code disable_backup_policy(const std::string &policy_name, bool force); dsn::error_code enable_backup_policy(const std::string &policy_name); dsn::error_code query_backup_policy(const std::vector &policy_names, - int backup_info_cnt); + int backup_info_cnt, + bool json); dsn::error_code update_backup_policy(const std::string &policy_name, const std::vector &add_appids, @@ -298,10 +299,9 @@ class replication_ddl_client &_tracker, [this, task]( error_code err, dsn::message_ex *request, dsn::message_ex *response) mutable { - FAIL_POINT_INJECT_NOT_RETURN_F( "ddl_client_request_meta", - [&err, this](absl::string_view str) { err = pop_mock_error(); }); + [&err, this](std::string_view str) { err = pop_mock_error(); }); end_meta_request(std::move(task), 1, err, request, response); }); @@ -342,7 +342,7 @@ class replication_ddl_client FAIL_POINT_INJECT_NOT_RETURN_F( "ddl_client_request_meta", - [&resp, this](absl::string_view str) { resp.err = pop_mock_error(); }); + [&resp, this](std::string_view str) { resp.err = pop_mock_error(); }); LOG_INFO("received response from meta server: rpc_code={}, err={}, attempt_count={}, " "max_attempt_count={}", @@ -378,10 +378,11 @@ class replication_ddl_client static constexpr int MAX_RETRY = 2; error_code err = ERR_UNKNOWN; for (int retry = 0; retry < MAX_RETRY; retry++) { - task_ptr task = rpc.call(dsn::dns_resolver::instance().resolve_address(_meta_server), - &_tracker, - [&err](error_code code) { err = code; }, - reply_thread_hash); + task_ptr task = rpc.call( + dsn::dns_resolver::instance().resolve_address(_meta_server), + &_tracker, + [&err](error_code code) { err = code; }, + reply_thread_hash); task->wait(); if (err == ERR_OK) { break; diff --git a/src/client/test/ddl_client_test.cpp b/src/client/test/ddl_client_test.cpp index f44a0382b0..13b13d69ae 100644 --- a/src/client/test/ddl_client_test.cpp +++ b/src/client/test/ddl_client_test.cpp @@ -26,9 +26,9 @@ #include "common/replication.codes.h" #include "gtest/gtest.h" #include "meta_admin_types.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/errors.h" diff --git a/src/client_lib/client_factory.cpp b/src/client_lib/client_factory.cpp index 29f2a44dd5..a1401fa79a 100644 --- a/src/client_lib/client_factory.cpp +++ b/src/client_lib/client_factory.cpp @@ -32,4 +32,4 @@ pegasus_client *pegasus_client_factory::get_client(const char *cluster_name, con return client::pegasus_client_factory_impl::get_client(cluster_name, app_name); } -} // namespace +} // namespace pegasus diff --git a/src/client_lib/mutation.cpp b/src/client_lib/mutation.cpp index ba0431c3af..260dfca84d 100644 --- a/src/client_lib/mutation.cpp +++ b/src/client_lib/mutation.cpp @@ -90,4 +90,4 @@ void pegasus_client::mutations::get_mutations(std::vector &mutations) co mutations[pair.first].set_expire_ts_seconds = pair.second + current_time; } } -} +} // namespace pegasus diff --git a/src/client_lib/pegasus_client_factory_impl.h b/src/client_lib/pegasus_client_factory_impl.h index da1e054533..87d5326af8 100644 --- a/src/client_lib/pegasus_client_factory_impl.h +++ b/src/client_lib/pegasus_client_factory_impl.h @@ -45,5 +45,5 @@ class pegasus_client_factory_impl static cluster_to_app_map _cluster_to_clients; static ::dsn::zlock *_map_lock; }; -} -} // namespace +} // namespace client +} // namespace pegasus diff --git a/src/client_lib/pegasus_client_impl.cpp b/src/client_lib/pegasus_client_impl.cpp index c6dfeb44f8..1dd402bd01 100644 --- a/src/client_lib/pegasus_client_impl.cpp +++ b/src/client_lib/pegasus_client_impl.cpp @@ -23,10 +23,10 @@ #include #include #include +#include #include #include -#include "absl/strings/string_view.h" #include "common/common.h" #include "common/replication_other_types.h" #include "common/serialization_helper/dsn.layer2_types.h" @@ -34,12 +34,12 @@ #include "pegasus_client_impl.h" #include "pegasus_key_schema.h" #include "pegasus_utils.h" +#include "rpc/dns_resolver.h" +#include "rpc/group_host_port.h" +#include "rpc/serialization.h" #include "rrdb/rrdb.client.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_code.h" +#include "task/async_calls.h" +#include "task/task_code.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" #include "utils/synchronize.h" @@ -126,8 +126,7 @@ void pegasus_client_impl::async_set(const std::string &hash_key, // wrap the user defined callback function, generate a new callback function. auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -214,8 +213,7 @@ void pegasus_client_impl::async_multi_set(const std::string &hash_key, auto partition_hash = pegasus_key_hash(tmp_key); // wrap the user-defined-callback-function, generate a new callback function. auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -275,8 +273,7 @@ void pegasus_client_impl::async_get(const std::string &hash_key, pegasus_generate_key(req, hash_key, sort_key); auto partition_hash = pegasus_key_hash(req); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -365,8 +362,7 @@ void pegasus_client_impl::async_multi_get(const std::string &hash_key, pegasus_generate_key(tmp_key, req.hash_key, ::dsn::blob()); auto partition_hash = pegasus_key_hash(tmp_key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -465,8 +461,7 @@ void pegasus_client_impl::async_multi_get(const std::string &hash_key, pegasus_generate_key(tmp_key, req.hash_key, ::dsn::blob()); auto partition_hash = pegasus_key_hash(tmp_key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -544,8 +539,7 @@ void pegasus_client_impl::async_multi_get_sortkeys(const std::string &hash_key, pegasus_generate_key(tmp_key, req.hash_key, ::dsn::blob()); auto partition_hash = pegasus_key_hash(tmp_key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -657,8 +651,7 @@ void pegasus_client_impl::async_del(const std::string &hash_key, auto partition_hash = pegasus_key_hash(req); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -738,8 +731,7 @@ void pegasus_client_impl::async_multi_del(const std::string &hash_key, auto partition_hash = pegasus_key_hash(tmp_key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -819,8 +811,7 @@ void pegasus_client_impl::async_incr(const std::string &hash_key, auto partition_hash = pegasus_key_hash(req.key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -923,8 +914,7 @@ void pegasus_client_impl::async_check_and_set(const std::string &hash_key, pegasus_generate_key(tmp_key, req.hash_key, ::dsn::blob()); auto partition_hash = pegasus_key_hash(tmp_key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -1053,8 +1043,7 @@ void pegasus_client_impl::async_check_and_mutate(const std::string &hash_key, pegasus_generate_key(tmp_key, req.hash_key, ::dsn::blob()); auto partition_hash = pegasus_key_hash(tmp_key); auto new_callback = [user_callback = std::move(callback)]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { if (user_callback == nullptr) { return; } @@ -1223,9 +1212,8 @@ void pegasus_client_impl::async_get_unordered_scanners( return; } - auto new_callback = [ user_callback = std::move(callback), max_split_count, options, this ]( - ::dsn::error_code err, dsn::message_ex * req, dsn::message_ex * resp) - { + auto new_callback = [user_callback = std::move(callback), max_split_count, options, this]( + ::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) { std::vector scanners; query_cfg_response response; if (err == ERR_OK) { diff --git a/src/client_lib/pegasus_client_impl.h b/src/client_lib/pegasus_client_impl.h index 689fe74925..798e3464d9 100644 --- a/src/client_lib/pegasus_client_impl.h +++ b/src/client_lib/pegasus_client_impl.h @@ -31,8 +31,8 @@ #include #include +#include "rpc/rpc_host_port.h" #include "rrdb/rrdb_types.h" -#include "runtime/rpc/rpc_host_port.h" #include "utils/blob.h" #include "utils/zlocks.h" diff --git a/src/client_lib/pegasus_scanner_impl.cpp b/src/client_lib/pegasus_scanner_impl.cpp index 51282e14d3..e9715c27bc 100644 --- a/src/client_lib/pegasus_scanner_impl.cpp +++ b/src/client_lib/pegasus_scanner_impl.cpp @@ -34,7 +34,7 @@ #include "pegasus_key_schema.h" #include "rrdb/rrdb.client.h" #include "rrdb/rrdb_types.h" -#include "runtime/rpc/serialization.h" +#include "rpc/serialization.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" @@ -266,12 +266,13 @@ void pegasus_client_impl::pegasus_scanner_impl::_next_batch() CHECK(!_rpc_started, ""); _rpc_started = true; - _client->scan(req, - [this](::dsn::error_code err, - dsn::message_ex *req, - dsn::message_ex *resp) mutable { _on_scan_response(err, req, resp); }, - std::chrono::milliseconds(_options.timeout_ms), - _hash); + _client->scan( + req, + [this](::dsn::error_code err, dsn::message_ex *req, dsn::message_ex *resp) mutable { + _on_scan_response(err, req, resp); + }, + std::chrono::milliseconds(_options.timeout_ms), + _hash); } void pegasus_client_impl::pegasus_scanner_impl::_start_scan() @@ -393,13 +394,13 @@ void pegasus_client_impl::pegasus_scanner_impl_wrapper::async_next( async_scan_next_callback_t &&callback) { // wrap shared_ptr _p with callback - _p->async_next([ __p = _p, user_callback = std::move(callback) ](int error_code, - std::string &&hash_key, - std::string &&sort_key, - std::string &&value, - internal_info &&info, - uint32_t expire_ts_seconds, - int32_t kv_count) { + _p->async_next([__p = _p, user_callback = std::move(callback)](int error_code, + std::string &&hash_key, + std::string &&sort_key, + std::string &&value, + internal_info &&info, + uint32_t expire_ts_seconds, + int32_t kv_count) { user_callback(error_code, std::move(hash_key), std::move(sort_key), diff --git a/src/common/backup_common.cpp b/src/common/backup_common.cpp index 23575787cf..ecc2278e62 100644 --- a/src/common/backup_common.cpp +++ b/src/common/backup_common.cpp @@ -19,8 +19,8 @@ #include "common/gpid.h" #include "fmt/core.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_host_port.h" namespace dsn { namespace replication { diff --git a/src/common/backup_common.h b/src/common/backup_common.h index 96e11717e8..60318bee47 100644 --- a/src/common/backup_common.h +++ b/src/common/backup_common.h @@ -20,7 +20,7 @@ #include #include -#include "runtime/rpc/rpc_holder.h" +#include "rpc/rpc_holder.h" namespace dsn { class gpid; diff --git a/src/common/bulk_load_common.h b/src/common/bulk_load_common.h index 4a45869722..5e5ed9bf50 100644 --- a/src/common/bulk_load_common.h +++ b/src/common/bulk_load_common.h @@ -20,7 +20,7 @@ #include #include -#include "runtime/rpc/rpc_holder.h" +#include "rpc/rpc_holder.h" namespace dsn { namespace replication { diff --git a/src/common/consensus.thrift b/src/common/consensus.thrift index 26312b8e36..8952c090c7 100644 --- a/src/common/consensus.thrift +++ b/src/common/consensus.thrift @@ -32,11 +32,24 @@ namespace cpp dsn.replication struct mutation_header { + // The partition that this mutation belongs to. 1:dsn.gpid pid; + + // The ID of the membership configuration that this mutation belongs to, + // increasing monotonically. 2:i64 ballot; + + // The decree of this mutation. 3:i64 decree; + + // The start offset of this mutation in the whole mutation log. 4:i64 log_offset; + + // The max of the decrees that have been committed before this mutation + // is prepared. 5:i64 last_committed_decree; + + // The unique timestamp that increases monotonically in microsecond. 6:i64 timestamp; } diff --git a/src/common/duplication_common.cpp b/src/common/duplication_common.cpp index 0aea933473..4dc5323c65 100644 --- a/src/common/duplication_common.cpp +++ b/src/common/duplication_common.cpp @@ -122,6 +122,7 @@ class duplication_group_registry : public utils::singleton #include "duplication_types.h" -#include "runtime/rpc/rpc_holder.h" +#include "rpc/rpc_holder.h" #include "utils/errors.h" #include "utils/flags.h" #include "utils/fmt_utils.h" diff --git a/src/common/fs_manager.cpp b/src/common/fs_manager.cpp index 1b633b1d24..1801d2c46b 100644 --- a/src/common/fs_manager.cpp +++ b/src/common/fs_manager.cpp @@ -31,7 +31,7 @@ #include #include -#include "absl/strings/string_view.h" +#include #include "common/gpid.h" #include "common/replication_enums.h" #include "fmt/core.h" @@ -134,7 +134,7 @@ uint64_t dir_node::replicas_count(app_id id) const return iter->second.size(); } -std::string dir_node::replica_dir(absl::string_view app_type, const dsn::gpid &pid) const +std::string dir_node::replica_dir(std::string_view app_type, const dsn::gpid &pid) const { return utils::filesystem::path_combine(full_dir, fmt::format("{}.{}", pid, app_type)); } @@ -159,7 +159,7 @@ uint64_t dir_node::remove(const gpid &pid) void dir_node::update_disk_stat() { - FAIL_POINT_INJECT_F("update_disk_stat", [](absl::string_view) { return; }); + FAIL_POINT_INJECT_F("update_disk_stat", [](std::string_view) { return; }); dsn::utils::filesystem::disk_space_info dsi; if (!dsn::utils::filesystem::get_disk_space_info(full_dir, dsi)) { @@ -298,6 +298,7 @@ dir_node *fs_manager::find_best_dir_for_new_replica(const gpid &pid) const for (const auto &dn : _dir_nodes) { // Do not allocate new replica on dir_node which is not NORMAL. if (dn->status != disk_status::NORMAL) { + LOG_INFO("skip the {} state dir_node({})", enum_to_string(dn->status), dn->tag); continue; } CHECK(!dn->has(pid), "gpid({}) already exists in dir_node({})", pid, dn->tag); @@ -328,7 +329,7 @@ dir_node *fs_manager::find_best_dir_for_new_replica(const gpid &pid) const } void fs_manager::specify_dir_for_new_replica_for_test(dir_node *specified_dn, - absl::string_view app_type, + std::string_view app_type, const dsn::gpid &pid) const { bool dn_found = false; @@ -431,7 +432,7 @@ bool fs_manager::is_dir_node_exist(const std::string &data_dir, const std::strin return false; } -dir_node *fs_manager::find_replica_dir(absl::string_view app_type, gpid pid) +dir_node *fs_manager::find_replica_dir(std::string_view app_type, gpid pid) { std::string replica_dir; dir_node *replica_dn = nullptr; @@ -440,6 +441,7 @@ dir_node *fs_manager::find_replica_dir(absl::string_view app_type, gpid pid) for (const auto &dn : _dir_nodes) { // Skip IO error dir_node. if (dn->status == disk_status::IO_ERROR) { + LOG_INFO("skip the {} state dir_node({})", enum_to_string(dn->status), dn->tag); continue; } const auto dir = dn->replica_dir(app_type, pid); @@ -455,7 +457,7 @@ dir_node *fs_manager::find_replica_dir(absl::string_view app_type, gpid pid) return replica_dn; } -dir_node *fs_manager::create_replica_dir_if_necessary(absl::string_view app_type, gpid pid) +dir_node *fs_manager::create_replica_dir_if_necessary(std::string_view app_type, gpid pid) { // Try to find the replica directory. auto replica_dn = find_replica_dir(app_type, pid); @@ -487,7 +489,7 @@ dir_node *fs_manager::create_replica_dir_if_necessary(absl::string_view app_type return replica_dn; } -dir_node *fs_manager::create_child_replica_dir(absl::string_view app_type, +dir_node *fs_manager::create_child_replica_dir(std::string_view app_type, gpid child_pid, const std::string &parent_dir) { @@ -498,6 +500,7 @@ dir_node *fs_manager::create_child_replica_dir(absl::string_view app_type, for (const auto &dn : _dir_nodes) { // Skip non-available dir_node. if (dn->status != disk_status::NORMAL) { + LOG_INFO("skip the {} state dir_node({})", enum_to_string(dn->status), dn->tag); continue; } child_dir = dn->replica_dir(app_type, child_pid); diff --git a/src/common/fs_manager.h b/src/common/fs_manager.h index 4a9d1f69fc..0ceecc3e43 100644 --- a/src/common/fs_manager.h +++ b/src/common/fs_manager.h @@ -31,7 +31,7 @@ #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/flags.h" -#include "absl/strings/string_view.h" +#include #include "utils/metrics.h" #include "utils/ports.h" #include "utils/zlocks.h" @@ -105,7 +105,7 @@ struct dir_node uint64_t replicas_count() const; // Construct the replica dir for the given 'app_type' and 'pid'. // NOTE: Just construct the string, the directory will not be created. - std::string replica_dir(absl::string_view app_type, const dsn::gpid &pid) const; + std::string replica_dir(std::string_view app_type, const dsn::gpid &pid) const; bool has(const dsn::gpid &pid) const; uint64_t remove(const dsn::gpid &pid); void update_disk_stat(); @@ -133,18 +133,18 @@ class fs_manager // dir_nodes. // NOTE: only used in test. void specify_dir_for_new_replica_for_test(dir_node *specified_dn, - absl::string_view app_type, + std::string_view app_type, const dsn::gpid &pid) const; void add_replica(const dsn::gpid &pid, const std::string &pid_dir); // Find the replica instance directory. - dir_node *find_replica_dir(absl::string_view app_type, gpid pid); + dir_node *find_replica_dir(std::string_view app_type, gpid pid); // Similar to the above, but it will create a new directory if not found. - dir_node *create_replica_dir_if_necessary(absl::string_view app_type, gpid pid); + dir_node *create_replica_dir_if_necessary(std::string_view app_type, gpid pid); // Similar to the above, and will create a directory for the child on the same dir_node // of parent. // During partition split, we should guarantee child replica and parent replica share the // same data dir. - dir_node *create_child_replica_dir(absl::string_view app_type, + dir_node *create_child_replica_dir(std::string_view app_type, gpid child_pid, const std::string &parent_dir); void remove_replica(const dsn::gpid &pid); @@ -191,5 +191,5 @@ class fs_manager FRIEND_TEST(open_replica_test, open_replica_add_decree_and_ballot_check); FRIEND_TEST(replica_test, test_auto_trash_of_corruption); }; -} // replication -} // dsn +} // namespace replication +} // namespace dsn diff --git a/src/common/json_helper.h b/src/common/json_helper.h index 4329bd2a8a..d56bcecba6 100644 --- a/src/common/json_helper.h +++ b/src/common/json_helper.h @@ -49,7 +49,7 @@ #include "utils/error_code.h" #include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "common/gpid.h" #include "meta_admin_types.h" #include "partition_split_types.h" @@ -237,6 +237,12 @@ JSON_DECODE_ENTRIES(input, t, __VA_ARGS__); \ } +#define JSON_ENCODE_OBJ(writer, name, ...) \ + do { \ + writer.Key(#name); \ + dsn::json::json_encode(writer, __VA_ARGS__); \ + } while (0) + namespace dsn { namespace json { @@ -442,8 +448,8 @@ inline bool json_decode(const dsn::json::JsonObject &in, dsn::host_port &hp) return static_cast(hp); } -inline void json_encode(JsonWriter &out, const dsn::partition_configuration &config); -inline bool json_decode(const JsonObject &in, dsn::partition_configuration &config); +inline void json_encode(JsonWriter &out, const dsn::partition_configuration &pc); +inline bool json_decode(const JsonObject &in, dsn::partition_configuration &pc); inline void json_encode(JsonWriter &out, const dsn::app_info &info); inline bool json_decode(const JsonObject &in, dsn::app_info &info); inline void json_encode(JsonWriter &out, const dsn::replication::file_meta &f_meta); diff --git a/src/common/manual_compact.h b/src/common/manual_compact.h index c6e7ae3b67..38aa57f9de 100644 --- a/src/common/manual_compact.h +++ b/src/common/manual_compact.h @@ -18,7 +18,7 @@ #pragma once #include "meta_admin_types.h" -#include "runtime/rpc/rpc_holder.h" +#include "rpc/rpc_holder.h" namespace dsn { namespace replication { diff --git a/src/common/partition_split_common.h b/src/common/partition_split_common.h index e9cc376c9b..749f031839 100644 --- a/src/common/partition_split_common.h +++ b/src/common/partition_split_common.h @@ -18,7 +18,7 @@ #pragma once #include "partition_split_types.h" -#include "runtime/rpc/rpc_holder.h" +#include "rpc/rpc_holder.h" namespace dsn { namespace replication { diff --git a/src/common/replication.codes.h b/src/common/replication.codes.h index a542e58130..9a13b5c690 100644 --- a/src/common/replication.codes.h +++ b/src/common/replication.codes.h @@ -28,7 +28,7 @@ #include "utils/error_code.h" #include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "common/gpid.h" DEFINE_THREAD_POOL_CODE(THREAD_POOL_META_SERVER) diff --git a/src/common/replication_common.cpp b/src/common/replication_common.cpp index 9d0a8f103c..7e18163cd5 100644 --- a/src/common/replication_common.cpp +++ b/src/common/replication_common.cpp @@ -34,8 +34,8 @@ #include "common/replication_other_types.h" #include "dsn.layer2_types.h" #include "fmt/core.h" -#include "runtime/rpc/dns_resolver.h" // IWYU pragma: keep -#include "runtime/rpc/rpc_address.h" +#include "rpc/dns_resolver.h" // IWYU pragma: keep +#include "rpc/rpc_address.h" #include "runtime/service_app.h" #include "utils/config_api.h" #include "utils/filesystem.h" @@ -166,26 +166,26 @@ int32_t replication_options::app_mutation_2pc_min_replica_count(int32_t app_max_ } } -/*static*/ bool replica_helper::get_replica_config(const partition_configuration &partition_config, +/*static*/ bool replica_helper::get_replica_config(const partition_configuration &pc, const ::dsn::host_port &node, - /*out*/ replica_configuration &replica_config) + /*out*/ replica_configuration &rc) { - replica_config.pid = partition_config.pid; - replica_config.ballot = partition_config.ballot; - replica_config.learner_signature = invalid_signature; - SET_OBJ_IP_AND_HOST_PORT(replica_config, primary, partition_config, primary); + rc.pid = pc.pid; + rc.ballot = pc.ballot; + rc.learner_signature = invalid_signature; + SET_OBJ_IP_AND_HOST_PORT(rc, primary, pc, primary); - if (node == partition_config.hp_primary) { - replica_config.status = partition_status::PS_PRIMARY; + if (node == pc.hp_primary) { + rc.status = partition_status::PS_PRIMARY; return true; } - if (utils::contains(partition_config.hp_secondaries, node)) { - replica_config.status = partition_status::PS_SECONDARY; + if (utils::contains(pc.hp_secondaries, node)) { + rc.status = partition_status::PS_SECONDARY; return true; } - replica_config.status = partition_status::PS_INACTIVE; + rc.status = partition_status::PS_INACTIVE; return false; } diff --git a/src/common/replication_common.h b/src/common/replication_common.h index 9d88b9d96a..efa78f13fb 100644 --- a/src/common/replication_common.h +++ b/src/common/replication_common.h @@ -32,9 +32,9 @@ #include #include "metadata_types.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/task/task.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" +#include "task/task.h" namespace dsn { namespace replication { @@ -46,6 +46,7 @@ class query_replica_info_request; class query_replica_info_response; typedef std::unordered_map<::dsn::host_port, partition_status::type> node_statuses; + typedef std::unordered_map<::dsn::host_port, dsn::task_ptr> node_tasks; typedef rpc_holder diff --git a/src/common/replication_other_types.h b/src/common/replication_other_types.h index 7f8c51dcd7..e23164a187 100644 --- a/src/common/replication_other_types.h +++ b/src/common/replication_other_types.h @@ -36,8 +36,8 @@ #include "consensus_types.h" #include "replica_admin_types.h" #include "common/replication_enums.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" namespace dsn { namespace replication { @@ -79,8 +79,8 @@ inline bool is_partition_config_equal(const partition_configuration &pc1, const partition_configuration &pc2) { // secondaries no need to be same order - for (const auto &hp : pc1.hp_secondaries) { - if (!is_secondary(pc2, hp)) { + for (const auto &pc1_secondary : pc1.hp_secondaries) { + if (!is_secondary(pc2, pc1_secondary)) { return false; } } @@ -106,9 +106,9 @@ class replica_helper } return false; } - static bool get_replica_config(const partition_configuration &partition_config, + static bool get_replica_config(const partition_configuration &pc, const ::dsn::host_port &node, - /*out*/ replica_configuration &replica_config); + /*out*/ replica_configuration &rc); // Return true if 'server_list' is a valid comma-separated list of servers, otherwise return // false. The result is filled into 'servers' if success. @@ -122,7 +122,7 @@ class replica_helper const std::string &key, /*out*/ std::vector &servers); }; -} -} // namespace +} // namespace replication +} // namespace dsn #endif diff --git a/src/common/serialization_helper/dsn_types.h b/src/common/serialization_helper/dsn_types.h index 2224bd1507..0e37c4ee5a 100644 --- a/src/common/serialization_helper/dsn_types.h +++ b/src/common/serialization_helper/dsn_types.h @@ -26,4 +26,4 @@ #pragma once -#include "runtime/rpc/serialization.h" +#include "rpc/serialization.h" diff --git a/src/common/serialization_helper/thrift_helper.h b/src/common/serialization_helper/thrift_helper.h index c3e2bbfb0c..8f6c324e0a 100644 --- a/src/common/serialization_helper/thrift_helper.h +++ b/src/common/serialization_helper/thrift_helper.h @@ -26,8 +26,8 @@ #pragma once -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_stream.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_stream.h" #include #include @@ -37,7 +37,7 @@ #include #include -#include "absl/strings/string_view.h" +#include using namespace ::apache::thrift::transport; namespace dsn { @@ -498,7 +498,7 @@ inline uint32_t task_code::write(apache::thrift::protocol::TProtocol *oprot) con dynamic_cast(oprot); if (binary_proto != nullptr) { // the protocol is binary protocol - return binary_proto->writeString(absl::string_view(name)); + return binary_proto->writeString(std::string_view(name)); } else { // the protocol is json protocol uint32_t xfer = 0; @@ -566,7 +566,7 @@ inline uint32_t error_code::write(apache::thrift::protocol::TProtocol *oprot) co dynamic_cast(oprot); if (binary_proto != nullptr) { // the protocol is binary protocol - return binary_proto->writeString(absl::string_view(name)); + return binary_proto->writeString(std::string_view(name)); } else { // the protocol is json protocol uint32_t xfer = 0; diff --git a/src/common/storage_serverlet.h b/src/common/storage_serverlet.h index ee1ef3bee2..ba288747e7 100644 --- a/src/common/storage_serverlet.h +++ b/src/common/storage_serverlet.h @@ -38,13 +38,13 @@ #include "utils/api_utilities.h" #include "utils/error_code.h" #include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "common/gpid.h" -#include "runtime/rpc/serialization.h" -#include "runtime/rpc/rpc_stream.h" +#include "rpc/serialization.h" +#include "rpc/rpc_stream.h" #include "runtime/serverlet.h" #include "runtime/service_app.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" namespace dsn { namespace replication { diff --git a/src/failure_detector/failure_detector.cpp b/src/failure_detector/failure_detector.cpp index 0cda09d5f0..a9c330d7d2 100644 --- a/src/failure_detector/failure_detector.cpp +++ b/src/failure_detector/failure_detector.cpp @@ -34,18 +34,18 @@ #include #include -#include "absl/strings/string_view.h" +#include #include "failure_detector/fd.code.definition.h" #include "fd_types.h" #include "fmt/core.h" #include "fmt/format.h" #include "nlohmann/json_fwd.hpp" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_address.h" #include "runtime/serverlet.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_spec.h" +#include "task/async_calls.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/command_manager.h" #include "utils/fmt_logging.h" @@ -101,12 +101,13 @@ error_code failure_detector::start(uint32_t check_interval_seconds, open_service(); // start periodically check job - _check_task = tasking::enqueue_timer(LPC_BEACON_CHECK, - &_tracker, - [this] { check_all_records(); }, - std::chrono::milliseconds(_check_interval_milliseconds), - -1, - std::chrono::milliseconds(_check_interval_milliseconds)); + _check_task = tasking::enqueue_timer( + LPC_BEACON_CHECK, + &_tracker, + [this] { check_all_records(); }, + std::chrono::milliseconds(_check_interval_milliseconds), + -1, + std::chrono::milliseconds(_check_interval_milliseconds)); _is_started = true; return ERR_OK; @@ -149,13 +150,13 @@ void failure_detector::register_master(const ::dsn::host_port &target) if (setup_timer) { // delay the beacon slightly to make first beacon greater than the // last_beacon_send_time_with_ack - ret.first->second.send_beacon_timer = - tasking::enqueue_timer(LPC_BEACON_SEND, - &_tracker, - [this, target]() { this->send_beacon(target, dsn_now_ms()); }, - std::chrono::milliseconds(_beacon_interval_milliseconds), - 0, - std::chrono::milliseconds(1)); + ret.first->second.send_beacon_timer = tasking::enqueue_timer( + LPC_BEACON_SEND, + &_tracker, + [this, target]() { this->send_beacon(target, dsn_now_ms()); }, + std::chrono::milliseconds(_beacon_interval_milliseconds), + 0, + std::chrono::milliseconds(1)); } } @@ -176,13 +177,13 @@ bool failure_detector::switch_master(const ::dsn::host_port &from, it->second.node = to; it->second.rejected = false; it->second.send_beacon_timer->cancel(true); - it->second.send_beacon_timer = - tasking::enqueue_timer(LPC_BEACON_SEND, - &_tracker, - [this, to]() { this->send_beacon(to, dsn_now_ms()); }, - std::chrono::milliseconds(_beacon_interval_milliseconds), - 0, - std::chrono::milliseconds(delay_milliseconds)); + it->second.send_beacon_timer = tasking::enqueue_timer( + LPC_BEACON_SEND, + &_tracker, + [this, to]() { this->send_beacon(to, dsn_now_ms()); }, + std::chrono::milliseconds(_beacon_interval_milliseconds), + 0, + std::chrono::milliseconds(delay_milliseconds)); _masters.insert(std::make_pair(to, it->second)); _masters.erase(from); @@ -592,24 +593,25 @@ void failure_detector::send_beacon(const host_port &target, uint64_t time) FMT_HOST_PORT_AND_IP(beacon, to_node), time); - ::dsn::rpc::call(addr_target, - RPC_FD_FAILURE_DETECTOR_PING, - beacon, - &_tracker, - [=](error_code err, beacon_ack &&resp) { - if (err != ::dsn::ERR_OK) { - beacon_ack ack; - ack.time = beacon.time; - SET_OBJ_IP_AND_HOST_PORT(ack, this_node, beacon, to_node); - RESET_IP_AND_HOST_PORT(ack, primary_node); - ack.is_master = false; - ack.allowed = true; - end_ping(err, ack, nullptr); - } else { - end_ping(err, std::move(resp), nullptr); - } - }, - std::chrono::milliseconds(_beacon_timeout_milliseconds)); -} + ::dsn::rpc::call( + addr_target, + RPC_FD_FAILURE_DETECTOR_PING, + beacon, + &_tracker, + [=](error_code err, beacon_ack &&resp) { + if (err != ::dsn::ERR_OK) { + beacon_ack ack; + ack.time = beacon.time; + SET_OBJ_IP_AND_HOST_PORT(ack, this_node, beacon, to_node); + RESET_IP_AND_HOST_PORT(ack, primary_node); + ack.is_master = false; + ack.allowed = true; + end_ping(err, ack, nullptr); + } else { + end_ping(err, std::move(resp), nullptr); + } + }, + std::chrono::milliseconds(_beacon_timeout_milliseconds)); } -} // end namespace +} // namespace fd +} // namespace dsn diff --git a/src/failure_detector/failure_detector.h b/src/failure_detector/failure_detector.h index 4ff0fc53eb..775e78d7f4 100644 --- a/src/failure_detector/failure_detector.h +++ b/src/failure_detector/failure_detector.h @@ -35,10 +35,10 @@ #include "failure_detector/fd.client.h" #include "failure_detector/fd.server.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_host_port.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/error_code.h" #include "utils/metrics.h" #include "utils/threadpool_code.h" @@ -237,5 +237,5 @@ class failure_detector : public failure_detector_service, // subClass can rewrite these method. virtual void send_beacon(const host_port &node, uint64_t time); }; -} -} // end namespace +} // namespace fd +} // namespace dsn diff --git a/src/failure_detector/failure_detector_multimaster.cpp b/src/failure_detector/failure_detector_multimaster.cpp index e12fc69d14..a95b026191 100644 --- a/src/failure_detector/failure_detector_multimaster.cpp +++ b/src/failure_detector/failure_detector_multimaster.cpp @@ -29,8 +29,8 @@ #include "failure_detector/failure_detector_multimaster.h" #include "fd_types.h" -#include "runtime/rpc/dns_resolver.h" // IWYU pragma: keep -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/dns_resolver.h" // IWYU pragma: keep +#include "rpc/rpc_host_port.h" #include "utils/error_code.h" #include "utils/rand.h" @@ -132,13 +132,13 @@ void slave_failure_detector_with_multimaster::on_master_disconnected( void slave_failure_detector_with_multimaster::on_master_connected(const host_port &node) { /* - * well, this is called in on_ping_internal, which is called by rep::end_ping. - * So this function is called in the lock context of fd::_lock - */ + * well, this is called in on_ping_internal, which is called by rep::end_ping. + * So this function is called in the lock context of fd::_lock + */ bool is_primary = (_meta_servers.group_host_port()->leader() == node); if (is_primary) { _master_connected_callback(); } } -} -} // end namespace +} // namespace dist +} // namespace dsn diff --git a/src/failure_detector/failure_detector_multimaster.h b/src/failure_detector/failure_detector_multimaster.h index 3a803f9c6c..3b8664151c 100644 --- a/src/failure_detector/failure_detector_multimaster.h +++ b/src/failure_detector/failure_detector_multimaster.h @@ -31,8 +31,8 @@ #include #include "failure_detector/failure_detector.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/group_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/fmt_logging.h" #include "utils/zlocks.h" @@ -40,6 +40,7 @@ namespace dsn { class error_code; namespace fd { + class beacon_ack; } // namespace fd @@ -86,5 +87,5 @@ inline ::dsn::host_port slave_failure_detector_with_multimaster::current_server_ zauto_lock l(failure_detector::_lock); return _meta_servers.group_host_port()->leader(); } -} -} // end namespace +} // namespace dist +} // namespace dsn diff --git a/src/failure_detector/fd.client.h b/src/failure_detector/fd.client.h index 96fde50fe0..3df219d6a7 100644 --- a/src/failure_detector/fd.client.h +++ b/src/failure_detector/fd.client.h @@ -28,7 +28,7 @@ #include "fd.code.definition.h" #include #include "utils/optional.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" namespace dsn { namespace fd { @@ -83,5 +83,5 @@ class failure_detector_client private: ::dsn::rpc_address _server; }; -} -} +} // namespace fd +} // namespace dsn diff --git a/src/failure_detector/fd.code.definition.h b/src/failure_detector/fd.code.definition.h index 1f8934aaee..bde94541ef 100644 --- a/src/failure_detector/fd.code.definition.h +++ b/src/failure_detector/fd.code.definition.h @@ -26,20 +26,20 @@ #pragma once -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "runtime/api_task.h" #include "runtime/api_layer1.h" #include "runtime/app_model.h" #include "utils/api_utilities.h" #include "utils/error_code.h" #include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "common/gpid.h" -#include "runtime/rpc/serialization.h" -#include "runtime/rpc/rpc_stream.h" +#include "rpc/serialization.h" +#include "rpc/rpc_stream.h" #include "runtime/serverlet.h" #include "runtime/service_app.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" #include "fd_types.h" namespace dsn { diff --git a/src/failure_detector/fd.server.h b/src/failure_detector/fd.server.h index e518b8c47a..6bb686eb33 100644 --- a/src/failure_detector/fd.server.h +++ b/src/failure_detector/fd.server.h @@ -55,5 +55,5 @@ class failure_detector_service : public ::dsn::serverletunregister_rpc_handler(RPC_FD_FAILURE_DETECTOR_PING); } }; -} -} \ No newline at end of file +} // namespace fd +} // namespace dsn \ No newline at end of file diff --git a/src/failure_detector/test/failure_detector.cpp b/src/failure_detector/test/failure_detector.cpp index 1e14ebece0..81eaf2873c 100644 --- a/src/failure_detector/test/failure_detector.cpp +++ b/src/failure_detector/test/failure_detector.cpp @@ -47,17 +47,17 @@ #include "meta/meta_options.h" #include "meta/meta_server_failure_detector.h" #include "replica/replica_stub.h" +#include "rpc/group_host_port.h" +#include "rpc/network.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" #include "runtime/serverlet.h" #include "runtime/service_app.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/async_calls.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/enum_helper.h" #include "utils/error_code.h" #include "utils/flags.h" @@ -110,9 +110,10 @@ class worker_fd_test : public ::dsn::dist::slave_failure_detector_with_multimast public: worker_fd_test(replication::replica_stub *stub, std::vector &meta_servers) - : slave_failure_detector_with_multimaster(meta_servers, - [=]() { stub->on_meta_server_disconnected(); }, - [=]() { stub->on_meta_server_connected(); }) + : slave_failure_detector_with_multimaster( + meta_servers, + [=]() { stub->on_meta_server_disconnected(); }, + [=]() { stub->on_meta_server_connected(); }) { _send_ping_switch = false; } @@ -219,6 +220,7 @@ class test_worker : public service_app, public serverlet } worker_fd_test *fd() { return _worker_fd; } + private: worker_fd_test *_worker_fd; }; @@ -241,6 +243,7 @@ class test_master : public service_app for (auto &port : ports) { rpc_address addr(network::get_local_ipv4(), std::stoi(port)); const auto hp = ::dsn::host_port::from_address(addr); + CHECK(hp, "'{}' can not be reverse resolved", addr); _master_fd->add_allow_list(hp); } use_allow_list = true; @@ -256,6 +259,7 @@ class test_master : public service_app error_code stop(bool) override { return ERR_OK; } master_fd_test *fd() { return _master_fd; } + private: master_fd_test *_master_fd; replication::fd_suboptions _opts; diff --git a/src/failure_detector/test/run.sh b/src/failure_detector/test/run.sh index be8861ea2b..067eee1d12 100755 --- a/src/failure_detector/test/run.sh +++ b/src/failure_detector/test/run.sh @@ -40,9 +40,9 @@ while read -r -a line; do echo "run dsn.failure_detector.tests $test_case failed" echo "---- ls ----" ls -l - if find . -name log.1.txt; then - echo "---- tail -n 100 log.1.txt ----" - tail -n 100 `find . -name log.1.txt` + if [ `find . -name pegasus.log.* | wc -l` -ne 0 ]; then + echo "---- tail -n 100 pegasus.log.* ----" + tail -n 100 `find . -name pegasus.log.*` fi if [ -f core ]; then echo "---- gdb ./dsn.failure_detector.tests core ----" diff --git a/src/geo/bench/bench.cpp b/src/geo/bench/bench.cpp index 47687b0b89..42814687d3 100644 --- a/src/geo/bench/bench.cpp +++ b/src/geo/bench/bench.cpp @@ -44,8 +44,9 @@ static const int data_count = 10000; int main(int argc, char **argv) { if (argc < 7) { - std::cerr << "USAGE: " << argv[0] << " " - " [gen_data]" + std::cerr << "USAGE: " << argv[0] + << " " + " [gen_data]" << std::endl; return -1; } diff --git a/src/geo/lib/geo_client.cpp b/src/geo/lib/geo_client.cpp index bc415eeb34..1cbb7b49d6 100644 --- a/src/geo/lib/geo_client.cpp +++ b/src/geo/lib/geo_client.cpp @@ -166,7 +166,7 @@ void geo_client::async_set(const std::string &hash_key, hash_key, sort_key, true, - [ this, hash_key, sort_key, value, timeout_ms, ttl_seconds, cb = std::move(callback) ]( + [this, hash_key, sort_key, value, timeout_ms, ttl_seconds, cb = std::move(callback)]( int ec_, pegasus_client::internal_info &&info_) { if (ec_ != PERR_OK) { cb(ec_, std::move(info_)); @@ -261,7 +261,7 @@ void geo_client::async_get(const std::string &hash_key, _common_data_client->async_get( hash_key, sort_key, - [ this, &hash_key, &sort_key, id, cb = std::move(callback) ]( + [this, &hash_key, &sort_key, id, cb = std::move(callback)]( int ec_, std::string &&value_, pegasus_client::internal_info &&info_) { if (ec_ != PERR_OK) { cb(ec_, id, 0, 0); @@ -316,7 +316,7 @@ void geo_client::async_del(const std::string &hash_key, _common_data_client->async_get( hash_key, sort_key, - [ this, hash_key, sort_key, keep_common_data, timeout_ms, cb = std::move(callback) ]( + [this, hash_key, sort_key, keep_common_data, timeout_ms, cb = std::move(callback)]( int ec_, std::string &&value_, pegasus_client::internal_info &&info_) { if (ec_ == PERR_NOT_FOUND) { if (cb != nullptr) { @@ -510,16 +510,15 @@ void geo_client::async_search_radial(const std::string &hash_key, _common_data_client->async_get( hash_key, sort_key, - [ - this, - hash_key, - sort_key, - radius_m, - count, - sort_type, - timeout_ms, - cb = std::move(callback) - ](int ec_, std::string &&value_, pegasus_client::internal_info &&) mutable { + [this, + hash_key, + sort_key, + radius_m, + count, + sort_type, + timeout_ms, + cb = std::move(callback)]( + int ec_, std::string &&value_, pegasus_client::internal_info &&) mutable { if (ec_ != PERR_OK) { LOG_ERROR("get failed. hash_key={}, sort_key={}, error={}", utils::redact_sensitive_string(hash_key), @@ -566,8 +565,8 @@ void geo_client::async_search_radial(const S2LatLng &latlng, count, sort_type, timeout_ms, - [ this, count, sort_type, cb = std::move(callback) ]( - std::list> && results_) { + [this, count, sort_type, cb = std::move(callback)]( + std::list> &&results_) { std::list result; normalize_result(std::move(results_), count, sort_type, result); cb(PERR_OK, std::move(result)); @@ -605,13 +604,12 @@ void geo_client::async_get_result_from_cells(const S2CellUnion &cids, std::shared_ptr> send_finish = std::make_shared>(false); std::shared_ptr> scan_count = std::make_shared>(0); auto single_scan_finish_callback = - [ send_finish, scan_count, results, cb = std::move(callback) ]() - { - // NOTE: make sure fetch_sub is at first of the if expression to make it always execute - if (scan_count->fetch_sub(1) == 1 && send_finish->load()) { - cb(std::move(*results.get())); - } - }; + [send_finish, scan_count, results, cb = std::move(callback)]() { + // NOTE: make sure fetch_sub is at first of the if expression to make it always execute + if (scan_count->fetch_sub(1) == 1 && send_finish->load()) { + cb(std::move(*results.get())); + } + }; for (const auto &cid : cids) { if (cap_ptr->Contains(S2Cell(cid))) { @@ -873,7 +871,7 @@ void geo_client::start_scan(const std::string &hash_key, start_sort_key, stop_sort_key, options, - [ this, cap_ptr, count, cb = std::move(callback), &result ]( + [this, cap_ptr, count, cb = std::move(callback), &result]( int error_code, pegasus_client::pegasus_scanner *hash_scanner) mutable { if (error_code == PERR_OK) { do_scan(hash_scanner->get_smart_wrapper(), cap_ptr, count, std::move(cb), result); @@ -890,7 +888,7 @@ void geo_client::do_scan(pegasus_client::pegasus_scanner_wrapper scanner_wrapper std::list &result) { scanner_wrapper->async_next( - [ this, cap_ptr, count, scanner_wrapper, cb = std::move(callback), &result ]( + [this, cap_ptr, count, scanner_wrapper, cb = std::move(callback), &result]( int ret, std::string &&geo_hash_key, std::string &&geo_sort_key, @@ -984,9 +982,8 @@ void geo_client::async_distance(const std::string &hash_key1, std::shared_ptr ret = std::make_shared(PERR_OK); std::shared_ptr mutex = std::make_shared(); std::shared_ptr> get_result = std::make_shared>(); - auto async_get_callback = [ =, cb = std::move(callback) ]( - int ec_, std::string &&value_, pegasus_client::internal_info &&) - { + auto async_get_callback = [=, cb = std::move(callback)]( + int ec_, std::string &&value_, pegasus_client::internal_info &&) { if (ec_ != PERR_OK) { LOG_ERROR("get data failed. hash_key1={}, sort_key1={}, hash_key2={}, sort_key2={}, " "error={}", diff --git a/src/geo/lib/geo_client.h b/src/geo/lib/geo_client.h index 1d00dc7fe5..92b23e0f5e 100644 --- a/src/geo/lib/geo_client.h +++ b/src/geo/lib/geo_client.h @@ -28,7 +28,7 @@ #include #include "latlng_codec.h" -#include "runtime/task/task_tracker.h" +#include "task/task_tracker.h" class S2Cap; class S2CellId; diff --git a/src/geo/test/geo_test.cpp b/src/geo/test/geo_test.cpp index e66a251d0d..c99628aae1 100644 --- a/src/geo/test/geo_test.cpp +++ b/src/geo/test/geo_test.cpp @@ -40,7 +40,7 @@ #include "geo/lib/geo_client.h" #include "gtest/gtest.h" #include "pegasus/client.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/flags.h" diff --git a/docker/clang-format-3.9/Dockerfile b/src/gutil/CMakeLists.txt similarity index 65% rename from docker/clang-format-3.9/Dockerfile rename to src/gutil/CMakeLists.txt index 2ebea2de62..5f030cdb1e 100644 --- a/docker/clang-format-3.9/Dockerfile +++ b/src/gutil/CMakeLists.txt @@ -15,14 +15,7 @@ # specific language governing permissions and limitations # under the License. -FROM ubuntu:18.04 +# TODO(yingchun): add the project after new *.cpp files have been added. +#set(MY_PROJ_NAME pgs_gutil) -LABEL maintainer=wutao - -RUN sed -i 's/archive.ubuntu.com/mirrors.aliyun.com/' /etc/apt/sources.list \ - && apt-get update -y \ - && apt-get install --no-install-recommends -y software-properties-common \ - && add-apt-repository ppa:git-core/ppa \ - && apt-get update -y \ - && apt-get install --no-install-recommends -y clang-format-3.9 git \ - && rm -rf /var/lib/apt/lists/* +add_subdirectory(test) diff --git a/src/gutil/map_traits.h b/src/gutil/map_traits.h new file mode 100644 index 0000000000..2f3b9e90bf --- /dev/null +++ b/src/gutil/map_traits.h @@ -0,0 +1,80 @@ +// +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#pragma once + +// Traits classes for performing uniform lookup on different map value types. +// +// The access is computed as follows: +// +// 1. If T has a `first` or `second` field, use them. +// 2. Otherwise if it has `key()` or `value()` methods, use them. +// 3. Otherwise the program is ill-formed. + +#include + +namespace gutil { +namespace subtle { +namespace internal_map_traits { +struct Rank1 +{ +}; +struct Rank0 : Rank1 +{ +}; + +template +auto GetKey(V &&v, Rank0) -> decltype((std::forward(v).first)) +{ + return std::forward(v).first; +} +template +auto GetKey(V &&v, Rank1) -> decltype(std::forward(v).key()) +{ + return std::forward(v).key(); +} + +template +auto GetMapped(V &&v, Rank0) -> decltype((std::forward(v).second)) +{ + return std::forward(v).second; +} +template +auto GetMapped(V &&v, Rank1) -> decltype(std::forward(v).value()) +{ + return std::forward(v).value(); +} + +} // namespace internal_map_traits + +// Accesses the `key_type` from a `value_type`. +template +auto GetKey(V &&v) + -> decltype(internal_map_traits::GetKey(std::forward(v), internal_map_traits::Rank0())) +{ + return internal_map_traits::GetKey(std::forward(v), internal_map_traits::Rank0()); +} + +// Accesses the `mapped_type` from a `value_type`. +template +auto GetMapped(V &&v) + -> decltype(internal_map_traits::GetMapped(std::forward(v), internal_map_traits::Rank0())) +{ + return internal_map_traits::GetMapped(std::forward(v), internal_map_traits::Rank0()); +} + +} // namespace subtle +} // namespace gutil diff --git a/src/gutil/map_util.h b/src/gutil/map_util.h new file mode 100644 index 0000000000..4ce286fc07 --- /dev/null +++ b/src/gutil/map_util.h @@ -0,0 +1,717 @@ +// +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#pragma once + +// This file provides utility functions for use with STL map-like data +// structures, such as std::map and hash_map. Some functions will also work with +// sets, such as ContainsKey(). +// +// The main functions in this file fall into the following categories: +// +// - Find*() +// - Contains*() +// - Insert*() +// - Lookup*() +// +// These functions often have "...OrDie" or "...OrDieNoPrint" variants. These +// variants will crash the process with a CHECK() failure on error, including +// the offending key/data in the log message. The NoPrint variants will not +// include the key/data in the log output under the assumption that it's not a +// printable type. +// +// Most functions are fairly self explanatory from their names, with the +// exception of Find*() vs Lookup*(). The Find functions typically use the map's +// .find() member function to locate and return the map's value type. The +// Lookup*() functions typically use the map's .insert() (yes, insert) member +// function to insert the given value if necessary and returns (usually a +// reference to) the map's value type for the found item. +// +// See the per-function comments for specifics. +// +// There are also a handful of functions for doing other miscellaneous things. +// +// A note on terminology: +// +// In this file, `m` and `M` represent a map and its type. +// +// Map-like containers are collections of pairs. Like all STL containers they +// contain a few standard typedefs identifying the types of data they contain. +// Given the following map declaration: +// +// std::map my_map; +// +// the notable typedefs would be as follows: +// +// - key_type -- string +// - value_type -- std::pair +// - mapped_type -- int +// +// Note that the map above contains two types of "values": the key-value pairs +// themselves (value_type) and the values within the key-value pairs +// (mapped_type). A value_type consists of a key_type and a mapped_type. +// +// The documentation below is written for programmers thinking in terms of keys +// and the (mapped_type) values associated with a given key. For example, the +// statement +// +// my_map["foo"] = 3; +// +// has a key of "foo" (type: string) with a value of 3 (type: int). +// + +#include +#include +#include +#include + +#include "absl/meta/type_traits.h" +#include "gutil/map_traits.h" +#include "gutil/no_destructor.h" +#include "utils/fmt_logging.h" + +namespace gutil { + +// These helper template aliases are implementation details of map_util, +// provided for notational convenience. Despite the file-level documentation +// about map typedefs, map_util doesn't actually use them. +// It uses the Map's value_type, and the value_type's first_type and +// second_type. This can matter for nonconformant containers. +template +using MapUtilValueT = typename M::value_type; +template +using MapUtilKeyT = typename MapUtilValueT::first_type; +template +using MapUtilMappedT = typename MapUtilValueT::second_type; + +namespace internal_map_util { + +template +struct HasTryEmplace : std::false_type +{ +}; + +template +struct HasTryEmplace< + M, + absl::void_t().try_emplace(std::declval &>()))>> + : std::true_type +{ +}; + +template +struct InitType +{ + using type = MapUtilValueT; +}; + +template +struct InitType>::value>::type> +{ + using type = typename M::init_type; +}; + +template +const V &ValueInitializedDefault() +{ + static const gutil::NoDestructor value_initialized_default{}; + return *value_initialized_default; +} + +} // namespace internal_map_util + +template +using MapUtilInitT = typename internal_map_util::InitType::type; + +// +// Find*() +// + +// Returns a const reference to the value associated with the given key if it +// exists. Crashes otherwise. +// +// This is intended as a replacement for operator[] as an rvalue (for reading) +// when the key is guaranteed to exist. +// +// operator[] for lookup is discouraged for several reasons (note that these +// reasons may apply to only some map types): +// * It has a side-effect of inserting missing keys +// * It is not thread-safe (even when it is not inserting, it can still +// choose to resize the underlying storage) +// * It invalidates iterators (when it chooses to resize) +// * It default constructs a value object even if it doesn't need to +// +// This version assumes the key is printable, and includes it in the fatal log +// message. +template > +const MapUtilMappedT &FindOrDie(const M &m, const KeyType &key) +{ + auto it = m.find(key); + CHECK(it != m.end(), "Map key not found: {}", key); + return gutil::subtle::GetMapped(*it); +} + +// Same as above, but returns a non-const reference. +template > +MapUtilMappedT &FindOrDie(M &m, // NOLINT + const KeyType &key) +{ + auto it = m.find(key); + CHECK(it != m.end(), "Map key not found: {}", key); + return gutil::subtle::GetMapped(*it); +} + +// Same as FindOrDie above, but doesn't log the key on failure. +template > +const MapUtilMappedT &FindOrDieNoPrint(const M &m, const KeyType &key) +{ + auto it = m.find(key); + CHECK(it != m.end(), "Map key not found"); + return gutil::subtle::GetMapped(*it); +} + +// Same as above, but returns a non-const reference. +template > +MapUtilMappedT &FindOrDieNoPrint(M &m, // NOLINT + const KeyType &key) +{ + auto it = m.find(key); + CHECK(it != m.end(), "Map key not found"); + return gutil::subtle::GetMapped(*it); +} + +// Returns a const reference to the value associated with the given key if it +// exists, otherwise returns a const reference to a value-initialized object +// that is never destroyed. +template > +const MapUtilMappedT &FindWithDefault(const M &m, const KeyType &key) +{ + auto it = m.find(key); + if (it != m.end()) + return gutil::subtle::GetMapped(*it); + return internal_map_util::ValueInitializedDefault>(); +} + +// Returns a const reference to the value associated with the given key if it +// exists, otherwise returns a const reference to the provided default value. +// +// Prefer the two-argument form unless you need to specify a custom default +// value (i.e., one that is not equal to a value-initialized instance). +// +// WARNING: If a temporary object is passed as the default "value," +// this function will return a reference to that temporary object, +// which will be destroyed at the end of the statement. A common +// example: if you have a map with string values, and you pass a char* +// as the default "value," either use the returned value immediately +// or store it in a string (not string&). +// +// TODO: Stop using this. +template +const MapUtilMappedT & +FindWithDefault(const M &m, const MapUtilKeyT &key, const MapUtilMappedT &value) +{ + auto it = m.find(key); + if (it != m.end()) + return gutil::subtle::GetMapped(*it); + return value; +} + +// Returns a pointer to the const value associated with the given key if it +// exists, or null otherwise. +template > +const MapUtilMappedT *FindOrNull(const M &m, const KeyType &key) +{ + auto it = m.find(key); + if (it == m.end()) + return nullptr; + return &gutil::subtle::GetMapped(*it); +} + +// Returns a pointer to the non-const value associated with the given key if it +// exists, or null otherwise. +template > +MapUtilMappedT *FindOrNull(M &m, // NOLINT + const KeyType &key) +{ + auto it = m.find(key); + if (it == m.end()) + return nullptr; + return &gutil::subtle::GetMapped(*it); +} + +// Returns the pointer value associated with the given key. If none is found, +// null is returned. The function is designed to be used with a map of keys +// to pointers. +// +// This function does not distinguish between a missing key and a key mapped +// to a null value. +template > +MapUtilMappedT FindPtrOrNull(const M &m, const KeyType &key) +{ + auto it = m.find(key); + if (it == m.end()) + return MapUtilMappedT(); + return gutil::subtle::GetMapped(*it); +} + +// Same as above, except takes non-const reference to m. +// +// This function is needed for containers that propagate constness to the +// pointee, such as boost::ptr_map. +template > +MapUtilMappedT FindPtrOrNull(M &m, // NOLINT + const KeyType &key) +{ + auto it = m.find(key); + if (it == m.end()) + return MapUtilMappedT(); + return gutil::subtle::GetMapped(*it); +} + +// Finds the value associated with the given key and copies it to *value (if +// non-null). Returns false if the key was not found, true otherwise. +template +bool FindCopy(const M &m, const Key &key, Value *value) +{ + auto it = m.find(key); + if (it == m.end()) + return false; + if (value) + *value = gutil::subtle::GetMapped(*it); + return true; +} + +// +// Contains*() +// + +// Returns true if and only if the given m contains the given key. +template +bool ContainsKey(const M &m, const Key &key) +{ + return m.find(key) != m.end(); +} + +// Returns true if and only if the given m contains the given key-value +// pair. +template +bool ContainsKeyValuePair(const M &m, const Key &key, const Value &value) +{ + auto range = m.equal_range(key); + for (auto it = range.first; it != range.second; ++it) { + if (gutil::subtle::GetMapped(*it) == value) { + return true; + } + } + return false; +} + +// +// Insert*() +// + +// Inserts the given key-value pair into the m. Returns true if and +// only if the key from the given pair didn't previously exist. Otherwise, the +// value in the map is replaced with the value from the given pair. +template +bool InsertOrUpdate(M *m, const MapUtilInitT &vt) +{ + auto ret = m->insert(vt); + if (ret.second) + return true; + subtle::GetMapped(*ret.first) = subtle::GetMapped(vt); // update + return false; +} + +// Same as above, except that the key and value are passed separately. +template +bool InsertOrUpdate(M *m, const MapUtilKeyT &key, const MapUtilMappedT &value) +{ + return InsertOrUpdate(m, {key, value}); +} + +// Inserts/updates all the key-value pairs from the range defined by the +// iterators "first" and "last" into the given m. +template +void InsertOrUpdateMany(M *m, InputIterator first, InputIterator last) +{ + for (; first != last; ++first) { + InsertOrUpdate(m, *first); + } +} + +// Change the value associated with a particular key in a map or hash_map +// of the form std::map which owns the objects pointed to by the +// value pointers. If there was an existing value for the key, it is deleted. +// True indicates an insert took place, false indicates an update + delete. +template +bool InsertAndDeleteExisting(M *m, const MapUtilKeyT &key, const MapUtilMappedT &value) +{ + auto ret = m->insert(MapUtilValueT(key, value)); + if (ret.second) + return true; + delete ret.first->second; + ret.first->second = value; + return false; +} + +// Inserts the given key and value into the given m if and only if the +// given key did NOT already exist in the m. If the key previously +// existed in the m, the value is not changed. Returns true if the +// key-value pair was inserted; returns false if the key was already present. +template +bool InsertIfNotPresent(M *m, const MapUtilInitT &vt) +{ + return m->insert(vt).second; +} + +// Same as above except the key and value are passed separately. +template +bool InsertIfNotPresent(M *m, const MapUtilKeyT &key, const MapUtilMappedT &value) +{ + return InsertIfNotPresent(m, {key, value}); +} + +// Same as above except dies if the key already exists in the m. +template +void InsertOrDie(M *m, const MapUtilInitT &value) +{ + CHECK(InsertIfNotPresent(m, value), "duplicate value: {}", value); +} + +// Same as above except doesn't log the value on error. +template +void InsertOrDieNoPrint(M *m, const MapUtilInitT &value) +{ + CHECK(InsertIfNotPresent(m, value), "duplicate value."); +} + +// Inserts the key-value pair into the m. Dies if key was already +// present. +template +void InsertOrDie(M *m, const MapUtilKeyT &key, const MapUtilMappedT &data) +{ + CHECK(InsertIfNotPresent(m, key, data), "duplicate key: {}", key); +} + +// Same as above except doesn't log the key on error. +template +void InsertOrDieNoPrint(M *m, const MapUtilKeyT &key, const MapUtilMappedT &data) +{ + CHECK(InsertIfNotPresent(m, key, data), "duplicate key."); +} + +// Inserts a new key and default-initialized value. Dies if the key was already +// present. Returns a reference to the value. Example usage: +// +// std::map m; +// SomeProto& proto = InsertKeyOrDie(&m, 3); +// proto.set_field("foo"); +template +auto InsertKeyOrDie(M *m, const MapUtilKeyT &key) -> + typename std::enable_if::value, MapUtilMappedT &>::type +{ + auto res = m->try_emplace(key); + CHECK(res.second, "duplicate key: {}", key); + return gutil::subtle::GetMapped(*res.first); +} + +// Anything without try_emplace, we support with the legacy code path. +template +auto InsertKeyOrDie(M *m, const MapUtilKeyT &key) -> + typename std::enable_if::value, MapUtilMappedT &>::type +{ + auto res = m->insert(MapUtilValueT(key, MapUtilMappedT())); + CHECK(res.second, "duplicate key: {}", key); + return res.first->second; +} + +// +// Lookup*() +// + +// Looks up a given key and value pair in m and inserts the key-value pair if +// it's not already present. Returns a reference to the value associated with +// the key. +template +MapUtilMappedT &LookupOrInsert(M *m, const MapUtilInitT &vt) +{ + return subtle::GetMapped(*m->insert(vt).first); +} + +// Same as above except the key-value are passed separately. +template +MapUtilMappedT &LookupOrInsert(M *m, const MapUtilKeyT &key, const MapUtilMappedT &value) +{ + return LookupOrInsert(m, {key, value}); +} + +// Returns a reference to the pointer associated with key. If not found, a +// pointee is constructed and added to the map. In that case, the new pointee is +// forwarded constructor arguments; when no arguments are provided the default +// constructor is used. +// +// Useful for containers of the form Map, where Ptr is pointer-like. +template +MapUtilMappedT &LookupOrInsertNew(M *m, const MapUtilKeyT &key, Args &&...args) +{ + using Mapped = MapUtilMappedT; + using MappedDeref = decltype(*std::declval()); + using Element = typename std::decay::type; + auto ret = m->insert(MapUtilValueT(key, Mapped())); + if (ret.second) { + ret.first->second = Mapped(new Element(std::forward(args)...)); + } + return ret.first->second; +} + +// +// Misc Utility Functions +// + +// Updates the value associated with the given key. If the key was not already +// present, then the key-value pair are inserted and "previous" is unchanged. If +// the key was already present, the value is updated and "*previous" will +// contain a copy of the old value. +// +// Returns true if and only if there was an already existing value. +// +// InsertOrReturnExisting has complementary behavior that returns the +// address of an already existing value, rather than updating it. + +template +bool UpdateReturnCopy(M *m, const MapUtilValueT &vt, MapUtilMappedT *previous) +{ + auto ret = m->insert(vt); + if (ret.second) + return false; + if (previous) + *previous = ret.first->second; + ret.first->second = vt.second; // update + return true; +} + +// Same as above except that the key and mapped value are passed separately. +template +bool UpdateReturnCopy(M *m, + const MapUtilKeyT &key, + const MapUtilMappedT &value, + MapUtilMappedT *previous) +{ + return UpdateReturnCopy(m, MapUtilValueT(key, value), previous); +} + +// Tries to insert the given key-value pair into the m. Returns null +// if the insert succeeds. Otherwise, returns a pointer to the existing value. +// +// This complements UpdateReturnCopy in that it allows to update only after +// verifying the old value and still insert quickly without having to look up +// twice. Unlike UpdateReturnCopy this also does not come with the issue of an +// undefined previous* in case new data was inserted. +template +MapUtilMappedT *InsertOrReturnExisting(M *m, const MapUtilValueT &vt) +{ + auto ret = m->insert(vt); + if (ret.second) + return nullptr; // Inserted, no previous value. + return &ret.first->second; // Return address of previous value. +} + +// Same as above, except for explicit key and data. +template +MapUtilMappedT * +InsertOrReturnExisting(M *m, const MapUtilKeyT &key, const MapUtilMappedT &data) +{ + return InsertOrReturnExisting(m, MapUtilValueT(key, data)); +} + +// Saves the reverse mapping into reverse. Returns true if values could all be +// inserted. +template +bool ReverseMap(const M &m, ReverseM *reverse) +{ + CHECK_NOTNULL(reverse, ""); + bool all_unique = true; + for (const auto &kv : m) { + if (!InsertOrUpdate(reverse, kv.second, kv.first)) { + all_unique = false; + } + } + return all_unique; +} + +// Like ReverseMap above, but returns its output m. Return type has to +// be specified explicitly. Example: +// M::M(...) : m_(...), r_(ReverseMap(m_)) {} +template +ReverseM ReverseMap(const M &m) +{ + typename std::remove_const::type reverse; + ReverseMap(m, &reverse); + return reverse; +} + +// Erases the m item identified by the given key, and returns the value +// associated with that key. It is assumed that the value (i.e., the +// mapped_type) is a pointer. Returns null if the key was not found in the +// m. +// +// Examples: +// std::map my_map; +// +// One line cleanup: +// delete EraseKeyReturnValuePtr(&my_map, "abc"); +// +// Use returned value: +// std::unique_ptr value_ptr( +// EraseKeyReturnValuePtr(&my_map, "abc")); +// if (value_ptr.get()) +// value_ptr->DoSomething(); +// +template +MapUtilMappedT EraseKeyReturnValuePtr(M *m, const MapUtilKeyT &key) +{ + auto it = m->find(key); + if (it == m->end()) + return nullptr; + MapUtilMappedT v = std::move(gutil::subtle::GetMapped(*it)); + m->erase(it); + return v; +} + +// Inserts all the keys from m into key_container, which must +// support insert(M::key_type). +// +// Note: any initial contents of the key_container are not cleared. +template +void InsertKeysFromMap(const M &m, KeyContainer *key_container) +{ + CHECK_NOTNULL(key_container, ""); + for (const auto &kv : m) { + key_container->insert(kv.first); + } +} + +// Appends all the keys from m into key_container, which must +// support push_back(M::key_type). +// +// Note: any initial contents of the key_container are not cleared. +template +void AppendKeysFromMap(const M &m, KeyContainer *key_container) +{ + CHECK_NOTNULL(key_container, ""); + for (const auto &kv : m) { + key_container->push_back(kv.first); + } +} + +// A more specialized overload of AppendKeysFromMap to optimize reallocations +// for the common case in which we're appending keys to a vector and hence can +// (and sometimes should) call reserve() first. +// +// (It would be possible to play SFINAE games to call reserve() for any +// m that supports it, but this seems to get us 99% of what we need +// without the complexity of a SFINAE-based solution.) +template +void AppendKeysFromMap(const M &m, std::vector *key_container) +{ + CHECK_NOTNULL(key_container, ""); + // We now have the opportunity to call reserve(). Calling reserve() every + // time is a bad idea for some use cases: libstdc++'s implementation of + // std::vector<>::reserve() resizes the vector's backing store to exactly the + // given size (unless it's already at least that big). Because of this, + // the use case that involves appending a lot of small maps (total size + // N) one by one to a vector would be O(N^2). But never calling reserve() + // loses the opportunity to improve the use case of adding from a large + // map to an empty vector (this improves performance by up to 33%). A + // number of heuristics are possible. Here we use the simplest one. + if (key_container->empty()) { + key_container->reserve(m.size()); + } + for (const auto &kv : m) { + key_container->push_back(kv.first); + } +} + +// Inserts all the values from m into value_container, which must +// support push_back(M::mapped_type). +// +// Note: any initial contents of the value_container are not cleared. +template +void AppendValuesFromMap(const M &m, ValueContainer *value_container) +{ + CHECK_NOTNULL(value_container, ""); + for (const auto &kv : m) { + value_container->push_back(kv.second); + } +} + +// A more specialized overload of AppendValuesFromMap to optimize reallocations +// for the common case in which we're appending values to a vector and hence +// can (and sometimes should) call reserve() first. +// +// (It would be possible to play SFINAE games to call reserve() for any +// m that supports it, but this seems to get us 99% of what we need +// without the complexity of a SFINAE-based solution.) +template +void AppendValuesFromMap(const M &m, std::vector *value_container) +{ + CHECK_NOTNULL(value_container, ""); + // See AppendKeysFromMap for why this is done. + if (value_container->empty()) { + value_container->reserve(m.size()); + } + for (const auto &kv : m) { + value_container->push_back(kv.second); + } +} + +// Erases all elements of m where predicate evaluates to true. +// Note: To avoid unnecessary temporary copies of map elements passed to the +// predicate, the predicate must accept 'const M::value_type&'. +// In particular, the value type for a map is 'std::pair', and so a +// predicate accepting 'std::pair' will result in temporary copies. +template +auto AssociativeEraseIf(M *m, Predicate predicate) -> + typename std::enable_iferase(m->begin()))>::value>::type +{ + CHECK_NOTNULL(m, ""); + for (auto it = m->begin(); it != m->end();) { + if (predicate(*it)) { + m->erase(it++); + } else { + ++it; + } + } +} + +template +auto AssociativeEraseIf(M *m, Predicate predicate) -> typename std::enable_if< + std::is_samebegin()), decltype(m->erase(m->begin()))>::value>::type +{ + CHECK_NOTNULL(m, ""); + for (auto it = m->begin(); it != m->end();) { + if (predicate(*it)) { + it = m->erase(it); + } else { + ++it; + } + } +} + +} // namespace gutil diff --git a/src/gutil/no_destructor.h b/src/gutil/no_destructor.h new file mode 100644 index 0000000000..d693fe98d1 --- /dev/null +++ b/src/gutil/no_destructor.h @@ -0,0 +1,114 @@ +// +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#pragma once + +#include +#include +#include + +namespace gutil { + +// NoDestructor is a wrapper around an object of type T that +// * stores the object of type T inline inside NoDestructor +// * eagerly forwards constructor arguments to it (i.e. acts like T in terms +// of construction) +// * provides access to the object of type T like a pointer via ->, *, and get() +// (note that const NoDestructor works like a pointer to const T) +// * never calls T's destructor for the object +// (hence NoDestructor objects created on the stack or as member variables +// will lead to memory and/or resource leaks) +// +// One key use case of NoDestructor (which in itself is not lazy) is optimizing +// the following pattern of safe on-demand construction of an object with +// non-trivial constructor in static storage without destruction ever happening: +// const string& MyString() { +// static string* x = new string("foo"); // note the "static" +// return *x; +// } +// By using NoDestructor we do not need to involve heap allocation and +// corresponding pointer following (and hence extra CPU cache usage/needs) +// on each access: +// const string& MyString() { +// static NoDestructor x("foo"); +// return *x; +// } +// Since C++11 this static-in-a-function pattern results in exactly-once, +// thread-safe, on-demand construction of an object, and very fast access +// thereafter (the cost is a few extra cycles). +// NoDestructor makes accesses even faster by storing the object inline in +// static storage. +// +// Note that: +// * Since destructor is never called, the object lives on during program exit +// and can be safely accessed by any threads that have not been joined. +// +// Also note that +// static NoDestructor ptr(whatever); +// can safely replace +// static NonPOD* ptr = new NonPOD(whatever); +// or +// static NonPOD obj(whatever); +// at file-level scope when the safe static-in-a-function pattern is infeasible +// to use for some good reason. +// All three of the NonPOD patterns above suffer from the same issue that +// initialization of that object happens non-thread-safely at +// a globally-undefined point during initialization of static-storage objects, +// but NoDestructor<> usage provides both the safety of having the object alive +// during program exit sequence and the performance of not doing extra memory +// dereference on access. +// +template +class NoDestructor +{ +public: + typedef T element_type; + + // Forwards arguments to the T's constructor: calls T(args...). + template ::type...), void(NoDestructor)>::value, + int>::type = 0> + explicit NoDestructor(Ts &&...args) + { + new (&space_) T(std::forward(args)...); + } + + // Forwards copy and move construction for T. Enables usage like this: + // static NoDestructor> x{{{"1", "2", "3"}}}; + // static NoDestructor> x{{1, 2, 3}}; + explicit NoDestructor(const T &x) { new (&space_) T(x); } + explicit NoDestructor(T &&x) { new (&space_) T(std::move(x)); } + + // No copying. + NoDestructor(const NoDestructor &) = delete; + NoDestructor &operator=(const NoDestructor &) = delete; + + // Pretend to be a smart pointer to T with deep constness. + // Never returns a null pointer. + T &operator*() { return *get(); } + T *operator->() { return get(); } + T *get() { return reinterpret_cast(&space_); } + const T &operator*() const { return *get(); } + const T *operator->() const { return get(); } + const T *get() const { return reinterpret_cast(&space_); } + +private: + typename std::aligned_storage::type space_; +}; + +} // namespace gutil diff --git a/src/gutil/test/CMakeLists.txt b/src/gutil/test/CMakeLists.txt new file mode 100644 index 0000000000..75e489cd73 --- /dev/null +++ b/src/gutil/test/CMakeLists.txt @@ -0,0 +1,36 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +set(MY_PROJ_NAME pgs_gutil_test) +set(MY_PROJ_SRC "") +set(MY_SRC_SEARCH_MODE "GLOB") +set(MY_PROJ_LIBS + absl::btree + absl::flat_hash_map + absl::node_hash_map + dsn_runtime + dsn_utils + rocksdb + lz4 + zstd + snappy + gmock + gtest) +set(MY_BOOST_LIBS Boost::system Boost::filesystem) +set(MY_BINPLACES + run.sh) +dsn_add_test() diff --git a/src/gutil/test/main.cpp b/src/gutil/test/main.cpp new file mode 100644 index 0000000000..90a402ea3a --- /dev/null +++ b/src/gutil/test/main.cpp @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include + +#include "runtime/app_model.h" + +GTEST_API_ int main(int argc, char **argv) +{ + testing::InitGoogleTest(&argc, argv); + dsn_exit(RUN_ALL_TESTS()); +} diff --git a/src/gutil/test/map_traits_test.cpp b/src/gutil/test/map_traits_test.cpp new file mode 100644 index 0000000000..9da1bdced5 --- /dev/null +++ b/src/gutil/test/map_traits_test.cpp @@ -0,0 +1,78 @@ +// +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "gutil/map_traits.h" + +#include +#include + +#include "absl/container/node_hash_map.h" +#include "gtest/gtest.h" + +namespace gutil { +namespace subtle { +namespace { + +TEST(MapTraitsTest, UnorderedMap) +{ + absl::node_hash_map m = {{1, 2}}; + EXPECT_EQ(1, GetKey(*m.begin())); + EXPECT_EQ(2, GetMapped(*m.begin())); +} + +TEST(MapTraitsTest, UnorderedMapReferences) +{ + absl::node_hash_map m = {{1, 2}}; + auto it = m.begin(); + const int *k = &it->first; + int *v = &it->second; + EXPECT_EQ(k, &GetKey(*it)); + EXPECT_EQ(v, &GetMapped(*it)); + GetMapped(*it) = 3; + EXPECT_EQ(3, m[1]); +} + +TEST(MapTraitsTest, UnorderedMapConstReferences) +{ + const absl::node_hash_map m = {{1, 2}}; + auto it = m.begin(); + const int *k = &it->first; + const int *v = &it->second; + EXPECT_EQ(k, &GetKey(*it)); + EXPECT_EQ(v, &GetMapped(*it)); +} + +struct CustomMapValueType +{ + int first; + int second; + + // Intentionally add 1 to the result to verify that first/second are preferred + // to key()/value(). + int key() const { return first + 1; } + int value() const { return second + 1; } +}; + +TEST(MapTraitsTest, ValueTypeHasBothFieldsAndGetters) +{ + CustomMapValueType entry = {100, 1000}; + EXPECT_EQ(100, GetKey(entry)); + EXPECT_EQ(1000, GetMapped(entry)); +} + +} // namespace +} // namespace subtle +} // namespace gutil diff --git a/src/gutil/test/map_util_test.h b/src/gutil/test/map_util_test.h new file mode 100644 index 0000000000..655dad0f11 --- /dev/null +++ b/src/gutil/test/map_util_test.h @@ -0,0 +1,475 @@ +// +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#pragma once + +// Contains map_util tests templated on STL std::map-like types. + +#include +#include +#include + +#include "gtest/gtest.h" +#include "gutil/map_util.h" +#include "gutil/test/map_util_test.h" + +namespace gutil { + +template +class MapUtilIntIntTest : public ::testing::Test +{ +}; +TYPED_TEST_SUITE_P(MapUtilIntIntTest); + +template +class MapUtilIntIntPtrTest : public ::testing::Test +{ +}; +TYPED_TEST_SUITE_P(MapUtilIntIntPtrTest); + +template +class MapUtilIntIntSharedPtrTest : public ::testing::Test +{ +}; +TYPED_TEST_SUITE_P(MapUtilIntIntSharedPtrTest); + +template +class MapUtilIntIntSharedPtrOnlyTest : public ::testing::Test +{ +}; +TYPED_TEST_SUITE_P(MapUtilIntIntSharedPtrOnlyTest); + +template +class MultiMapUtilIntIntTest : public ::testing::Test +{ +}; +TYPED_TEST_SUITE_P(MultiMapUtilIntIntTest); + +TYPED_TEST_P(MapUtilIntIntTest, ValueMapTests) +{ + using Map = TypeParam; + Map m; + + // Check that I get a default when the key is not present. + EXPECT_EQ(0, FindWithDefault(m, 0, 0)); + EXPECT_EQ(0, FindWithDefault(m, 0)); + // Check that I can insert a value. + EXPECT_TRUE(InsertOrUpdate(&m, 0, 1)); + // .. and get that value back. + EXPECT_EQ(1, FindWithDefault(m, 0, 0)); + EXPECT_EQ(1, FindWithDefault(m, 0)); + // Check that I can update a value. + EXPECT_FALSE(InsertOrUpdate(&m, 0, 2)); + // .. and get that value back. + EXPECT_EQ(2, FindWithDefault(m, 0, 0)); + EXPECT_EQ(2, FindWithDefault(m, 0)); + // Check that FindOrDie works when the value exists. + EXPECT_EQ(2, FindOrDie(m, 0)); + EXPECT_EQ(2, FindOrDieNoPrint(m, 0)); + + // Check FindCopy + int i = 0; + EXPECT_TRUE(FindCopy(m, 0, &i)); + EXPECT_EQ(i, 2); + EXPECT_FALSE(FindCopy(m, 1, &i)); + EXPECT_TRUE(FindCopy(m, 0, static_cast(nullptr))); + EXPECT_FALSE(FindCopy(m, 1, static_cast(nullptr))); + + // Check FindOrNull + int *p1 = FindOrNull(m, 0); + ASSERT_EQ(*p1, 2); + ++(*p1); + const int *p2 = FindOrNull(const_cast(m), 0); + ASSERT_EQ(*p2, 3); + ASSERT_TRUE(FindOrNull(m, 1) == nullptr); + + // Check contains + EXPECT_TRUE(ContainsKey(m, 0)); + EXPECT_FALSE(ContainsKey(m, 1)); + + // Check ContainsKeyValuePair + EXPECT_TRUE(ContainsKeyValuePair(m, 0, 3)); + EXPECT_FALSE(ContainsKeyValuePair(m, 0, 4)); + EXPECT_FALSE(ContainsKeyValuePair(m, 1, 0)); + + // Check insert if not present + EXPECT_FALSE(InsertIfNotPresent(&m, 0, 2)); + EXPECT_TRUE(InsertIfNotPresent(&m, 1, 3)); + + // Check lookup or insert + EXPECT_EQ(3, LookupOrInsert(&m, 0, 2)); + EXPECT_EQ(4, LookupOrInsert(&m, 2, 4)); + EXPECT_EQ(4, FindWithDefault(m, 2, 0)); + EXPECT_EQ(4, FindWithDefault(m, 2)); + + EXPECT_FALSE(InsertOrUpdate(&m, typename Map::value_type(0, 2))); + EXPECT_EQ(2, FindWithDefault(m, 0, 0)); + EXPECT_EQ(2, FindWithDefault(m, 0)); + + // Check InsertOrUpdateMany + std::vector> entries; + entries.push_back(std::make_pair(0, 100)); + entries.push_back(std::make_pair(100, 101)); + entries.push_back(std::make_pair(200, 102)); + + InsertOrUpdateMany(&m, entries.begin(), entries.end()); + EXPECT_EQ(100, FindWithDefault(m, 0, 0)); + EXPECT_EQ(100, FindWithDefault(m, 0)); + EXPECT_EQ(101, FindWithDefault(m, 100, 0)); + EXPECT_EQ(101, FindWithDefault(m, 100)); + EXPECT_EQ(102, FindWithDefault(m, 200, 0)); + EXPECT_EQ(102, FindWithDefault(m, 200)); +} + +TYPED_TEST_P(MapUtilIntIntPtrTest, LookupOrInsertNewTest) +{ + using PtrMap = TypeParam; + PtrMap m; + int *v1, *v2, *v3, *v4; + + // Check inserting one item. + v1 = LookupOrInsertNew(&m, 7); + EXPECT_EQ(0, *v1); + ASSERT_TRUE(v1 != nullptr); + EXPECT_TRUE(ContainsKey(m, 7)); + EXPECT_EQ(m.size(), 1); + + // Check inserting the same item. + v2 = LookupOrInsertNew(&m, 7); + ASSERT_TRUE(v2 != nullptr); + EXPECT_EQ(v1, v2); + EXPECT_EQ(m.size(), 1); + + // Check a couple more items. + v1 = LookupOrInsertNew(&m, 8); + ASSERT_TRUE(v1 != nullptr); + EXPECT_NE(v1, v2); + EXPECT_TRUE(ContainsKey(m, 8)); + EXPECT_EQ(m.size(), 2); + + v2 = LookupOrInsertNew(&m, 8); + EXPECT_EQ(v1, v2); + EXPECT_EQ(m.size(), 2); + + v3 = LookupOrInsertNew(&m, 8, 88); + EXPECT_NE(88, *v3); + EXPECT_EQ(v3, v2); + EXPECT_EQ(m.size(), 2); + + v4 = LookupOrInsertNew(&m, 9, 99); + EXPECT_EQ(99, *v4); + EXPECT_NE(v1, v4); + EXPECT_NE(v2, v4); + EXPECT_NE(v3, v4); + EXPECT_EQ(m.size(), 3); + + // Return by reference, so that the stored value can be modified in the map. + // We check this by verifying the address of the returned value is identical. + EXPECT_EQ(&LookupOrInsertNew(&m, 9), &LookupOrInsertNew(&m, 9, 999)); + + for (auto &kv : m) + delete kv.second; +} + +TYPED_TEST_P(MapUtilIntIntSharedPtrTest, LookupOrInsertNewSharedPtrTest) +{ + using SharedPtrMap = TypeParam; + using SharedPtr = typename TypeParam::value_type::second_type; + SharedPtrMap m; + SharedPtr v1, v2, v3, v4, v5; + + // Check inserting one item. + v1 = LookupOrInsertNew(&m, 7); + ASSERT_TRUE(v1.get() != nullptr); + EXPECT_TRUE(ContainsKey(m, 7)); + EXPECT_EQ(m.size(), 1); + *v1 = 25; + + // Check inserting the same item. + v2 = LookupOrInsertNew(&m, 7); + ASSERT_TRUE(v2.get() != nullptr); + EXPECT_EQ(v1.get(), v2.get()); + EXPECT_EQ(m.size(), 1); + EXPECT_EQ(25, *v2.get()); + + // Check a couple more items. + v2 = LookupOrInsertNew(&m, 8); + ASSERT_TRUE(v2.get() != nullptr); + EXPECT_NE(v1.get(), v2.get()); + EXPECT_TRUE(ContainsKey(m, 8)); + EXPECT_EQ(m.size(), 2); + *v2 = 42; + + v3 = LookupOrInsertNew(&m, 8); + EXPECT_NE(v1.get(), v2.get()); + EXPECT_EQ(v2.get(), v3.get()); + EXPECT_EQ(m.size(), 2); + EXPECT_EQ(25, *v1.get()); + EXPECT_EQ(42, *v2.get()); + EXPECT_EQ(42, *v3.get()); + + m.clear(); + // Since the container does not own the elements and because we still have the + // shared pointers we can still access the old values. + v3 = LookupOrInsertNew(&m, 7); + EXPECT_NE(v1.get(), v3.get()); + EXPECT_NE(v2.get(), v3.get()); + EXPECT_EQ(m.size(), 1); + EXPECT_EQ(25, *v1.get()); + EXPECT_EQ(42, *v2.get()); + EXPECT_EQ(0, *v3.get()); // Also checks for default init of POD elements + + v4 = LookupOrInsertNew(&m, 7, 77); + EXPECT_NE(v1.get(), v4.get()); + EXPECT_NE(v2.get(), v4.get()); + EXPECT_EQ(v3.get(), v4.get()); + EXPECT_EQ(m.size(), 1); + EXPECT_EQ(25, *v1.get()); + EXPECT_EQ(42, *v2.get()); + EXPECT_EQ(0, *v3.get()); + EXPECT_EQ(0, *v4.get()); + + v5 = LookupOrInsertNew(&m, 8, 88); + EXPECT_NE(v1.get(), v5.get()); + EXPECT_NE(v2.get(), v5.get()); + EXPECT_NE(v3.get(), v5.get()); + EXPECT_NE(v4.get(), v5.get()); + EXPECT_EQ(m.size(), 2); + EXPECT_EQ(25, *v1.get()); + EXPECT_EQ(42, *v2.get()); + EXPECT_EQ(0, *v3.get()); + EXPECT_EQ(0, *v4.get()); + EXPECT_EQ(88, *v5.get()); +} + +TYPED_TEST_P(MapUtilIntIntSharedPtrOnlyTest, LookupOrInsertNewSharedPtrSwapTest) +{ + using SharedPtrMap = TypeParam; + using SharedPtr = typename TypeParam::value_type::second_type; + SharedPtrMap m; + SharedPtr v1, v2, v3, v4; + + v1.reset(new int(1)); + LookupOrInsertNew(&m, 11).swap(v1); + EXPECT_TRUE(v1.get() != nullptr); + EXPECT_EQ(0, *v1.get()); // The element created by LookupOrInsertNew + EXPECT_TRUE(ContainsKey(m, 11)); + EXPECT_EQ(1, m.size()); + // If the functions does not correctly return by ref then v2 will contain 0 + // instead of 1 even though v2 still points to the held entry. The tests that + // depend on return by ref use ASSERT_*(). + v2 = LookupOrInsertNew(&m, 11); + ASSERT_EQ(1, *v2.get()); + EXPECT_EQ(v2.get(), LookupOrInsertNew(&m, 11).get()); + + *v2 = 2; + v3 = LookupOrInsertNew(&m, 11); + EXPECT_EQ(2, *v2.get()); + EXPECT_EQ(2, *v3.get()); + ASSERT_NE(v1.get(), v2.get()); + EXPECT_EQ(v2.get(), v3.get()); + ASSERT_NE(v1.get(), LookupOrInsertNew(&m, 11).get()); + EXPECT_EQ(v2.get(), LookupOrInsertNew(&m, 11).get()); + EXPECT_EQ(v3.get(), LookupOrInsertNew(&m, 11).get()); + + v4.reset(new int(4)); + LookupOrInsertNew(&m, 11).swap(v4); + EXPECT_EQ(2, *v4.get()); + ASSERT_EQ(4, *LookupOrInsertNew(&m, 11).get()); + ASSERT_EQ(v3.get(), v4.get()); +} + +TYPED_TEST_P(MapUtilIntIntPtrTest, InsertAndDeleteExistingTest) +{ + using PtrMap = TypeParam; + PtrMap m; + + // Add a few items. + int *v1 = new int; + int *v2 = new int; + int *v3 = new int; + EXPECT_TRUE(InsertAndDeleteExisting(&m, 1, v1)); + EXPECT_TRUE(InsertAndDeleteExisting(&m, 2, v2)); + EXPECT_TRUE(InsertAndDeleteExisting(&m, 3, v3)); + EXPECT_EQ(v1, FindPtrOrNull(m, 1)); + EXPECT_EQ(v2, FindPtrOrNull(m, 2)); + EXPECT_EQ(v3, FindPtrOrNull(m, 3)); + + // Replace a couple. + int *v4 = new int; + int *v5 = new int; + EXPECT_FALSE(InsertAndDeleteExisting(&m, 1, v4)); + EXPECT_FALSE(InsertAndDeleteExisting(&m, 2, v5)); + EXPECT_EQ(v4, FindPtrOrNull(m, 1)); + EXPECT_EQ(v5, FindPtrOrNull(m, 2)); + EXPECT_EQ(v3, FindPtrOrNull(m, 3)); + + // Add one more item. + int *v6 = new int; + EXPECT_TRUE(InsertAndDeleteExisting(&m, 6, v6)); + EXPECT_EQ(v4, FindPtrOrNull(m, 1)); + EXPECT_EQ(v5, FindPtrOrNull(m, 2)); + EXPECT_EQ(v3, FindPtrOrNull(m, 3)); + EXPECT_EQ(v6, FindPtrOrNull(m, 6)); + + // 6 total allocations, this will only delete 4. Heap-check will fail + // here if the existing entries weren't properly deleted. + EXPECT_EQ(4, m.size()); + for (auto &kv : m) + delete kv.second; +} + +TYPED_TEST_P(MapUtilIntIntTest, UpdateReturnCopyTest) +{ + using Map = TypeParam; + Map m; + + int p = 10; + EXPECT_FALSE(UpdateReturnCopy(&m, 0, 5, &p)); + EXPECT_EQ(10, p); + + EXPECT_TRUE(UpdateReturnCopy(&m, 0, 7, &p)); + EXPECT_EQ(5, p); + + // Check UpdateReturnCopy using value_type + p = 10; + EXPECT_FALSE(UpdateReturnCopy(&m, typename Map::value_type(1, 4), &p)); + EXPECT_EQ(10, p); + + EXPECT_TRUE(UpdateReturnCopy(&m, typename Map::value_type(1, 8), &p)); + EXPECT_EQ(4, p); +} + +TYPED_TEST_P(MapUtilIntIntTest, InsertOrReturnExistingTest) +{ + using Map = TypeParam; + Map m; + + EXPECT_EQ(nullptr, InsertOrReturnExisting(&m, 25, 42)); + EXPECT_EQ(42, m[25]); + + int *previous = InsertOrReturnExisting(&m, 25, 666); + EXPECT_EQ(42, *previous); + EXPECT_EQ(42, m[25]); +} + +TYPED_TEST_P(MapUtilIntIntPtrTest, FindPtrOrNullTest) +{ + // Check FindPtrOrNull + using PtrMap = TypeParam; + PtrMap ptr_map; + InsertOrUpdate(&ptr_map, 35, new int(35)); + int *p1 = FindPtrOrNull(ptr_map, 3); + EXPECT_TRUE(nullptr == p1); + const int *p2 = FindPtrOrNull(const_cast(ptr_map), 3); + EXPECT_TRUE(nullptr == p2); + EXPECT_EQ(35, *FindPtrOrNull(ptr_map, 35)); + + for (auto &kv : ptr_map) + delete kv.second; +} + +TYPED_TEST_P(MapUtilIntIntSharedPtrTest, FindPtrOrNullTest) +{ + using SharedPtrMap = TypeParam; + using SharedPtr = typename TypeParam::value_type::second_type; + SharedPtrMap shared_ptr_map; + InsertOrUpdate(&shared_ptr_map, 35, SharedPtr(new int(35))); + const SharedPtr p1 = FindPtrOrNull(shared_ptr_map, 3); + EXPECT_TRUE(nullptr == p1.get()); + const SharedPtr p2 = FindPtrOrNull(const_cast(shared_ptr_map), 3); + EXPECT_TRUE(nullptr == p2.get()); + const SharedPtr p3 = FindPtrOrNull(shared_ptr_map, 35); + const SharedPtr p4 = FindPtrOrNull(shared_ptr_map, 35); + EXPECT_EQ(35, *p3.get()); + EXPECT_EQ(35, *p4.get()); +} + +TYPED_TEST_P(MapUtilIntIntTest, FindOrDieTest) +{ + using Map = TypeParam; + Map m; + m[10] = 15; + EXPECT_EQ(15, FindOrDie(m, 10)); + ASSERT_DEATH(FindOrDie(m, 8), "Map key not found: 8"); + EXPECT_EQ(15, FindOrDieNoPrint(m, 10)); + ASSERT_DEATH(FindOrDieNoPrint(m, 8), "Map key not found"); + + // Make sure the non-const reference returning version works. + FindOrDie(m, 10) = 20; + EXPECT_EQ(20, FindOrDie(m, 10)); + + // Make sure we can lookup values in a const map. + const Map &const_m = m; + EXPECT_EQ(20, FindOrDie(const_m, 10)); +} + +TYPED_TEST_P(MapUtilIntIntTest, InsertOrDieTest) +{ + using Map = TypeParam; + Map m; + InsertOrDie(&m, 1, 2); + EXPECT_EQ(m[1], 2); + ASSERT_DEATH(InsertOrDie(&m, 1, 3), "duplicate"); +} + +TYPED_TEST_P(MapUtilIntIntTest, InsertKeyOrDieTest) +{ + using Map = TypeParam; + Map m; + int &v = InsertKeyOrDie(&m, 1); + EXPECT_EQ(m[1], 0); + v = 2; + EXPECT_EQ(m[1], 2); + ASSERT_DEATH(InsertKeyOrDie(&m, 1), "duplicate"); +} + +TYPED_TEST_P(MapUtilIntIntPtrTest, EraseKeyReturnValuePtrTest) +{ + using PtrMap = TypeParam; + PtrMap ptr_map; + int *v = new int(35); + InsertOrUpdate(&ptr_map, 35, v); + EXPECT_TRUE(EraseKeyReturnValuePtr(&ptr_map, 0) == nullptr); // Test no-op. + EXPECT_EQ(ptr_map.size(), 1); + int *retv = EraseKeyReturnValuePtr(&ptr_map, 35); // Successful operation + EXPECT_EQ(ptr_map.size(), 0); + EXPECT_EQ(v, retv); + delete v; + EXPECT_TRUE(EraseKeyReturnValuePtr(&ptr_map, 35) == nullptr); // Empty map. +} + +TYPED_TEST_P(MultiMapUtilIntIntTest, ContainsKeyValuePairTest) +{ + using Map = TypeParam; + + Map m; + + m.insert(std::make_pair(1, 10)); + m.insert(std::make_pair(1, 11)); + m.insert(std::make_pair(1, 12)); + + m.insert(std::make_pair(3, 13)); + + EXPECT_FALSE(ContainsKeyValuePair(m, 0, 0)); + EXPECT_FALSE(ContainsKeyValuePair(m, 1, 0)); + EXPECT_TRUE(ContainsKeyValuePair(m, 1, 10)); + EXPECT_TRUE(ContainsKeyValuePair(m, 1, 11)); + EXPECT_TRUE(ContainsKeyValuePair(m, 1, 12)); + EXPECT_FALSE(ContainsKeyValuePair(m, 1, 13)); +} + +} // namespace gutil diff --git a/src/gutil/test/map_util_unittest.cpp b/src/gutil/test/map_util_unittest.cpp new file mode 100644 index 0000000000..8f68bf50a2 --- /dev/null +++ b/src/gutil/test/map_util_unittest.cpp @@ -0,0 +1,523 @@ +// +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "gutil/map_util.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// All of the templates for the tests are defined here. +// This file is critical to understand what is tested. +#include "absl/container/btree_set.h" +#include "absl/container/flat_hash_map.h" +#include "absl/container/node_hash_map.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "map_util_test.h" + +namespace gutil { + +using ::testing::ElementsAre; +using ::testing::IsEmpty; +using ::testing::Pair; +using ::testing::Pointee; +using ::testing::UnorderedElementsAre; + +TEST(MapUtil, ImplicitTypeConversion) +{ + using Map = std::map; + Map m; + + // Check that I can use a type that's implicitly convertible to the + // key or value type, such as const char* -> string. + EXPECT_EQ("", FindWithDefault(m, "foo", "")); + EXPECT_EQ("", FindWithDefault(m, "foo")); + EXPECT_TRUE(InsertOrUpdate(&m, "foo", "bar")); + EXPECT_EQ("bar", FindWithDefault(m, "foo", "")); + EXPECT_EQ("bar", FindWithDefault(m, "foo")); + EXPECT_EQ("bar", *FindOrNull(m, "foo")); + std::string str; + EXPECT_TRUE(FindCopy(m, "foo", &str)); + EXPECT_EQ("bar", str); + EXPECT_TRUE(ContainsKey(m, "foo")); +} + +TEST(MapUtil, HeterogeneousLookup) +{ + absl::flat_hash_map m; + const auto &const_m = m; + + // Verify that I can use a key type that's appropriate for heterogeneous + // lookup, such as string_view -> string. + constexpr std::string_view kLookupKey = "foo"; + EXPECT_EQ(FindWithDefault(m, kLookupKey), ""); + EXPECT_EQ(FindWithDefault(const_m, kLookupKey), ""); + + m["foo"] = "bar"; + + EXPECT_EQ(FindOrDie(m, kLookupKey), "bar"); + EXPECT_EQ(FindOrDie(const_m, kLookupKey), "bar"); + + EXPECT_EQ(FindOrDieNoPrint(m, kLookupKey), "bar"); + EXPECT_EQ(FindOrDieNoPrint(const_m, kLookupKey), "bar"); + + EXPECT_EQ(FindWithDefault(m, kLookupKey), "bar"); + EXPECT_EQ(FindWithDefault(const_m, kLookupKey), "bar"); + + EXPECT_THAT(FindOrNull(m, kLookupKey), Pointee(testing::Eq("bar"))); + EXPECT_THAT(FindOrNull(const_m, kLookupKey), Pointee(testing::Eq("bar"))); + + std::string str; + EXPECT_TRUE(FindCopy(m, kLookupKey, &str)); + EXPECT_EQ(str, "bar"); + + std::string str_from_const; + EXPECT_TRUE(FindCopy(const_m, kLookupKey, &str_from_const)); + EXPECT_EQ(str_from_const, "bar"); + + absl::flat_hash_map ptr_m; + const auto &const_ptr_m = ptr_m; + + // Insert an arbitrary non-null pointer into the map. + ASSERT_TRUE(InsertOrUpdate(&ptr_m, "foo", &ptr_m)); + + EXPECT_EQ(FindPtrOrNull(ptr_m, kLookupKey), &ptr_m); + EXPECT_EQ(FindPtrOrNull(const_ptr_m, kLookupKey), &ptr_m); +} + +TEST(MapUtil, SetOperations) +{ + // Set operations + using Set = std::set; + Set s; + EXPECT_TRUE(InsertIfNotPresent(&s, 0)); + EXPECT_FALSE(InsertIfNotPresent(&s, 0)); + EXPECT_TRUE(ContainsKey(s, 0)); +} + +TEST(MapUtil, ReverseMapWithoutDups) +{ + std::map forward; + forward["1"] = 1; + forward["2"] = 2; + forward["3"] = 3; + forward["4"] = 4; + forward["5"] = 5; + std::map reverse; + EXPECT_TRUE(ReverseMap(forward, &reverse)); + EXPECT_THAT(reverse, + ElementsAre(Pair(1, "1"), Pair(2, "2"), Pair(3, "3"), Pair(4, "4"), Pair(5, "5"))); +} + +TEST(MapUtil, ReverseMapWithDups) +{ + std::map forward; + forward["1"] = 1; + forward["2"] = 2; + forward["3"] = 3; + forward["4"] = 4; + forward["5"] = 5; + forward["6"] = 1; + forward["7"] = 2; + std::map reverse; + EXPECT_FALSE(ReverseMap(forward, &reverse)); + // There are 5 distinct values in forward. + EXPECT_THAT(reverse, + ElementsAre(Pair(1, "6"), Pair(2, "7"), Pair(3, "3"), Pair(4, "4"), Pair(5, "5"))); +} + +TEST(MapUtil, SingleArgumentReverseMapWithoutDups) +{ + std::map forward; + forward["1"] = 1; + forward["2"] = 2; + forward["3"] = 3; + forward["4"] = 4; + forward["5"] = 5; + const std::map reverse = ReverseMap>(forward); + EXPECT_THAT(reverse, + ElementsAre(Pair(1, "1"), Pair(2, "2"), Pair(3, "3"), Pair(4, "4"), Pair(5, "5"))); +} + +TEST(MapUtil, SingleArgumentReverseMapWithDups) +{ + std::map forward; + forward["1"] = 1; + forward["2"] = 2; + forward["3"] = 3; + forward["4"] = 4; + forward["5"] = 5; + forward["6"] = 1; + forward["7"] = 2; + const std::map reverse = ReverseMap>(forward); + // There are 5 distinct values in forward. + EXPECT_THAT(reverse, + ElementsAre(Pair(1, "6"), Pair(2, "7"), Pair(3, "3"), Pair(4, "4"), Pair(5, "5"))); +} + +// Wrapper around an int that we can use to test a key without operator<<. +struct Unprintable +{ + int a; + explicit Unprintable(int a) : a(a) {} + bool operator<(const Unprintable &other) const { return a < other.a; } + bool operator==(const Unprintable &other) const { return a == other.a; } +}; + +TEST(MapUtilDeathTest, FindOrDieNoPrint) +{ + // Test FindOrDieNoPrint with a value with no operator<<. + std::map m; + m[Unprintable(1)] = 8; + EXPECT_EQ(8, FindOrDieNoPrint(m, Unprintable(1))); + ASSERT_DEATH(FindOrDieNoPrint(m, Unprintable(2)), "Map key not found"); + + // Make sure the non-const reference returning version works. + FindOrDieNoPrint(m, Unprintable(1)) = 20; + EXPECT_EQ(20, FindOrDieNoPrint(m, Unprintable(1))); + + // Make sure we can lookup values in a const std::map. + const std::map &const_m = m; + EXPECT_EQ(20, FindOrDieNoPrint(const_m, Unprintable(1))); +} + +TEST(MapUtilDeathTest, SetInsertOrDieTest) +{ + std::set s; + InsertOrDie(&s, 1); + EXPECT_TRUE(ContainsKey(s, 1)); + ASSERT_DEATH(InsertOrDie(&s, 1), "duplicate"); +} + +TEST(MapUtilDeathTest, InsertOrDieNoPrint) +{ + std::pair key = std::make_pair(1, 1); + + std::map, int> m; + InsertOrDieNoPrint(&m, key, 2); + EXPECT_EQ(m[key], 2); + ASSERT_DEATH(InsertOrDieNoPrint(&m, key, 3), "duplicate"); + + std::set> s; + InsertOrDieNoPrint(&s, key); + EXPECT_TRUE(ContainsKey(s, key)); + ASSERT_DEATH(InsertOrDieNoPrint(&s, key), "duplicate"); +} + +TEST(MapUtil, InsertKeysFromMap) +{ + const std::map empty_map; + std::set keys_as_ints; + InsertKeysFromMap(empty_map, &keys_as_ints); + EXPECT_TRUE(keys_as_ints.empty()); + + std::set keys_as_longs; // NOLINT + InsertKeysFromMap(empty_map, &keys_as_longs); + EXPECT_TRUE(keys_as_longs.empty()); + + const std::pair number_names_array[] = { + std::make_pair("one", 1), std::make_pair("two", 2), std::make_pair("three", 3)}; + std::map number_names_map( + number_names_array, + number_names_array + sizeof number_names_array / sizeof *number_names_array); + absl::btree_set names; + InsertKeysFromMap(number_names_map, &names); + // No two numbers have the same name, so the container sizes must match. + EXPECT_EQ(names.size(), number_names_map.size()); + EXPECT_EQ(names.count("one"), 1); + EXPECT_EQ(names.count("two"), 1); + EXPECT_EQ(names.count("three"), 1); +} + +TEST(MapUtil, AppendKeysFromMap) +{ + const std::map empty_map; + std::vector keys_as_ints; + AppendKeysFromMap(empty_map, &keys_as_ints); + EXPECT_TRUE(keys_as_ints.empty()); + + std::list keys_as_longs; // NOLINT + AppendKeysFromMap(empty_map, &keys_as_longs); + EXPECT_TRUE(keys_as_longs.empty()); + + const std::pair number_names_array[] = { + std::make_pair("one", 1), std::make_pair("two", 2), std::make_pair("three", 3)}; + std::map number_names_map( + number_names_array, + number_names_array + sizeof number_names_array / sizeof *number_names_array); + std::deque names; + AppendKeysFromMap(number_names_map, &names); + // No two numbers have the same name, so the container sizes must match. + EXPECT_EQ(names.size(), number_names_map.size()); + // The names are appended in the order in which they are found in the + // map, i.e., lexicographical order. + EXPECT_EQ(names[0], "one"); + EXPECT_EQ(names[1], "three"); + EXPECT_EQ(names[2], "two"); + + // Appending again should double the size of the std::deque + AppendKeysFromMap(number_names_map, &names); + EXPECT_EQ(names.size(), 2 * number_names_map.size()); +} + +// Vector is a special case. +TEST(MapUtil, AppendKeysFromMapIntoVector) +{ + const std::map empty_map; + std::vector keys_as_ints; + AppendKeysFromMap(empty_map, &keys_as_ints); + EXPECT_TRUE(keys_as_ints.empty()); + + std::vector keys_as_longs; // NOLINT + AppendKeysFromMap(empty_map, &keys_as_longs); + EXPECT_TRUE(keys_as_longs.empty()); + + const std::pair number_names_array[] = { + std::make_pair("one", 1), std::make_pair("two", 2), std::make_pair("three", 3)}; + std::map number_names_map( + number_names_array, + number_names_array + sizeof number_names_array / sizeof *number_names_array); + std::vector names; + AppendKeysFromMap(number_names_map, &names); + // No two numbers have the same name, so the container sizes must match. + EXPECT_EQ(names.size(), number_names_map.size()); + // The names are appended in the order in which they are found in the + // map, i.e., lexicographical order. + EXPECT_EQ(names[0], "one"); + EXPECT_EQ(names[1], "three"); + EXPECT_EQ(names[2], "two"); + + // Appending again should double the size of the std::deque + AppendKeysFromMap(number_names_map, &names); + EXPECT_EQ(names.size(), 2 * number_names_map.size()); +} + +TEST(MapUtil, AppendValuesFromMap) +{ + const std::map empty_map; + std::vector values_as_ints; + AppendValuesFromMap(empty_map, &values_as_ints); + EXPECT_TRUE(values_as_ints.empty()); + + std::list values_as_longs; // NOLINT + AppendValuesFromMap(empty_map, &values_as_longs); + EXPECT_TRUE(values_as_longs.empty()); + + const std::pair number_names_array[] = { + std::make_pair("one", 1), std::make_pair("two", 2), std::make_pair("three", 3)}; + std::map number_names_map( + number_names_array, + number_names_array + sizeof number_names_array / sizeof *number_names_array); + std::deque numbers; + AppendValuesFromMap(number_names_map, &numbers); + // No two numbers have the same name, so the container sizes must match. + EXPECT_EQ(numbers.size(), number_names_map.size()); + // The numbers are appended in the order in which they are found in the + // map, i.e., lexicographical order. + EXPECT_EQ(numbers[0], 1); + EXPECT_EQ(numbers[1], 3); + EXPECT_EQ(numbers[2], 2); + + // Appending again should double the size of the std::deque + AppendValuesFromMap(number_names_map, &numbers); + EXPECT_EQ(numbers.size(), 2 * number_names_map.size()); +} + +TEST(MapUtil, AppendValuesFromMapIntoVector) +{ + const std::map empty_map; + std::vector values_as_ints; + AppendValuesFromMap(empty_map, &values_as_ints); + EXPECT_TRUE(values_as_ints.empty()); + + std::list values_as_longs; // NOLINT + AppendValuesFromMap(empty_map, &values_as_longs); + EXPECT_TRUE(values_as_longs.empty()); + + const std::pair number_names_array[] = { + std::make_pair("one", 1), std::make_pair("two", 2), std::make_pair("three", 3)}; + std::map number_names_map( + number_names_array, + number_names_array + sizeof number_names_array / sizeof *number_names_array); + std::vector numbers; + AppendValuesFromMap(number_names_map, &numbers); + // No two numbers have the same name, so the container sizes must match. + EXPECT_EQ(numbers.size(), number_names_map.size()); + // The numbers are appended in the order in which they are found in the + // map, i.e., lexicographical order. + EXPECT_EQ(numbers[0], 1); + EXPECT_EQ(numbers[1], 3); + EXPECT_EQ(numbers[2], 2); + + // Appending again should double the size of the std::deque + AppendValuesFromMap(number_names_map, &numbers); + EXPECT_EQ(numbers.size(), 2 * number_names_map.size()); +} + +//////////////////////////////////////////////////////////////////////////////// +// Instantiate tests for std::map and std::unordered_map. +//////////////////////////////////////////////////////////////////////////////// + +// Finish setup for MapType +REGISTER_TYPED_TEST_SUITE_P(MapUtilIntIntTest, + ValueMapTests, + UpdateReturnCopyTest, + InsertOrReturnExistingTest, + FindOrDieTest, + InsertOrDieTest, + InsertKeyOrDieTest); +using MapIntIntTypes = ::testing:: + Types, absl::node_hash_map, absl::node_hash_map>; +INSTANTIATE_TYPED_TEST_SUITE_P(MapUtilTest, MapUtilIntIntTest, MapIntIntTypes); + +// Finish setup for MapType +REGISTER_TYPED_TEST_SUITE_P(MapUtilIntIntPtrTest, + LookupOrInsertNewTest, + InsertAndDeleteExistingTest, + FindPtrOrNullTest, + EraseKeyReturnValuePtrTest); +using MapIntIntPtrTypes = ::testing::Types, absl::node_hash_map>; +INSTANTIATE_TYPED_TEST_SUITE_P(MapUtilTest, MapUtilIntIntPtrTest, MapIntIntPtrTypes); + +// Finish setup for MapType > +REGISTER_TYPED_TEST_SUITE_P(MapUtilIntIntSharedPtrTest, + FindPtrOrNullTest, + LookupOrInsertNewSharedPtrTest); +using MapIntIntSharedPtrTypes = ::testing::Types>, + absl::node_hash_map>>; +INSTANTIATE_TYPED_TEST_SUITE_P(MapUtilTest, MapUtilIntIntSharedPtrTest, MapIntIntSharedPtrTypes); + +REGISTER_TYPED_TEST_SUITE_P(MapUtilIntIntSharedPtrOnlyTest, LookupOrInsertNewSharedPtrSwapTest); +typedef ::testing::Types>, + absl::node_hash_map>> + MapIntIntSharedPtrOnlyTypes; +INSTANTIATE_TYPED_TEST_SUITE_P(MapUtilTest, + MapUtilIntIntSharedPtrOnlyTest, + MapIntIntSharedPtrOnlyTypes); + +using AssociateEraseMapTypes = + ::testing::Types, absl::node_hash_map>; + +template +class AssociativeEraseIfTest : public ::testing::Test +{ +}; +TYPED_TEST_SUITE_P(AssociativeEraseIfTest); + +TYPED_TEST_P(AssociativeEraseIfTest, Basic) +{ + using ValueType = std::pair; + TypeParam m; + m["a"] = 1; + m["b"] = 2; + m["c"] = 3; + m["d"] = 4; + + // Test that none of the elements are removed when the predicate always + // returns false. + struct FalseFunc + { + bool operator()(const ValueType &unused) const { return 0; } + }; + AssociativeEraseIf(&m, FalseFunc()); + EXPECT_THAT(m, UnorderedElementsAre(Pair("a", 1), Pair("b", 2), Pair("c", 3), Pair("d", 4))); + + // Test removing a single element. + struct KeyEqualsA + { + bool operator()(const ValueType &pair) const { return pair.first == "a"; } + }; + AssociativeEraseIf(&m, KeyEqualsA()); + EXPECT_THAT(m, UnorderedElementsAre(Pair("b", 2), Pair("c", 3), Pair("d", 4))); + + // Put the element back and test removing a couple elements, + m["a"] = 1; + struct ValueGreaterThanTwo + { + bool operator()(const ValueType &pair) const { return pair.second > 2; } + }; + AssociativeEraseIf(&m, ValueGreaterThanTwo()); + EXPECT_THAT(m, UnorderedElementsAre(Pair("a", 1), Pair("b", 2))); + + // Put the elements back and test removing all of them. + m["c"] = 3; + m["d"] = 4; + struct TrueFunc + { + bool operator()(const ValueType &unused) const { return 1; } + }; + AssociativeEraseIf(&m, TrueFunc()); + EXPECT_THAT(m, IsEmpty()); +} +REGISTER_TYPED_TEST_SUITE_P(AssociativeEraseIfTest, Basic); + +INSTANTIATE_TYPED_TEST_SUITE_P(MapUtilTest, AssociativeEraseIfTest, AssociateEraseMapTypes); + +TEST(MapUtil, InsertKeyOrDie_SmartPtrTest) +{ + absl::node_hash_map> m; + m[1].reset(new int(10)); + m[2].reset(new int(20)); + + EXPECT_THAT( + m, UnorderedElementsAre(Pair(1, ::testing::Pointee(10)), Pair(2, ::testing::Pointee(20)))); + InsertKeyOrDie(&m, 3).reset(new int(30)); + EXPECT_THAT(m, + UnorderedElementsAre(Pair(1, ::testing::Pointee(10)), + Pair(2, ::testing::Pointee(20)), + Pair(3, ::testing::Pointee(30)))); +} + +TEST(MapUtil, EraseKeyReturnValuePtr_SmartPtrTest) +{ + std::map> m; + m[1] = std::unique_ptr(new int(10)); + m[2] = std::unique_ptr(new int(20)); + + std::unique_ptr val1 = EraseKeyReturnValuePtr(&m, 1); + EXPECT_EQ(10, *val1); + EXPECT_THAT(m, ElementsAre(Pair(2, ::testing::Pointee(20)))); + auto val2 = EraseKeyReturnValuePtr(&m, 2); + EXPECT_EQ(20, *val2); +} + +TEST(MapUtil, LookupOrInsertNewVariadicTest) +{ + struct TwoArg + { + TwoArg(int one_in, int two_in) : one(one_in), two(two_in) {} + int one; + int two; + }; + + std::map> m; + TwoArg *val = LookupOrInsertNew(&m, 1, 100, 200).get(); + EXPECT_EQ(100, val->one); + EXPECT_EQ(200, val->two); +} + +} // namespace gutil diff --git a/src/gutil/test/no_destructor_test.cpp b/src/gutil/test/no_destructor_test.cpp new file mode 100644 index 0000000000..a0f5885877 --- /dev/null +++ b/src/gutil/test/no_destructor_test.cpp @@ -0,0 +1,184 @@ +// +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "gutil/no_destructor.h" + +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "utils/fmt_logging.h" + +namespace gutil { +namespace { + +struct Blob +{ + Blob() : val(42) {} + Blob(int x, int y) : val(x + y) {} + Blob(std::initializer_list xs) + { + val = 0; + for (auto &x : xs) + val += x; + } + + Blob(const Blob & /*b*/) = delete; + Blob(Blob &&b) noexcept : val(b.val) { b.moved_out = true; } // moving is fine + + // no crash: NoDestructor indeed does not destruct (the moved-out Blob + // temporaries do get destroyed though) + ~Blob() { CHECK(moved_out, "~Blob"); } + + int val; + bool moved_out = false; +}; + +struct TypeWithDeletedDestructor +{ + ~TypeWithDeletedDestructor() = delete; +}; + +TEST(NoDestructorTest, DestructorNeverCalled) +{ + NoDestructor a; + (void)a; +} + +TEST(NoDestructorTest, Noncopyable) +{ + using T = NoDestructor; + + EXPECT_FALSE((std::is_constructible::value)); + EXPECT_FALSE((std::is_constructible::value)); + EXPECT_FALSE((std::is_constructible::value)); + EXPECT_FALSE((std::is_constructible::value)); + + EXPECT_FALSE((std::is_assignable::value)); + EXPECT_FALSE((std::is_assignable::value)); + EXPECT_FALSE((std::is_assignable::value)); + EXPECT_FALSE((std::is_assignable::value)); +} + +TEST(NoDestructorTest, Interface) +{ + EXPECT_TRUE(std::is_trivially_destructible>::value); + EXPECT_TRUE(std::is_trivially_destructible>::value); + { + NoDestructor b; // default c-tor + // access: *, ->, get() + EXPECT_EQ(42, (*b).val); + (*b).val = 55; + EXPECT_EQ(55, b->val); + b->val = 66; + EXPECT_EQ(66, b.get()->val); + b.get()->val = 42; + EXPECT_EQ(42, (*b).val); + } + { + NoDestructor b(70, 7); // regular c-tor, const + EXPECT_EQ(77, (*b).val); + EXPECT_EQ(77, b->val); + EXPECT_EQ(77, b.get()->val); + } + { + const NoDestructor b{{20, 28, 40}}; // init-list c-tor, deep const + // This only works in clang, not in gcc: + // const NoDestructor b({20, 28, 40}); + EXPECT_EQ(88, (*b).val); + EXPECT_EQ(88, b->val); + EXPECT_EQ(88, b.get()->val); + } +} + +// ========================================================================= // + +std::string *Str0() +{ + static NoDestructor x; + return x.get(); +} + +extern const std::string &Str2(); + +const char *Str1() +{ + static NoDestructor x(Str2() + "_Str1"); + return x->c_str(); +} + +const std::string &Str2() +{ + static NoDestructor x("Str2"); + return *x; +} + +const std::string &Str2Copy() +{ + static NoDestructor x(Str2()); // exercise copy construction + return *x; +} + +typedef std::array MyArray; +const MyArray &Array() +{ + static NoDestructor x{{{"foo", "bar", "baz"}}}; + // This only works in clang, not in gcc: + // static NoDestructor x({{"foo", "bar", "baz"}}); + return *x; +} + +typedef std::vector MyVector; +const MyVector &Vector() +{ + static NoDestructor x{{1, 2, 3}}; + return *x; +} + +const int &Int() +{ + static NoDestructor x; + return *x; +} + +TEST(NoDestructorTest, StaticPattern) +{ + EXPECT_TRUE(std::is_trivially_destructible>::value); + EXPECT_TRUE(std::is_trivially_destructible>::value); + EXPECT_TRUE(std::is_trivially_destructible>::value); + EXPECT_TRUE(std::is_trivially_destructible>::value); + + EXPECT_EQ(*Str0(), ""); + Str0()->append("foo"); + EXPECT_EQ(*Str0(), "foo"); + + EXPECT_EQ(std::string(Str1()), "Str2_Str1"); + + EXPECT_EQ(Str2(), "Str2"); + EXPECT_EQ(Str2Copy(), "Str2"); + + EXPECT_THAT(Array(), testing::ElementsAre("foo", "bar", "baz")); + + EXPECT_THAT(Vector(), testing::ElementsAre(1, 2, 3)); + + EXPECT_EQ(0, Int()); // should get zero-initialized +} + +} // namespace +} // namespace gutil diff --git a/src/gutil/test/run.sh b/src/gutil/test/run.sh new file mode 100755 index 0000000000..798d1ac202 --- /dev/null +++ b/src/gutil/test/run.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +./pgs_gutil_test diff --git a/src/http/http_call_registry.h b/src/http/http_call_registry.h index 69eb80fc1c..7decdc9ff3 100644 --- a/src/http/http_call_registry.h +++ b/src/http/http_call_registry.h @@ -20,6 +20,7 @@ #include "utils/fmt_logging.h" #include "http_server.h" #include "utils/errors.h" +#include "gutil/map_util.h" #include "utils/singleton.h" namespace dsn { @@ -35,11 +36,8 @@ class http_call_registry : public utils::singleton std::shared_ptr find(const std::string &path) const { std::lock_guard guard(_mu); - const auto &iter = _call_map.find(path); - if (iter == _call_map.end()) { - return nullptr; - } - return iter->second; + const auto *hc = gutil::FindOrNull(_call_map, path); + return hc == nullptr ? nullptr : *hc; } void remove(const std::string &path) diff --git a/src/http/http_client.cpp b/src/http/http_client.cpp index 24dd575f5c..a369fe3e97 100644 --- a/src/http/http_client.cpp +++ b/src/http/http_client.cpp @@ -483,15 +483,15 @@ void http_client::free_header_list() _header_list = nullptr; } -void http_client::set_header_field(absl::string_view key, absl::string_view val) +void http_client::set_header_field(std::string_view key, std::string_view val) { _header_fields[std::string(key)] = std::string(val); _header_changed = true; } -void http_client::set_accept(absl::string_view val) { set_header_field("Accept", val); } +void http_client::set_accept(std::string_view val) { set_header_field("Accept", val); } -void http_client::set_content_type(absl::string_view val) { set_header_field("Content-Type", val); } +void http_client::set_content_type(std::string_view val) { set_header_field("Content-Type", val); } dsn::error_s http_client::process_header() { diff --git a/src/http/http_client.h b/src/http/http_client.h index 0d8b35e13c..9624f1e467 100644 --- a/src/http/http_client.h +++ b/src/http/http_client.h @@ -27,7 +27,7 @@ #include #include -#include "absl/strings/string_view.h" +#include #include "http/http_method.h" #include "http/http_status_code.h" #include "utils/enum_helper.h" @@ -199,8 +199,8 @@ class http_client // Operations for the header fields. void clear_header_fields(); - void set_accept(absl::string_view val); - void set_content_type(absl::string_view val); + void set_accept(std::string_view val); + void set_content_type(std::string_view val); // Submit request to remote http service, with response processed by callback function. // @@ -235,7 +235,7 @@ class http_client dsn::error_s set_method(http_method method); void free_header_list(); - void set_header_field(absl::string_view key, absl::string_view val); + void set_header_field(std::string_view key, std::string_view val); dsn::error_s process_header(); // The size of a buffer that is used by libcurl to store human readable diff --git a/src/http/http_message_parser.cpp b/src/http/http_message_parser.cpp index df873eea2e..6a5fa83cf8 100644 --- a/src/http/http_message_parser.cpp +++ b/src/http/http_message_parser.cpp @@ -34,7 +34,7 @@ #include "http/http_method.h" #include "nodejs/http_parser.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_message.h" #include "utils/blob.h" #include "utils/crc.h" #include "utils/fmt_logging.h" diff --git a/src/http/http_message_parser.h b/src/http/http_message_parser.h index 5f19576798..e4aa5d9e72 100644 --- a/src/http/http_message_parser.h +++ b/src/http/http_message_parser.h @@ -32,9 +32,9 @@ #include #include -#include "runtime/rpc/message_parser.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_spec.h" +#include "rpc/message_parser.h" +#include "rpc/rpc_message.h" +#include "task/task_spec.h" #include "utils/customizable_id.h" namespace dsn { diff --git a/src/http/http_server.cpp b/src/http/http_server.cpp index ce2178bfde..2a40da4ceb 100644 --- a/src/http/http_server.cpp +++ b/src/http/http_server.cpp @@ -23,19 +23,20 @@ #include #include -#include "http/builtin_http_calls.h" #include "fmt/core.h" -#include "http/http_method.h" +#include "gutil/map_util.h" +#include "http/builtin_http_calls.h" #include "http/http_call_registry.h" #include "http/http_message_parser.h" +#include "http/http_method.h" #include "http/http_server_impl.h" +#include "http/uri_decoder.h" #include "nodejs/http_parser.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/rpc_stream.h" +#include "rpc/rpc_message.h" +#include "rpc/rpc_stream.h" #include "runtime/serverlet.h" #include "runtime/tool_api.h" -#include "http/uri_decoder.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" #include "utils/output_utils.h" @@ -250,8 +251,7 @@ void http_server::serve(message_ex *msg) if (sep + 1 < arg_val.size()) { value = arg_val.substr(sep + 1, arg_val.size() - sep); } - auto iter = ret.query_args.find(name); - if (iter != ret.query_args.end()) { + if (gutil::ContainsKey(ret.query_args, name)) { return FMT_ERR(ERR_INVALID_PARAMETERS, "duplicate parameter: {}", name); } ret.query_args.emplace(std::move(name), std::move(value)); diff --git a/src/http/http_server.h b/src/http/http_server.h index f2c3930859..a0d07ced68 100644 --- a/src/http/http_server.h +++ b/src/http/http_server.h @@ -28,7 +28,7 @@ #include "fmt/core.h" #include "http/http_method.h" #include "http/http_status_code.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/blob.h" #include "utils/errors.h" #include "utils/flags.h" diff --git a/src/http/http_server_impl.h b/src/http/http_server_impl.h index f78226a1cd..27166b941c 100644 --- a/src/http/http_server_impl.h +++ b/src/http/http_server_impl.h @@ -17,7 +17,7 @@ #pragma once -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_message.h" #include "runtime/serverlet.h" #include "http_server.h" diff --git a/src/http/test/http_server_test.cpp b/src/http/test/http_server_test.cpp index dc7f0bce79..54efa14a26 100644 --- a/src/http/test/http_server_test.cpp +++ b/src/http/test/http_server_test.cpp @@ -30,8 +30,8 @@ #include "http/http_method.h" #include "http/http_server.h" #include "http/http_status_code.h" -#include "runtime/rpc/message_parser.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/message_parser.h" +#include "rpc/rpc_message.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/error_code.h" diff --git a/src/http/test/uri_decoder_test.cpp b/src/http/test/uri_decoder_test.cpp index b62e5b7bdf..43b964e601 100644 --- a/src/http/test/uri_decoder_test.cpp +++ b/src/http/test/uri_decoder_test.cpp @@ -90,5 +90,5 @@ TEST_F(uri_decoder_test, decode) } } -} // namespace dsn } // namespace uri +} // namespace dsn diff --git a/src/http/uri_decoder.cpp b/src/http/uri_decoder.cpp index d6a1a1de9f..63b784caec 100644 --- a/src/http/uri_decoder.cpp +++ b/src/http/uri_decoder.cpp @@ -40,7 +40,7 @@ error_with from_hex(const char c) } } -error_with decode_char(const absl::string_view &hex) +error_with decode_char(const std::string_view &hex) { CHECK_EQ(2, hex.size()); @@ -53,7 +53,7 @@ error_with decode_char(const absl::string_view &hex) return error_s::make(ERR_INVALID_PARAMETERS); } -error_with decode(const absl::string_view &encoded_uri) +error_with decode(const std::string_view &encoded_uri) { std::string decoded_uri; for (size_t i = 0; i < encoded_uri.size(); ++i) { @@ -64,7 +64,7 @@ error_with decode(const absl::string_view &encoded_uri) "Encountered partial escape sequence at end of string"); } - const absl::string_view encoded_char(encoded_uri.data() + i + 1, 2); + const std::string_view encoded_char(encoded_uri.data() + i + 1, 2); auto decoded_char = decode_char(encoded_char); if (!decoded_char.is_ok()) { return error_s::make( diff --git a/src/http/uri_decoder.h b/src/http/uri_decoder.h index b1d28a13ba..93266f4523 100644 --- a/src/http/uri_decoder.h +++ b/src/http/uri_decoder.h @@ -20,14 +20,14 @@ #include #include "utils/errors.h" -#include "absl/strings/string_view.h" +#include namespace dsn { namespace uri { /// \brief Decodes a sequence according to the percent decoding rules. /// \returns the decoded uri path -error_with decode(const absl::string_view &encoded_uri); +error_with decode(const std::string_view &encoded_uri); } // namespace uri } // namespace dsn diff --git a/src/include/pegasus/error.h b/src/include/pegasus/error.h index 1638e6abe0..73cabfcafd 100644 --- a/src/include/pegasus/error.h +++ b/src/include/pegasus/error.h @@ -25,4 +25,4 @@ namespace pegasus { #include #undef PEGASUS_ERR_CODE -} // namespace +} // namespace pegasus diff --git a/src/include/rrdb/rrdb.client.h b/src/include/rrdb/rrdb.client.h index 4645e781f4..899d9e697c 100644 --- a/src/include/rrdb/rrdb.client.h +++ b/src/include/rrdb/rrdb.client.h @@ -25,8 +25,11 @@ #include "duplication_internal_types.h" #include "rrdb.code.definition.h" #include "rrdb_types.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_holder.h" +#include "task/task_tracker.h" +#include "rrdb/rrdb_types.h" +#include "rpc/rpc_holder.h" +#include "task/task_tracker.h" #include "utils/optional.h" namespace dsn { diff --git a/src/include/rrdb/rrdb.code.definition.h b/src/include/rrdb/rrdb.code.definition.h index 9607883d1f..ac23da7d92 100644 --- a/src/include/rrdb/rrdb.code.definition.h +++ b/src/include/rrdb/rrdb.code.definition.h @@ -39,5 +39,5 @@ DEFINE_STORAGE_SCAN_RPC_CODE(RPC_RRDB_RRDB_SCAN) DEFINE_STORAGE_SCAN_RPC_CODE(RPC_RRDB_RRDB_CLEAR_SCANNER) DEFINE_STORAGE_SCAN_RPC_CODE(RPC_RRDB_RRDB_MULTI_GET) DEFINE_STORAGE_READ_RPC_CODE(RPC_RRDB_RRDB_BATCH_GET) -} -} +} // namespace apps +} // namespace dsn diff --git a/src/meta/CMakeLists.txt b/src/meta/CMakeLists.txt index 95ecb3b9a8..eb5ff8d3ca 100644 --- a/src/meta/CMakeLists.txt +++ b/src/meta/CMakeLists.txt @@ -39,6 +39,7 @@ set(MY_PROJ_LIBS dsn_http dsn_runtime dsn_aio + prometheus-cpp-core zookeeper hashtable hdfs diff --git a/src/meta/app_balance_policy.cpp b/src/meta/app_balance_policy.cpp index 3f7a0a985e..76ce2fba76 100644 --- a/src/meta/app_balance_policy.cpp +++ b/src/meta/app_balance_policy.cpp @@ -18,11 +18,11 @@ #include #include #include -#include #include #include "app_balance_policy.h" #include "common/gpid.h" +#include "gutil/map_util.h" #include "meta/load_balance_policy.h" #include "metadata_types.h" #include "utils/flags.h" @@ -162,7 +162,7 @@ bool copy_secondary_operation::can_select(gpid pid, migration_list *result) } // if the pid have been used - if (result->find(pid) != result->end()) { + if (gutil::ContainsKey(*result, pid)) { LOG_DEBUG("{}: skip gpid({}.{}) coz it is already copyed", _app->get_logname(), pid.get_app_id(), diff --git a/src/meta/app_env_validator.cpp b/src/meta/app_env_validator.cpp index 229389b3b2..06288cb611 100644 --- a/src/meta/app_env_validator.cpp +++ b/src/meta/app_env_validator.cpp @@ -29,6 +29,7 @@ #include "common/replica_envs.h" #include "http/http_status_code.h" #include "utils/fmt_logging.h" +#include "gutil/map_util.h" #include "utils/string_conv.h" #include "utils/strings.h" #include "utils/throttling_controller.h" @@ -51,11 +52,9 @@ app_env_validator::~app_env_validator() { deregister_handler("list"); } bool app_env_validator::validate_app_envs(const std::map &envs) { // only check rocksdb app envs currently - for (const auto & [ key, value ] : envs) { - if (replica_envs::ROCKSDB_STATIC_OPTIONS.find(key) == - replica_envs::ROCKSDB_STATIC_OPTIONS.end() && - replica_envs::ROCKSDB_DYNAMIC_OPTIONS.find(key) == - replica_envs::ROCKSDB_DYNAMIC_OPTIONS.end()) { + for (const auto &[key, value] : envs) { + if (!gutil::ContainsKey(replica_envs::ROCKSDB_STATIC_OPTIONS, key) && + !gutil::ContainsKey(replica_envs::ROCKSDB_DYNAMIC_OPTIONS, key)) { continue; } std::string hint_message; @@ -155,15 +154,15 @@ bool app_env_validator::validate_app_env(const std::string &env_name, std::string &hint_message) { // Check if the env is supported. - const auto func_iter = _validator_funcs.find(env_name); - if (func_iter == _validator_funcs.end()) { + const auto *func = gutil::FindOrNull(_validator_funcs, env_name); + if (func == nullptr) { hint_message = fmt::format("app_env '{}' is not supported", env_name); return false; } // 'int_result' will be used if the env variable is integer type. int64_t int_result = 0; - switch (func_iter->second.type) { + switch (func->type) { case ValueType::kBool: { // Check by the default boolean validator. bool result = false; @@ -197,8 +196,7 @@ bool app_env_validator::validate_app_env(const std::string &env_name, } case ValueType::kString: { // Check by the self defined validator. - if (nullptr != func_iter->second.string_validator && - !func_iter->second.string_validator(env_value, hint_message)) { + if (nullptr != func->string_validator && !func->string_validator(env_value, hint_message)) { return false; } break; @@ -208,13 +206,11 @@ bool app_env_validator::validate_app_env(const std::string &env_name, __builtin_unreachable(); } - if (func_iter->second.type == ValueType::kInt32 || - func_iter->second.type == ValueType::kInt64) { + if (func->type == ValueType::kInt32 || func->type == ValueType::kInt64) { // Check by the self defined validator. - if (nullptr != func_iter->second.int_validator && - !func_iter->second.int_validator(int_result)) { - hint_message = fmt::format( - "invalid value '{}', should be '{}'", env_value, func_iter->second.limit_desc); + if (nullptr != func->int_validator && !func->int_validator(int_result)) { + hint_message = + fmt::format("invalid value '{}', should be '{}'", env_value, func->limit_desc); return false; } } @@ -354,10 +350,10 @@ const std::unordered_map nlohmann::json app_env_validator::EnvInfo::to_json() const { - const auto &type_str = ValueType2String.find(type); - CHECK_TRUE(type_str != ValueType2String.end()); + const auto *type_str = gutil::FindOrNull(ValueType2String, type); + CHECK_NOTNULL(type_str, ""); nlohmann::json info; - info["type"] = type_str->second; + info["type"] = *type_str; info["limitation"] = limit_desc; info["sample"] = sample; return info; diff --git a/src/meta/backup_engine.cpp b/src/meta/backup_engine.cpp index 1707fe65e2..7197f4e48b 100644 --- a/src/meta/backup_engine.cpp +++ b/src/meta/backup_engine.cpp @@ -35,15 +35,15 @@ #include "meta/meta_backup_service.h" #include "meta/meta_data.h" #include "meta/meta_service.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" #include "server_state.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/chrono_literals.h" @@ -182,7 +182,7 @@ void backup_engine::backup_app_partition(const gpid &pid) _is_backup_failed = true; return; } - partition_primary = app->partitions[pid.get_partition_index()].hp_primary; + partition_primary = app->pcs[pid.get_partition_index()].hp_primary; } if (!partition_primary) { @@ -190,11 +190,12 @@ void backup_engine::backup_app_partition(const gpid &pid) "backup_id({}): partition {} doesn't have a primary now, retry to backup it later.", _cur_backup.backup_id, pid); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, pid]() { backup_app_partition(pid); }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, pid]() { backup_app_partition(pid); }, + 0, + std::chrono::seconds(10)); return; } @@ -243,11 +244,12 @@ inline void backup_engine::handle_replica_backup_failed(const backup_response &r inline void backup_engine::retry_backup(const dsn::gpid pid) { - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, pid]() { backup_app_partition(pid); }, - 0, - std::chrono::seconds(1)); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, pid]() { backup_app_partition(pid); }, + 0, + std::chrono::seconds(1)); } void backup_engine::on_backup_reply(const error_code err, @@ -328,11 +330,12 @@ void backup_engine::write_backup_info() if (err != ERR_OK) { LOG_WARNING("backup_id({}): write backup info failed, retry it later.", _cur_backup.backup_id); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this]() { write_backup_info(); }, - 0, - std::chrono::seconds(1)); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this]() { write_backup_info(); }, + 0, + std::chrono::seconds(1)); return; } LOG_INFO("backup_id({}): successfully wrote backup info, backup for app {} completed.", diff --git a/src/meta/backup_engine.h b/src/meta/backup_engine.h index a762f3cc76..2f68b92b11 100644 --- a/src/meta/backup_engine.h +++ b/src/meta/backup_engine.h @@ -24,7 +24,7 @@ #include "backup_types.h" #include "common/json_helper.h" -#include "runtime/task/task_tracker.h" +#include "task/task_tracker.h" #include "utils/error_code.h" #include "utils/zlocks.h" diff --git a/src/meta/cluster_balance_policy.cpp b/src/meta/cluster_balance_policy.cpp index 516d782238..73cc448a6b 100644 --- a/src/meta/cluster_balance_policy.cpp +++ b/src/meta/cluster_balance_policy.cpp @@ -25,10 +25,11 @@ #include #include "dsn.layer2_types.h" +#include "gutil/map_util.h" #include "meta/load_balance_policy.h" -#include "runtime/rpc/dns_resolver.h" // IWYU pragma: keep -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/dns_resolver.h" // IWYU pragma: keep +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" #include "utils/flags.h" #include "utils/fmt_logging.h" #include "utils/utils.h" @@ -223,18 +224,18 @@ bool cluster_balance_policy::get_app_migration_info(std::shared_ptr a { info.app_id = app->app_id; info.app_name = app->app_name; - info.partitions.resize(app->partitions.size()); - for (auto i = 0; i < app->partitions.size(); ++i) { + info.partitions.reserve(app->pcs.size()); + for (const auto &pc : app->pcs) { std::map pstatus_map; - pstatus_map[app->partitions[i].hp_primary] = partition_status::PS_PRIMARY; - if (app->partitions[i].hp_secondaries.size() != app->partitions[i].max_replica_count - 1) { + pstatus_map[pc.hp_primary] = partition_status::PS_PRIMARY; + if (pc.hp_secondaries.size() != pc.max_replica_count - 1) { // partition is unhealthy return false; } - for (const auto &hp : app->partitions[i].hp_secondaries) { - pstatus_map[hp] = partition_status::PS_SECONDARY; + for (const auto &secondary : pc.hp_secondaries) { + pstatus_map[secondary] = partition_status::PS_SECONDARY; } - info.partitions[i] = pstatus_map; + info.partitions.push_back(std::move(pstatus_map)); } for (const auto &it : nodes) { @@ -258,14 +259,8 @@ void cluster_balance_policy::get_node_migration_info(const node_state &ns, if (!context.get_disk_tag(ns.host_port(), disk_tag)) { continue; } - auto pid = context.config_owner->pid; - if (info.partitions.find(disk_tag) != info.partitions.end()) { - info.partitions[disk_tag].insert(pid); - } else { - partition_set pset; - pset.insert(pid); - info.partitions.emplace(disk_tag, pset); - } + auto &partitions_of_disk = gutil::LookupOrInsert(&info.partitions, disk_tag, {}); + partitions_of_disk.insert(context.pc->pid); } } } diff --git a/src/meta/cluster_balance_policy.h b/src/meta/cluster_balance_policy.h index c7189fd359..94fb013ecd 100644 --- a/src/meta/cluster_balance_policy.h +++ b/src/meta/cluster_balance_policy.h @@ -32,7 +32,7 @@ #include "load_balance_policy.h" #include "meta/meta_data.h" #include "metadata_types.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" namespace dsn { namespace replication { @@ -54,6 +54,7 @@ class cluster_balance_policy : public load_balance_policy private: struct app_disk_info; + struct app_migration_info; struct cluster_migration_info; struct move_info; diff --git a/src/meta/distributed_lock_service_simple.cpp b/src/meta/distributed_lock_service_simple.cpp index cc95287850..f6134cd213 100644 --- a/src/meta/distributed_lock_service_simple.cpp +++ b/src/meta/distributed_lock_service_simple.cpp @@ -29,7 +29,7 @@ #include "common/replication.codes.h" #include "distributed_lock_service_simple.h" #include "runtime/api_layer1.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" namespace dsn { namespace dist { @@ -162,12 +162,13 @@ distributed_lock_service_simple::lock(const std::string &lock_id, } if (is_new) { - tasking::enqueue_timer(LPC_DIST_LOCK_SVC_RANDOM_EXPIRE, - &_tracker, - [=]() { random_lock_lease_expire(lock_id); }, - std::chrono::minutes(5), - 0, - std::chrono::seconds(1)); + tasking::enqueue_timer( + LPC_DIST_LOCK_SVC_RANDOM_EXPIRE, + &_tracker, + [=]() { random_lock_lease_expire(lock_id); }, + std::chrono::minutes(5), + 0, + std::chrono::seconds(1)); } if (err != ERR_IO_PENDING) { @@ -300,5 +301,5 @@ error_code distributed_lock_service_simple::query_cache(const std::string &lock_ } return err; } -} -} +} // namespace dist +} // namespace dsn diff --git a/src/meta/distributed_lock_service_simple.h b/src/meta/distributed_lock_service_simple.h index fe43a950d4..d52f3ab99b 100644 --- a/src/meta/distributed_lock_service_simple.h +++ b/src/meta/distributed_lock_service_simple.h @@ -33,10 +33,10 @@ #include #include -#include "runtime/task/future_types.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "task/future_types.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/distributed_lock_service.h" #include "utils/error_code.h" @@ -107,5 +107,5 @@ class distributed_lock_service_simple : public distributed_lock_service dsn::task_tracker _tracker; }; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/meta/dump_file.h b/src/meta/dump_file.h index 062ffd1fcc..5fc9a0b9d1 100644 --- a/src/meta/dump_file.h +++ b/src/meta/dump_file.h @@ -37,14 +37,14 @@ #include "utils/api_utilities.h" #include "utils/error_code.h" #include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "common/gpid.h" -#include "runtime/rpc/serialization.h" -#include "runtime/rpc/rpc_stream.h" +#include "rpc/serialization.h" +#include "rpc/rpc_stream.h" #include "runtime/serverlet.h" #include "runtime/service_app.h" #include "utils/fmt_logging.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" #include "utils/crc.h" #include #include diff --git a/src/meta/duplication/duplication_info.cpp b/src/meta/duplication/duplication_info.cpp index 4036cf2936..7f7bb62b69 100644 --- a/src/meta/duplication/duplication_info.cpp +++ b/src/meta/duplication/duplication_info.cpp @@ -173,6 +173,7 @@ void duplication_info::persist_status() _is_altering = false; _status = _next_status; + // Now we don't know what exactly is the next status, thus set DS_INIT temporarily. _next_status = duplication_status::DS_INIT; _fail_mode = _next_fail_mode; } diff --git a/src/meta/duplication/duplication_info.h b/src/meta/duplication/duplication_info.h index feb7503082..e1ddcacf38 100644 --- a/src/meta/duplication/duplication_info.h +++ b/src/meta/duplication/duplication_info.h @@ -31,7 +31,7 @@ #include "common/json_helper.h" #include "common/replication_other_types.h" #include "duplication_types.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" @@ -42,6 +42,7 @@ namespace dsn { namespace replication { class app_state; + class duplication_info; using duplication_info_s_ptr = std::shared_ptr; @@ -108,8 +109,9 @@ class duplication_info bool is_valid_alteration(duplication_status::type to_status) const { - return to_status == _status || (to_status == duplication_status::DS_PREPARE && - _status == duplication_status::DS_INIT) || + return to_status == _status || + (to_status == duplication_status::DS_PREPARE && + _status == duplication_status::DS_INIT) || (to_status == duplication_status::DS_APP && _status == duplication_status::DS_PREPARE) || (to_status == duplication_status::DS_LOG && @@ -173,13 +175,13 @@ class duplication_info bool all_checkpoint_has_prepared() { int prepared = 0; - bool completed = - std::all_of(_progress.begin(), - _progress.end(), - [&](std::pair item) -> bool { - prepared = item.second.checkpoint_prepared ? prepared + 1 : prepared; - return item.second.checkpoint_prepared; - }); + bool completed = std::all_of(_progress.begin(), + _progress.end(), + [&](std::pair item) -> bool { + prepared = item.second.checkpoint_prepared ? prepared + 1 + : prepared; + return item.second.checkpoint_prepared; + }); if (!completed) { LOG_WARNING("replica checkpoint still running: {}/{}", prepared, _progress.size()); } diff --git a/src/meta/duplication/meta_duplication_service.cpp b/src/meta/duplication/meta_duplication_service.cpp index c8a0d888ed..66cd2ed125 100644 --- a/src/meta/duplication/meta_duplication_service.cpp +++ b/src/meta/duplication/meta_duplication_service.cpp @@ -15,13 +15,16 @@ // specific language governing permissions and limitations // under the License. +// IWYU pragma: no_include #include +#include #include #include +#include #include +#include #include -#include "absl/strings/string_view.h" #include "common//duplication_common.h" #include "common/common.h" #include "common/gpid.h" @@ -30,19 +33,20 @@ #include "common/replication_other_types.h" #include "dsn.layer2_types.h" #include "duplication_types.h" +#include "gutil/map_util.h" #include "meta/meta_service.h" #include "meta/meta_state_service_utils.h" #include "meta_admin_types.h" #include "meta_duplication_service.h" #include "metadata_types.h" +#include "rpc/dns_resolver.h" +#include "rpc/group_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" #include "utils/api_utilities.h" #include "utils/blob.h" #include "utils/chrono_literals.h" @@ -53,6 +57,7 @@ #include "utils/fmt_logging.h" #include "utils/ports.h" #include "utils/string_conv.h" +#include "utils/strings.h" #include "utils/zlocks.h" DSN_DECLARE_bool(dup_ignore_other_cluster_ids); @@ -78,7 +83,7 @@ void meta_duplication_service::query_duplication_info(const duplication_query_re } response.appid = app->app_id; - for (const auto & [ _, dup ] : app->duplications) { + for (const auto &[_, dup] : app->duplications) { dup->append_if_valid_for_query(*app, response.entry_list); } } @@ -104,13 +109,12 @@ void meta_duplication_service::modify_duplication(duplication_modify_rpc rpc) return; } - auto it = app->duplications.find(dupid); - if (it == app->duplications.end()) { + auto dup = gutil::FindPtrOrNull(app->duplications, dupid); + if (!dup) { response.err = ERR_OBJECT_NOT_FOUND; return; } - duplication_info_s_ptr dup = it->second; auto to_status = request.__isset.status ? request.status : dup->status(); auto to_fail_mode = request.__isset.fail_mode ? request.fail_mode : dup->fail_mode(); response.err = dup->alter_status(to_status, to_fail_mode); @@ -207,7 +211,7 @@ void meta_duplication_service::add_duplication(duplication_add_rpc rpc) remote_replica_count); LOG_WARNING_DUP_HINT_AND_RETURN_IF_NOT(request.remote_cluster_name != - get_current_cluster_name(), + get_current_dup_cluster_name(), response, ERR_INVALID_PARAMETERS, "illegal operation: adding duplication to itself"); @@ -241,6 +245,7 @@ void meta_duplication_service::add_duplication(duplication_add_rpc rpc) std::shared_ptr app; duplication_info_s_ptr dup; + error_code resp_err = ERR_OK; { zauto_read_lock l(app_lock()); @@ -258,7 +263,7 @@ void meta_duplication_service::add_duplication(duplication_add_rpc rpc) request.app_name, enum_to_string(app->status)); - for (const auto & [ _, dup_info ] : app->duplications) { + for (const auto &[_, dup_info] : app->duplications) { if (dup_info->remote_cluster_name == request.remote_cluster_name) { dup = dup_info; break; @@ -273,22 +278,22 @@ void meta_duplication_service::add_duplication(duplication_add_rpc rpc) if (dup) { // The duplication for the same app to the same remote cluster has existed. - remote_app_name = dup->remote_app_name; - remote_replica_count = dup->remote_replica_count; - LOG_INFO("no need to add duplication, since it has existed: app_name={}, " + resp_err = ERR_DUP_EXIST; + LOG_INFO("[{}] duplication has been existing: app_name={}, " "remote_cluster_name={}, remote_app_name={}", + dup->log_prefix(), request.app_name, request.remote_cluster_name, - remote_app_name); + dup->remote_app_name); } else { // Check if other apps of this cluster are duplicated to the same remote app. - for (const auto & [ app_name, cur_app_state ] : _state->_exist_apps) { + for (const auto &[app_name, cur_app_state] : _state->_exist_apps) { if (app_name == request.app_name) { // Skip this app since we want to check other apps. continue; } - for (const auto & [ _, dup_info ] : cur_app_state->duplications) { + for (const auto &[_, dup_info] : cur_app_state->duplications) { LOG_WARNING_DUP_HINT_AND_RETURN_IF_NOT( dup_info->remote_cluster_name != request.remote_cluster_name || dup_info->remote_app_name != remote_app_name, @@ -313,15 +318,14 @@ void meta_duplication_service::add_duplication(duplication_add_rpc rpc) app); } - do_add_duplication(app, dup, rpc, remote_app_name, remote_replica_count); + do_add_duplication(app, dup, rpc, resp_err); } // ThreadPool(WRITE): THREAD_POOL_META_STATE void meta_duplication_service::do_add_duplication(std::shared_ptr &app, duplication_info_s_ptr &dup, duplication_add_rpc &rpc, - const std::string &remote_app_name, - const int32_t remote_replica_count) + const error_code &resp_err) { const auto &ec = dup->start(rpc.request().is_duplicating_checkpoint); LOG_ERROR_DUP_HINT_AND_RETURN_IF_NOT(ec == ERR_OK, @@ -335,23 +339,23 @@ void meta_duplication_service::do_add_duplication(std::shared_ptr &ap auto value = dup->to_json_blob(); std::queue nodes({get_duplication_path(*app), std::to_string(dup->id)}); _meta_svc->get_meta_storage()->create_node_recursively( - std::move(nodes), - std::move(value), - [app, this, dup, rpc, remote_app_name, remote_replica_count]() mutable { - LOG_INFO("[{}] add duplication successfully [app_name: {}, follower: {}]", + std::move(nodes), std::move(value), [app, this, dup, rpc, resp_err]() mutable { + LOG_INFO("[{}] add duplication successfully [app_name: {}, remote_cluster_name: {}, " + "remote_app_name: {}]", dup->log_prefix(), app->app_name, - dup->remote_cluster_name); + dup->remote_cluster_name, + dup->remote_app_name); // The duplication starts only after it's been persisted. dup->persist_status(); auto &resp = rpc.response(); - resp.err = ERR_OK; + resp.err = resp_err; resp.appid = app->app_id; resp.dupid = dup->id; - resp.__set_remote_app_name(remote_app_name); - resp.__set_remote_replica_count(remote_replica_count); + resp.__set_remote_app_name(dup->remote_app_name); + resp.__set_remote_replica_count(dup->remote_replica_count); zauto_write_lock l(app_lock()); refresh_duplicating_no_lock(app); @@ -474,7 +478,7 @@ void meta_duplication_service::create_follower_app_for_duplication( // `kDuplicationEnvMasterClusterKey=>{master_cluster_name}` // `kDuplicationEnvMasterMetasKey=>{master_meta_list}` request.options.envs.emplace(duplication_constants::kDuplicationEnvMasterClusterKey, - get_current_cluster_name()); + get_current_dup_cluster_name()); request.options.envs.emplace(duplication_constants::kDuplicationEnvMasterMetasKey, _meta_svc->get_meta_list_string()); request.options.envs.emplace(duplication_constants::kDuplicationEnvMasterAppNameKey, @@ -490,21 +494,23 @@ void meta_duplication_service::create_follower_app_for_duplication( dsn::dns_resolver::instance().resolve_address(meta_servers), msg, _meta_svc->tracker(), - [=](error_code err, configuration_create_app_response &&resp) mutable { + [dup, this](error_code err, configuration_create_app_response &&resp) mutable { FAIL_POINT_INJECT_NOT_RETURN_F("update_app_request_ok", - [&](absl::string_view s) -> void { err = ERR_OK; }); - error_code create_err = err == ERR_OK ? resp.err : err; - error_code update_err = ERR_NO_NEED_OPERATE; + [&err](std::string_view) -> void { err = ERR_OK; }); + error_code create_err = err == ERR_OK ? resp.err : err; FAIL_POINT_INJECT_NOT_RETURN_F( "persist_dup_status_failed", - [&](absl::string_view s) -> void { create_err = ERR_OK; }); + [&create_err](std::string_view) -> void { create_err = ERR_OK; }); + + error_code update_err = ERR_NO_NEED_OPERATE; if (create_err == ERR_OK) { update_err = dup->alter_status(duplication_status::DS_APP); } FAIL_POINT_INJECT_F("persist_dup_status_failed", - [&](absl::string_view s) -> void { return; }); + [](std::string_view) -> void { return; }); + if (update_err == ERR_OK) { blob value = dup->to_json_blob(); // Note: this function is `async`, it may not be persisted completed @@ -512,12 +518,12 @@ void meta_duplication_service::create_follower_app_for_duplication( // `completed`, if `_is_altering`, dup->alter_status() will return `ERR_BUSY` _meta_svc->get_meta_storage()->set_data(std::string(dup->store_path), std::move(value), - [=]() { dup->persist_status(); }); + [dup]() { dup->persist_status(); }); } else { - LOG_ERROR("created follower app[{}.{}] to trigger duplicate checkpoint failed: " + LOG_ERROR("create follower app[{}.{}] to trigger duplicate checkpoint failed: " "duplication_status = {}, create_err = {}, update_err = {}", dup->remote_cluster_name, - dup->app_name, + dup->remote_app_name, duplication_status_to_string(dup->status()), create_err, update_err); @@ -525,6 +531,62 @@ void meta_duplication_service::create_follower_app_for_duplication( }); } +namespace { + +// The format of `replica_state_str` is ",,": +// +// : bool, true means if the address of primary replica is valid, +// otherwise false. +// : uint32_t, the number of secondaries whose address are valid. +// : uint32_t, the number of secondaries whose address are invalid. +void mock_create_app(std::string_view replica_state_str, + const std::shared_ptr &dup, + dsn::query_cfg_response &resp, + dsn::error_code &err) +{ + std::vector strs; + utils::split_args(replica_state_str.data(), strs, ','); + CHECK_EQ(strs.size(), 3); + + bool has_primary = 0; + CHECK_TRUE(buf2bool(strs[0], has_primary)); + + uint32_t valid_secondaries = 0; + CHECK_TRUE(buf2uint32(strs[1], valid_secondaries)); + + uint32_t invalid_secondaries = 0; + CHECK_TRUE(buf2uint32(strs[2], invalid_secondaries)); + + std::vector nodes; + if (has_primary) { + nodes.emplace_back("localhost", 34801); + } else { + nodes.emplace_back(); + } + for (uint32_t i = 0; i < valid_secondaries; ++i) { + nodes.emplace_back("localhost", static_cast(34802 + i)); + } + for (uint32_t i = 0; i < invalid_secondaries; ++i) { + nodes.emplace_back(); + } + + for (int32_t i = 0; i < dup->partition_count; ++i) { + partition_configuration pc; + pc.max_replica_count = dup->remote_replica_count; + + SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, nodes[0]); + for (size_t j = 1; j < nodes.size(); ++j) { + ADD_IP_AND_HOST_PORT_BY_DNS(pc, secondaries, nodes[j]); + } + + resp.partitions.push_back(std::move(pc)); + } + + err = ERR_OK; +} + +} // anonymous namespace + void meta_duplication_service::check_follower_app_if_create_completed( const std::shared_ptr &dup) { @@ -533,83 +595,82 @@ void meta_duplication_service::check_follower_app_if_create_completed( meta_servers.group_host_port()->add_list(dup->remote_cluster_metas); query_cfg_request meta_config_request; - meta_config_request.app_name = dup->app_name; + meta_config_request.app_name = dup->remote_app_name; dsn::message_ex *msg = dsn::message_ex::create_request(RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX); dsn::marshall(msg, meta_config_request); - rpc::call(dsn::dns_resolver::instance().resolve_address(meta_servers), - msg, - _meta_svc->tracker(), - [=](error_code err, query_cfg_response &&resp) mutable { - FAIL_POINT_INJECT_NOT_RETURN_F("create_app_ok", [&](absl::string_view s) -> void { - err = ERR_OK; - int count = dup->partition_count; - while (count-- > 0) { - const host_port primary("localhost", 34801); - const host_port secondary1("localhost", 34802); - const host_port secondary2("localhost", 34803); - - partition_configuration p; - SET_IP_AND_HOST_PORT_BY_DNS(p, primary, primary); - SET_IPS_AND_HOST_PORTS_BY_DNS(p, secondaries, secondary1, secondary2); - resp.partitions.emplace_back(p); - } - }); - - // - ERR_INCONSISTENT_STATE: partition count of response isn't equal with local - // - ERR_INACTIVE_STATE: the follower table hasn't been healthy - error_code query_err = err == ERR_OK ? resp.err : err; - if (query_err == ERR_OK) { - if (resp.partitions.size() != dup->partition_count) { - query_err = ERR_INCONSISTENT_STATE; - } else { - for (const auto &partition : resp.partitions) { - if (!partition.hp_primary) { - query_err = ERR_INACTIVE_STATE; - break; - } - - if (partition.hp_secondaries.empty()) { - query_err = ERR_NOT_ENOUGH_MEMBER; - break; - } - - for (const auto &secondary : partition.hp_secondaries) { - if (!secondary) { - query_err = ERR_INACTIVE_STATE; - break; - } - } - } - } - } - - error_code update_err = ERR_NO_NEED_OPERATE; - if (query_err == ERR_OK) { - update_err = dup->alter_status(duplication_status::DS_LOG); - } - - FAIL_POINT_INJECT_F("persist_dup_status_failed", - [&](absl::string_view s) -> void { return; }); - if (update_err == ERR_OK) { - blob value = dup->to_json_blob(); - // Note: this function is `async`, it may not be persisted completed - // after executing, now using `_is_altering` to judge whether `updating` or - // `completed`, if `_is_altering`, dup->alter_status() will return `ERR_BUSY` - _meta_svc->get_meta_storage()->set_data(std::string(dup->store_path), - std::move(value), - [dup]() { dup->persist_status(); }); - } else { - LOG_ERROR( - "query follower app[{}.{}] replica configuration completed, result: " + rpc::call( + dsn::dns_resolver::instance().resolve_address(meta_servers), + msg, + _meta_svc->tracker(), + [dup, this](error_code err, query_cfg_response &&resp) mutable { + FAIL_POINT_INJECT_NOT_RETURN_F( + "create_app_ok", + std::bind( + mock_create_app, std::placeholders::_1, dup, std::ref(resp), std::ref(err))); + + // - ERR_INCONSISTENT_STATE: partition count of response isn't equal with local + // - ERR_INACTIVE_STATE: the follower table hasn't been healthy + error_code query_err = err == ERR_OK ? resp.err : err; + if (query_err == ERR_OK) { + if (resp.partitions.size() != dup->partition_count) { + query_err = ERR_INCONSISTENT_STATE; + } else { + for (const auto &pc : resp.partitions) { + if (!pc.hp_primary) { + // Fail once the primary replica is unavailable. + query_err = ERR_INACTIVE_STATE; + break; + } + + // Once replica count is more than 1, at least one secondary replica + // is required. + if (1 + pc.hp_secondaries.size() < pc.max_replica_count && + pc.hp_secondaries.empty()) { + query_err = ERR_NOT_ENOUGH_MEMBER; + break; + } + + for (const auto &secondary : pc.hp_secondaries) { + if (!secondary) { + // Fail once any secondary replica is unavailable. + query_err = ERR_INACTIVE_STATE; + break; + } + } + if (query_err != ERR_OK) { + break; + } + } + } + } + + error_code update_err = ERR_NO_NEED_OPERATE; + if (query_err == ERR_OK) { + update_err = dup->alter_status(duplication_status::DS_LOG); + } + + FAIL_POINT_INJECT_F("persist_dup_status_failed", + [](std::string_view) -> void { return; }); + + if (update_err == ERR_OK) { + blob value = dup->to_json_blob(); + // Note: this function is `async`, it may not be persisted completed + // after executing, now using `_is_altering` to judge whether `updating` or + // `completed`, if `_is_altering`, dup->alter_status() will return `ERR_BUSY` + _meta_svc->get_meta_storage()->set_data(std::string(dup->store_path), + std::move(value), + [dup]() { dup->persist_status(); }); + } else { + LOG_ERROR("query follower app[{}.{}] replica configuration completed, result: " "duplication_status = {}, query_err = {}, update_err = {}", dup->remote_cluster_name, - dup->app_name, + dup->remote_app_name, duplication_status_to_string(dup->status()), query_err, update_err); - } - }); + } + }); } void meta_duplication_service::do_update_partition_confirmed( @@ -770,7 +831,7 @@ void meta_duplication_service::do_restore_duplication(dupid_t dup_id, // restore duplication info from json _meta_svc->get_meta_storage()->get_data( std::string(store_path), - [ dup_id, this, app = std::move(app), store_path ](const blob &json) { + [dup_id, this, app = std::move(app), store_path](const blob &json) { zauto_write_lock l(app_lock()); auto dup = duplication_info::decode_from_blob(dup_id, diff --git a/src/meta/duplication/meta_duplication_service.h b/src/meta/duplication/meta_duplication_service.h index 2bcda880ef..3f06d63265 100644 --- a/src/meta/duplication/meta_duplication_service.h +++ b/src/meta/duplication/meta_duplication_service.h @@ -31,6 +31,7 @@ #include "utils/fmt_logging.h" namespace dsn { +class error_code; class host_port; class zrwlock_nr; @@ -81,8 +82,7 @@ class meta_duplication_service void do_add_duplication(std::shared_ptr &app, duplication_info_s_ptr &dup, duplication_add_rpc &rpc, - const std::string &remote_app_name, - const int32_t remote_replica_count); + const error_code &resp_err); void do_modify_duplication(std::shared_ptr &app, duplication_info_s_ptr &dup, diff --git a/src/meta/greedy_load_balancer.cpp b/src/meta/greedy_load_balancer.cpp index 81d7482bd9..0b3017eda3 100644 --- a/src/meta/greedy_load_balancer.cpp +++ b/src/meta/greedy_load_balancer.cpp @@ -46,7 +46,7 @@ #include "meta/table_metrics.h" #include "meta_admin_types.h" #include "meta_data.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/command_manager.h" #include "utils/flags.h" #include "utils/fmt_logging.h" diff --git a/src/meta/load_balance_policy.cpp b/src/meta/load_balance_policy.cpp index 7b0bd19d84..f35fd5132d 100644 --- a/src/meta/load_balance_policy.cpp +++ b/src/meta/load_balance_policy.cpp @@ -29,12 +29,12 @@ #include #include -#include "absl/strings/string_view.h" +#include #include "dsn.layer2_types.h" #include "meta/meta_data.h" #include "meta_admin_types.h" -#include "runtime/rpc/dns_resolver.h" // IWYU pragma: keep -#include "runtime/rpc/rpc_address.h" +#include "rpc/dns_resolver.h" // IWYU pragma: keep +#include "rpc/rpc_address.h" #include "utils/command_manager.h" #include "utils/fail_point.h" #include "utils/flags.h" @@ -146,8 +146,7 @@ generate_balancer_request(const app_mapper &apps, const host_port &from, const host_port &to) { - FAIL_POINT_INJECT_F("generate_balancer_request", - [](absl::string_view name) { return nullptr; }); + FAIL_POINT_INJECT_F("generate_balancer_request", [](std::string_view name) { return nullptr; }); configuration_balancer_request result; result.gpid = pc.pid; @@ -320,7 +319,7 @@ void load_balance_policy::start_moving_primary(const std::shared_ptr while (plan_moving-- > 0) { dsn::gpid selected = select_moving(potential_moving, prev_load, current_load, from, to); - const partition_configuration &pc = app->partitions[selected.get_partition_index()]; + const auto &pc = app->pcs[selected.get_partition_index()]; auto balancer_result = _migration_result->emplace( selected, generate_balancer_request( @@ -338,7 +337,7 @@ std::list load_balance_policy::calc_potential_moving( std::list potential_moving; const node_state &ns = _global_view->nodes->find(from)->second; ns.for_each_primary(app->app_id, [&](const gpid &pid) { - const partition_configuration &pc = app->partitions[pid.get_partition_index()]; + const auto &pc = app->pcs[pid.get_partition_index()]; if (is_secondary(pc, to)) { potential_moving.push_back(pid); } @@ -566,10 +565,10 @@ void ford_fulkerson::add_edge(int node_id, const node_state &ns) void ford_fulkerson::update_decree(int node_id, const node_state &ns) { ns.for_each_primary(_app->app_id, [&, this](const gpid &pid) { - const partition_configuration &pc = _app->partitions[pid.get_partition_index()]; + const auto &pc = _app->pcs[pid.get_partition_index()]; for (const auto &secondary : pc.hp_secondaries) { auto i = _host_port_id.find(secondary); - CHECK(i != _host_port_id.end(), "invalid secondary address, address = {}", secondary); + CHECK(i != _host_port_id.end(), "invalid secondary: {}", secondary); _network[node_id][i->second]++; } return true; @@ -709,7 +708,7 @@ void copy_replica_operation::copy_once(gpid selected_pid, migration_list *result const auto &from = _host_port_vec[*_ordered_host_port_ids.rbegin()]; const auto &to = _host_port_vec[*_ordered_host_port_ids.begin()]; - auto pc = _app->partitions[selected_pid.get_partition_index()]; + auto pc = _app->pcs[selected_pid.get_partition_index()]; auto request = generate_balancer_request(_apps, pc, get_balance_type(), from, to); result->emplace(selected_pid, request); } diff --git a/src/meta/load_balance_policy.h b/src/meta/load_balance_policy.h index 486b0d2e69..30c0f41cdb 100644 --- a/src/meta/load_balance_policy.h +++ b/src/meta/load_balance_policy.h @@ -34,12 +34,13 @@ #include "common/replication_other_types.h" #include "meta_admin_types.h" #include "meta_data.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/enum_helper.h" #include "utils/zlocks.h" namespace dsn { class command_deregister; + class partition_configuration; namespace replication { diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp index 5f8d04b551..5dd49a6c0b 100644 --- a/src/meta/meta_backup_service.cpp +++ b/src/meta/meta_backup_service.cpp @@ -15,12 +15,14 @@ // specific language governing permissions and limitations // under the License. -#include #include #include #include +#include +#include #include #include +#include #include #include @@ -36,15 +38,15 @@ #include "meta/meta_state_service.h" #include "meta_backup_service.h" #include "meta_service.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" #include "security/access_controller.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_code.h" #include "server_state.h" +#include "task/async_calls.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/chrono_literals.h" @@ -182,14 +184,15 @@ void policy_context::start_backup_app_meta_unlocked(int32_t app_id) LOG_ERROR("{}: create file {} failed, restart this backup later", _backup_sig, create_file_req.file_name); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, app_id]() { - zauto_lock l(_lock); - start_backup_app_meta_unlocked(app_id); - }, - 0, - _backup_service->backup_option().block_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, app_id]() { + zauto_lock l(_lock); + start_backup_app_meta_unlocked(app_id); + }, + 0, + _backup_service->backup_option().block_retry_delay_ms); return; } CHECK_NOTNULL(remote_file, @@ -221,14 +224,15 @@ void policy_context::start_backup_app_meta_unlocked(int32_t app_id) LOG_WARNING("write {} failed, reason({}), try it later", remote_file->file_name(), resp.err); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, app_id]() { - zauto_lock l(_lock); - start_backup_app_meta_unlocked(app_id); - }, - 0, - _backup_service->backup_option().block_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, app_id]() { + zauto_lock l(_lock); + start_backup_app_meta_unlocked(app_id); + }, + 0, + _backup_service->backup_option().block_retry_delay_ms); } }, &_tracker); @@ -288,14 +292,15 @@ void policy_context::write_backup_app_finish_flag_unlocked(int32_t app_id, LOG_ERROR("{}: create file {} failed, restart this backup later", _backup_sig, create_file_req.file_name); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, app_id, write_callback]() { - zauto_lock l(_lock); - write_backup_app_finish_flag_unlocked(app_id, write_callback); - }, - 0, - _backup_service->backup_option().block_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, app_id, write_callback]() { + zauto_lock l(_lock); + write_backup_app_finish_flag_unlocked(app_id, write_callback); + }, + 0, + _backup_service->backup_option().block_retry_delay_ms); return; } @@ -337,14 +342,15 @@ void policy_context::write_backup_app_finish_flag_unlocked(int32_t app_id, LOG_WARNING("write {} failed, reason({}), try it later", remote_file->file_name(), resp.err); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, app_id, write_callback]() { - zauto_lock l(_lock); - write_backup_app_finish_flag_unlocked(app_id, write_callback); - }, - 0, - _backup_service->backup_option().block_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, app_id, write_callback]() { + zauto_lock l(_lock); + write_backup_app_finish_flag_unlocked(app_id, write_callback); + }, + 0, + _backup_service->backup_option().block_retry_delay_ms); } }); } @@ -404,14 +410,15 @@ void policy_context::write_backup_info_unlocked(const backup_info &b_info, LOG_ERROR("{}: create file {} failed, restart this backup later", _backup_sig, create_file_req.file_name); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, b_info, write_callback]() { - zauto_lock l(_lock); - write_backup_info_unlocked(b_info, write_callback); - }, - 0, - _backup_service->backup_option().block_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, b_info, write_callback]() { + zauto_lock l(_lock); + write_backup_info_unlocked(b_info, write_callback); + }, + 0, + _backup_service->backup_option().block_retry_delay_ms); return; } @@ -444,14 +451,15 @@ void policy_context::write_backup_info_unlocked(const backup_info &b_info, LOG_WARNING("write {} failed, reason({}), try it later", remote_file->file_name(), resp.err); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, b_info, write_callback]() { - zauto_lock l(_lock); - write_backup_info_unlocked(b_info, write_callback); - }, - 0, - _backup_service->backup_option().block_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, b_info, write_callback]() { + zauto_lock l(_lock); + write_backup_info_unlocked(b_info, write_callback); + }, + 0, + _backup_service->backup_option().block_retry_delay_ms); } }); } @@ -524,20 +532,21 @@ void policy_context::start_backup_partition_unlocked(gpid pid) pid, cold_backup_constant::PROGRESS_FINISHED, dsn::host_port()); return; } - partition_primary = app->partitions[pid.get_partition_index()].hp_primary; + partition_primary = app->pcs[pid.get_partition_index()].hp_primary; } if (!partition_primary) { LOG_WARNING("{}: partition {} doesn't have a primary now, retry to backup it later", _backup_sig, pid); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, pid]() { - zauto_lock l(_lock); - start_backup_partition_unlocked(pid); - }, - 0, - _backup_service->backup_option().reconfiguration_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, pid]() { + zauto_lock l(_lock); + start_backup_partition_unlocked(pid); + }, + 0, + _backup_service->backup_option().reconfiguration_retry_delay_ms); return; } @@ -625,14 +634,15 @@ void policy_context::on_backup_reply(error_code err, } // retry to backup the partition. - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, pid]() { - zauto_lock l(_lock); - start_backup_partition_unlocked(pid); - }, - 0, - _backup_service->backup_option().request_backup_period_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, pid]() { + zauto_lock l(_lock); + start_backup_partition_unlocked(pid); + }, + 0, + _backup_service->backup_option().request_backup_period_ms); } void policy_context::initialize_backup_progress_unlocked() @@ -660,7 +670,7 @@ void policy_context::initialize_backup_progress_unlocked() // unfinished_partitions_per_app & partition_progress & app_chkpt_size _progress.unfinished_partitions_per_app[app_id] = app->partition_count; std::map partition_chkpt_size; - for (const partition_configuration &pc : app->partitions) { + for (const auto &pc : app->pcs) { _progress.partition_progress[pc.pid] = 0; partition_chkpt_size[pc.pid.get_app_id()] = 0; } @@ -707,15 +717,16 @@ void policy_context::sync_backup_to_remote_storage_unlocked(const backup_info &b LOG_ERROR("{}: sync backup info({}) to remote storage got timeout, retry it later", _policy.policy_name, b_info.backup_id); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this, b_info, sync_callback, create_new_node]() { - zauto_lock l(_lock); - sync_backup_to_remote_storage_unlocked( - std::move(b_info), std::move(sync_callback), create_new_node); - }, - 0, - _backup_service->backup_option().meta_retry_delay_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this, b_info, sync_callback, create_new_node]() { + zauto_lock l(_lock); + sync_backup_to_remote_storage_unlocked( + std::move(b_info), std::move(sync_callback), create_new_node); + }, + 0, + _backup_service->backup_option().meta_retry_delay_ms); } else { CHECK(false, "{}: we can't handle this right now, error({})", _backup_sig, err); } @@ -735,14 +746,15 @@ void policy_context::continue_current_backup_unlocked() if (_policy.is_disable) { LOG_INFO("{}: policy is disabled, ignore this backup and try it later", _policy.policy_name); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this]() { - zauto_lock l(_lock); - issue_new_backup_unlocked(); - }, - 0, - _backup_service->backup_option().issue_backup_interval_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this]() { + zauto_lock l(_lock); + issue_new_backup_unlocked(); + }, + 0, + _backup_service->backup_option().issue_backup_interval_ms); return; } @@ -820,26 +832,28 @@ void policy_context::issue_new_backup_unlocked() // before issue new backup, we check whether the policy is dropped if (_policy.is_disable) { LOG_INFO("{}: policy is disabled, just ignore backup, try it later", _policy.policy_name); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this]() { - zauto_lock l(_lock); - issue_new_backup_unlocked(); - }, - 0, - _backup_service->backup_option().issue_backup_interval_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this]() { + zauto_lock l(_lock); + issue_new_backup_unlocked(); + }, + 0, + _backup_service->backup_option().issue_backup_interval_ms); return; } if (!should_start_backup_unlocked()) { - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this]() { - zauto_lock l(_lock); - issue_new_backup_unlocked(); - }, - 0, - _backup_service->backup_option().issue_backup_interval_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this]() { + zauto_lock l(_lock); + issue_new_backup_unlocked(); + }, + 0, + _backup_service->backup_option().issue_backup_interval_ms); LOG_INFO("{}: start issue new backup {}ms later", _policy.policy_name, _backup_service->backup_option().issue_backup_interval_ms.count()); @@ -852,14 +866,15 @@ void policy_context::issue_new_backup_unlocked() // TODO: just ignore this backup and wait next backup LOG_WARNING("{}: all apps have been dropped, ignore this backup and retry it later", _backup_sig); - tasking::enqueue(LPC_DEFAULT_CALLBACK, - &_tracker, - [this]() { - zauto_lock l(_lock); - issue_new_backup_unlocked(); - }, - 0, - _backup_service->backup_option().issue_backup_interval_ms); + tasking::enqueue( + LPC_DEFAULT_CALLBACK, + &_tracker, + [this]() { + zauto_lock l(_lock); + issue_new_backup_unlocked(); + }, + 0, + _backup_service->backup_option().issue_backup_interval_ms); } else { task_ptr continue_to_backup = tasking::create_task(LPC_DEFAULT_CALLBACK, &_tracker, [this]() { @@ -1349,9 +1364,10 @@ void backup_service::add_backup_policy(dsn::message_ex *msg) { // check policy name zauto_lock l(_lock); - if (!is_valid_policy_name_unlocked(request.policy_name)) { + if (!is_valid_policy_name_unlocked(request.policy_name, hint_message)) { response.err = ERR_INVALID_PARAMETERS; - response.hint_message = "invalid policy_name: " + request.policy_name; + response.hint_message = + fmt::format("invalid policy name: '{}', {}", request.policy_name, hint_message); _meta_svc->reply_data(msg, response); msg->release_ref(); return; @@ -1394,7 +1410,7 @@ void backup_service::do_add_policy(dsn::message_ex *req, _meta_svc->get_remote_storage()->create_node( policy_path, LPC_DEFAULT_CALLBACK, // TASK_CODE_EXEC_INLINED, - [ this, req, p, hint_msg, policy_name = cur_policy.policy_name ](error_code err) { + [this, req, p, hint_msg, policy_name = cur_policy.policy_name](error_code err) { if (err == ERR_OK || err == ERR_NODE_ALREADY_EXIST) { configuration_add_backup_policy_response resp; resp.hint_message = hint_msg; @@ -1459,10 +1475,39 @@ void backup_service::do_update_policy_to_remote_storage( }); } -bool backup_service::is_valid_policy_name_unlocked(const std::string &policy_name) +bool backup_service::is_valid_policy_name_unlocked(const std::string &policy_name, + std::string &hint_message) { - auto iter = _policy_states.find(policy_name); - return (iter == _policy_states.end()); + // BACKUP_INFO and policy_name should not be the same, because they are in the same level in the + // output when query the policy details, use different names to distinguish the respective + // contents. + if (policy_name.find(cold_backup_constant::BACKUP_INFO) != std::string::npos) { + hint_message = "policy name is reserved"; + return false; + } + + // Validate the policy name as a metric name in prometheus. + if (!prometheus::CheckMetricName(policy_name)) { + hint_message = "policy name should match regex '[a-zA-Z_:][a-zA-Z0-9_:]*' when act as a " + "metric name in prometheus"; + return false; + } + + // Validate the policy name as a metric label in prometheus. + if (!prometheus::CheckLabelName(policy_name, prometheus::MetricType::Gauge)) { + hint_message = "policy name should match regex '[a-zA-Z_][a-zA-Z0-9_]*' when act as a " + "metric label in prometheus"; + return false; + } + + const auto iter = _policy_states.find(policy_name); + if (iter != _policy_states.end()) { + hint_message = "policy name is already exist"; + return false; + } + + hint_message.clear(); + return true; } void backup_service::query_backup_policy(query_backup_policy_rpc rpc) @@ -1602,9 +1647,16 @@ void backup_service::modify_backup_policy(configuration_modify_backup_policy_rpc if (request.__isset.is_disable) { if (request.is_disable) { if (is_under_backup) { - LOG_INFO("{}: policy is under backuping, not allow to disable", - cur_policy.policy_name); - response.err = ERR_BUSY; + if (request.__isset.force_disable && request.force_disable) { + LOG_INFO("{}: policy is under backuping, force to disable", + cur_policy.policy_name); + cur_policy.is_disable = true; + have_modify_policy = true; + } else { + LOG_INFO("{}: policy is under backuping, not allow to disable", + cur_policy.policy_name); + response.err = ERR_BUSY; + } } else if (!cur_policy.is_disable) { LOG_INFO("{}: policy is marked to disable", cur_policy.policy_name); cur_policy.is_disable = true; diff --git a/src/meta/meta_backup_service.h b/src/meta/meta_backup_service.h index f767ad2be3..28bbca4f50 100644 --- a/src/meta/meta_backup_service.h +++ b/src/meta/meta_backup_service.h @@ -37,8 +37,8 @@ #include "common/json_helper.h" #include "common/replication_other_types.h" #include "meta_rpc_types.h" -#include "runtime/task/task.h" -#include "runtime/task/task_tracker.h" +#include "task/task.h" +#include "task/task_tracker.h" #include "utils/api_utilities.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" @@ -407,6 +407,7 @@ class backup_service FRIEND_TEST(backup_service_test, test_init_backup); FRIEND_TEST(backup_service_test, test_query_backup_status); + FRIEND_TEST(backup_service_test, test_valid_policy_name); FRIEND_TEST(meta_backup_service_test, test_add_backup_policy); void start_create_policy_meta_root(dsn::task_ptr callback); @@ -420,7 +421,7 @@ class backup_service const policy &p, std::shared_ptr &p_context_ptr); - bool is_valid_policy_name_unlocked(const std::string &policy_name); + bool is_valid_policy_name_unlocked(const std::string &policy_name, std::string &hint_message); policy_factory _factory; meta_service *_meta_svc; diff --git a/src/meta/meta_bulk_load_ingestion_context.cpp b/src/meta/meta_bulk_load_ingestion_context.cpp index 30989c79f8..79d01a4f1e 100644 --- a/src/meta/meta_bulk_load_ingestion_context.cpp +++ b/src/meta/meta_bulk_load_ingestion_context.cpp @@ -26,7 +26,7 @@ #include "utils/fail_point.h" #include "utils/fmt_logging.h" #include "utils/string_conv.h" -#include "absl/strings/string_view.h" +#include DSN_DEFINE_uint32(meta_server, bulk_load_node_max_ingesting_count, @@ -44,13 +44,13 @@ ingestion_context::ingestion_context() { reset_all(); } ingestion_context::~ingestion_context() { reset_all(); } -void ingestion_context::partition_node_info::create(const partition_configuration &config, +void ingestion_context::partition_node_info::create(const partition_configuration &pc, const config_context &cc) { - pid = config.pid; + pid = pc.pid; std::unordered_set current_nodes; - current_nodes.insert(config.hp_primary); - for (const auto &secondary : config.hp_secondaries) { + current_nodes.insert(pc.hp_primary); + for (const auto &secondary : pc.hp_secondaries) { current_nodes.insert(secondary); } for (const auto &node : current_nodes) { @@ -73,7 +73,7 @@ uint32_t ingestion_context::node_context::get_max_disk_ingestion_count( const uint32_t max_node_ingestion_count) const { FAIL_POINT_INJECT_F("ingestion_node_context_disk_count", - [](absl::string_view count_str) -> uint32_t { + [](std::string_view count_str) -> uint32_t { uint32_t count = 0; buf2uint32(count_str, count); return count; @@ -120,16 +120,16 @@ void ingestion_context::node_context::decrease(const std::string &disk_tag) disk_ingesting_counts[disk_tag]--; } -bool ingestion_context::try_partition_ingestion(const partition_configuration &config, +bool ingestion_context::try_partition_ingestion(const partition_configuration &pc, const config_context &cc) { - FAIL_POINT_INJECT_F("ingestion_try_partition_ingestion", [=](absl::string_view) -> bool { + FAIL_POINT_INJECT_F("ingestion_try_partition_ingestion", [=](std::string_view) -> bool { auto info = partition_node_info(); - info.pid = config.pid; - _running_partitions[config.pid] = info; + info.pid = pc.pid; + _running_partitions[pc.pid] = info; return true; }); - partition_node_info info(config, cc); + partition_node_info info(pc, cc); for (const auto &kv : info.node_disk) { if (!check_node_ingestion(kv.first, kv.second)) { return false; @@ -158,7 +158,7 @@ void ingestion_context::add_partition(const partition_node_info &info) void ingestion_context::remove_partition(const gpid &pid) { FAIL_POINT_INJECT_F("ingestion_context_remove_partition", - [=](absl::string_view) { _running_partitions.erase(pid); }); + [=](std::string_view) { _running_partitions.erase(pid); }); if (_running_partitions.find(pid) == _running_partitions.end()) { return; diff --git a/src/meta/meta_bulk_load_ingestion_context.h b/src/meta/meta_bulk_load_ingestion_context.h index 1675726d66..0e02d33317 100644 --- a/src/meta/meta_bulk_load_ingestion_context.h +++ b/src/meta/meta_bulk_load_ingestion_context.h @@ -22,7 +22,7 @@ #include #include "common/gpid.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/flags.h" DSN_DECLARE_uint32(bulk_load_node_max_ingesting_count); @@ -49,11 +49,11 @@ class ingestion_context std::unordered_map node_disk; partition_node_info() {} - partition_node_info(const partition_configuration &config, const config_context &cc) + partition_node_info(const partition_configuration &pc, const config_context &cc) { - create(config, cc); + create(pc, cc); } - void create(const partition_configuration &config, const config_context &cc); + void create(const partition_configuration &pc, const config_context &cc); }; struct node_context @@ -77,7 +77,7 @@ class ingestion_context void decrease(const std::string &disk_tag); }; - bool try_partition_ingestion(const partition_configuration &config, const config_context &cc); + bool try_partition_ingestion(const partition_configuration &pc, const config_context &cc); bool check_node_ingestion(const host_port &node, const std::string &disk_tag); void add_partition(const partition_node_info &info); void remove_partition(const gpid &pid); diff --git a/src/meta/meta_bulk_load_service.cpp b/src/meta/meta_bulk_load_service.cpp index 9dd97141e3..7ba4854035 100644 --- a/src/meta/meta_bulk_load_service.cpp +++ b/src/meta/meta_bulk_load_service.cpp @@ -24,9 +24,9 @@ #include #include #include +#include #include -#include "absl/strings/string_view.h" #include "block_service/block_service.h" #include "block_service/block_service_manager.h" #include "common/replica_envs.h" @@ -39,14 +39,14 @@ #include "meta/meta_state_service.h" #include "meta/server_state.h" #include "meta_admin_types.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/chrono_literals.h" @@ -101,7 +101,7 @@ void bulk_load_service::initialize_bulk_load_service() void bulk_load_service::on_start_bulk_load(start_bulk_load_rpc rpc) { FAIL_POINT_INJECT_F("meta_on_start_bulk_load", - [=](absl::string_view) { rpc.response().err = ERR_OK; }); + [=](std::string_view) { rpc.response().err = ERR_OK; }); const auto &request = rpc.request(); auto &response = rpc.response(); @@ -155,10 +155,11 @@ void bulk_load_service::on_start_bulk_load(start_bulk_load_rpc rpc) // avoid possible load balancing _meta_svc->set_function_level(meta_function_level::fl_steady); - tasking::enqueue(LPC_META_STATE_NORMAL, - _meta_svc->tracker(), - [this, rpc, app]() { do_start_app_bulk_load(std::move(app), std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + _meta_svc->tracker(), + [this, rpc, app]() { do_start_app_bulk_load(std::move(app), std::move(rpc)); }, + server_state::sStateHash); } // ThreadPool: THREAD_POOL_META_SERVER @@ -170,7 +171,7 @@ bulk_load_service::check_bulk_load_request_params(const start_bulk_load_request std::string &hint_msg) { FAIL_POINT_INJECT_F("meta_check_bulk_load_request_params", - [](absl::string_view) -> error_code { return ERR_OK; }); + [](std::string_view) -> error_code { return ERR_OK; }); if (!validate_ingest_behind(envs, request.ingest_behind)) { hint_msg = fmt::format("inconsistent ingestion behind option"); @@ -356,7 +357,7 @@ bool bulk_load_service::check_partition_status( const gpid &pid, bool always_unhealthy_check, const std::function &retry_function, - /*out*/ partition_configuration &pconfig) + /*out*/ partition_configuration &pc) { std::shared_ptr app = get_app(pid.get_app_id()); if (app == nullptr || app->status != app_status::AS_AVAILABLE) { @@ -369,18 +370,19 @@ bool bulk_load_service::check_partition_status( return false; } - pconfig = app->partitions[pid.get_partition_index()]; - if (!pconfig.hp_primary) { + pc = app->pcs[pid.get_partition_index()]; + if (!pc.hp_primary) { LOG_WARNING("app({}) partition({}) primary is invalid, try it later", app_name, pid); - tasking::enqueue(LPC_META_STATE_NORMAL, - _meta_svc->tracker(), - [retry_function, app_name, pid]() { retry_function(app_name, pid); }, - 0, - std::chrono::seconds(1)); + tasking::enqueue( + LPC_META_STATE_NORMAL, + _meta_svc->tracker(), + [retry_function, app_name, pid]() { retry_function(app_name, pid); }, + 0, + std::chrono::seconds(1)); return false; } - if (pconfig.hp_secondaries.size() < pconfig.max_replica_count - 1) { + if (pc.hp_secondaries.size() < pc.max_replica_count - 1) { bulk_load_status::type p_status; { zauto_read_lock l(_lock); @@ -398,11 +400,12 @@ bool bulk_load_service::check_partition_status( app_name, pid, dsn::enum_to_string(p_status)); - tasking::enqueue(LPC_META_STATE_NORMAL, - _meta_svc->tracker(), - [retry_function, app_name, pid]() { retry_function(app_name, pid); }, - 0, - std::chrono::seconds(1)); + tasking::enqueue( + LPC_META_STATE_NORMAL, + _meta_svc->tracker(), + [retry_function, app_name, pid]() { retry_function(app_name, pid); }, + 0, + std::chrono::seconds(1)); return false; } return true; @@ -411,9 +414,9 @@ bool bulk_load_service::check_partition_status( // ThreadPool: THREAD_POOL_META_STATE void bulk_load_service::partition_bulk_load(const std::string &app_name, const gpid &pid) { - FAIL_POINT_INJECT_F("meta_bulk_load_partition_bulk_load", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("meta_bulk_load_partition_bulk_load", [](std::string_view) {}); - partition_configuration pconfig; + partition_configuration pc; if (!check_partition_status(app_name, pid, false, @@ -421,7 +424,7 @@ void bulk_load_service::partition_bulk_load(const std::string &app_name, const g this, std::placeholders::_1, std::placeholders::_2), - pconfig)) { + pc)) { return; } @@ -431,18 +434,18 @@ void bulk_load_service::partition_bulk_load(const std::string &app_name, const g const app_bulk_load_info &ainfo = _app_bulk_load_info[pid.get_app_id()]; req->pid = pid; req->app_name = app_name; - SET_IP_AND_HOST_PORT(*req, primary, pconfig.primary, pconfig.hp_primary); + SET_IP_AND_HOST_PORT(*req, primary, pc.primary, pc.hp_primary); req->remote_provider_name = ainfo.file_provider_type; req->cluster_name = ainfo.cluster_name; req->meta_bulk_load_status = get_partition_bulk_load_status_unlocked(pid); - req->ballot = pconfig.ballot; + req->ballot = pc.ballot; req->query_bulk_load_metadata = is_partition_metadata_not_updated_unlocked(pid); req->remote_root_path = ainfo.remote_root_path; } LOG_INFO("send bulk load request to node({}), app({}), partition({}), partition " "status = {}, remote provider = {}, cluster_name = {}, remote_root_path = {}", - FMT_HOST_PORT_AND_IP(pconfig, primary), + FMT_HOST_PORT_AND_IP(pc, primary), app_name, pid, dsn::enum_to_string(req->meta_bulk_load_status), @@ -451,17 +454,14 @@ void bulk_load_service::partition_bulk_load(const std::string &app_name, const g req->remote_root_path); bulk_load_rpc rpc(std::move(req), RPC_BULK_LOAD, 0_ms, 0, pid.thread_hash()); - rpc.call(pconfig.primary, _meta_svc->tracker(), [this, rpc](error_code err) mutable { - // fill host_port struct if needed - // remote server maybe not supported host_post, just have address - auto &bulk_load_resp = rpc.response(); - if (!bulk_load_resp.__isset.hp_group_bulk_load_state) { - bulk_load_resp.__set_hp_group_bulk_load_state({}); - for (const auto & [ addr, pbls ] : bulk_load_resp.group_bulk_load_state) { - bulk_load_resp.hp_group_bulk_load_state[host_port::from_address(addr)] = pbls; - } - } - + rpc.call(pc.primary, _meta_svc->tracker(), [this, pid, rpc, pc](error_code err) mutable { + // The remote server may not support FQDN, but do not try to reverse resolve the + // IP addresses because they may be unresolved. Just warning and ignore this. + LOG_WARNING_IF(!rpc.response().__isset.hp_group_bulk_load_state, + "The {} primary {} doesn't support FQDN, the response " + "hp_group_bulk_load_state field is not set", + pid, + FMT_HOST_PORT_AND_IP(pc, primary)); on_partition_bulk_load_reply(err, rpc.request(), rpc.response()); }); } @@ -532,7 +532,7 @@ void bulk_load_service::on_partition_bulk_load_reply(error_code err, handle_app_unavailable(pid.get_app_id(), app_name); return; } - ballot current_ballot = app->partitions[pid.get_partition_index()].ballot; + ballot current_ballot = app->pcs[pid.get_partition_index()].ballot; if (request.ballot < current_ballot) { LOG_WARNING( "receive out-date response from node({}), app({}), partition({}), request ballot = " @@ -583,7 +583,7 @@ void bulk_load_service::on_partition_bulk_load_reply(error_code err, // ThreadPool: THREAD_POOL_META_STATE void bulk_load_service::try_resend_bulk_load_request(const std::string &app_name, const gpid &pid) { - FAIL_POINT_INJECT_F("meta_bulk_load_resend_request", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("meta_bulk_load_resend_request", [](std::string_view) {}); zauto_read_lock l(_lock); if (is_app_bulk_loading_unlocked(pid.get_app_id())) { tasking::enqueue(LPC_META_STATE_NORMAL, @@ -596,7 +596,7 @@ void bulk_load_service::try_resend_bulk_load_request(const std::string &app_name // ThreadPool: THREAD_POOL_META_STATE void bulk_load_service::handle_app_downloading(const bulk_load_response &response, - const host_port &primary_addr) + const host_port &primary) { const std::string &app_name = response.app_name; const gpid &pid = response.pid; @@ -605,7 +605,7 @@ void bulk_load_service::handle_app_downloading(const bulk_load_response &respons LOG_WARNING( "receive bulk load response from node({}) app({}), partition({}), primary_status({}), " "but total_download_progress is not set", - primary_addr, + primary, app_name, pid, dsn::enum_to_string(response.primary_bulk_load_status)); @@ -618,7 +618,7 @@ void bulk_load_service::handle_app_downloading(const bulk_load_response &respons !bulk_load_states.__isset.download_status) { LOG_WARNING("receive bulk load response from node({}) app({}), partition({}), " "primary_status({}), but node({}) progress or status is not set", - primary_addr, + primary, app_name, pid, dsn::enum_to_string(response.primary_bulk_load_status), @@ -657,7 +657,7 @@ void bulk_load_service::handle_app_downloading(const bulk_load_response &respons int32_t total_progress = response.total_download_progress; LOG_INFO("receive bulk load response from node({}) app({}) partition({}), primary_status({}), " "total_download_progress = {}", - primary_addr, + primary, app_name, pid, dsn::enum_to_string(response.primary_bulk_load_status), @@ -678,7 +678,7 @@ void bulk_load_service::handle_app_downloading(const bulk_load_response &respons // ThreadPool: THREAD_POOL_META_STATE void bulk_load_service::handle_app_ingestion(const bulk_load_response &response, - const host_port &primary_addr) + const host_port &primary) { const std::string &app_name = response.app_name; const gpid &pid = response.pid; @@ -686,7 +686,7 @@ void bulk_load_service::handle_app_ingestion(const bulk_load_response &response, if (!response.__isset.is_group_ingestion_finished) { LOG_WARNING("receive bulk load response from node({}) app({}) partition({}), " "primary_status({}), but is_group_ingestion_finished is not set", - primary_addr, + primary, app_name, pid, dsn::enum_to_string(response.primary_bulk_load_status)); @@ -698,7 +698,7 @@ void bulk_load_service::handle_app_ingestion(const bulk_load_response &response, if (!bulk_load_states.__isset.ingest_status) { LOG_WARNING("receive bulk load response from node({}) app({}) partition({}), " "primary_status({}), but node({}) ingestion_status is not set", - primary_addr, + primary, app_name, pid, dsn::enum_to_string(response.primary_bulk_load_status), @@ -717,7 +717,7 @@ void bulk_load_service::handle_app_ingestion(const bulk_load_response &response, LOG_INFO("receive bulk load response from node({}) app({}) partition({}), primary_status({}), " "is_group_ingestion_finished = {}", - primary_addr, + primary, app_name, pid, dsn::enum_to_string(response.primary_bulk_load_status), @@ -736,7 +736,7 @@ void bulk_load_service::handle_app_ingestion(const bulk_load_response &response, // ThreadPool: THREAD_POOL_META_STATE void bulk_load_service::handle_bulk_load_finish(const bulk_load_response &response, - const host_port &primary_addr) + const host_port &primary) { const std::string &app_name = response.app_name; const gpid &pid = response.pid; @@ -744,7 +744,7 @@ void bulk_load_service::handle_bulk_load_finish(const bulk_load_response &respon if (!response.__isset.is_group_bulk_load_context_cleaned_up) { LOG_WARNING("receive bulk load response from node({}) app({}) partition({}), " "primary_status({}), but is_group_bulk_load_context_cleaned_up is not set", - primary_addr, + primary, app_name, pid, dsn::enum_to_string(response.primary_bulk_load_status)); @@ -755,7 +755,7 @@ void bulk_load_service::handle_bulk_load_finish(const bulk_load_response &respon if (!kv.second.__isset.is_cleaned_up) { LOG_WARNING("receive bulk load response from node({}) app({}), partition({}), " "primary_status({}), but node({}) is_cleaned_up is not set", - primary_addr, + primary, app_name, pid, dsn::enum_to_string(response.primary_bulk_load_status), @@ -770,7 +770,7 @@ void bulk_load_service::handle_bulk_load_finish(const bulk_load_response &respon LOG_WARNING( "receive bulk load response from node({}) app({}) partition({}), current partition " "has already been cleaned up", - primary_addr, + primary, app_name, pid); return; @@ -781,7 +781,7 @@ void bulk_load_service::handle_bulk_load_finish(const bulk_load_response &respon bool group_cleaned_up = response.is_group_bulk_load_context_cleaned_up; LOG_INFO("receive bulk load response from node({}) app({}) partition({}), primary status = {}, " "is_group_bulk_load_context_cleaned_up = {}", - primary_addr, + primary, app_name, pid, dsn::enum_to_string(response.primary_bulk_load_status), @@ -817,7 +817,7 @@ void bulk_load_service::handle_bulk_load_finish(const bulk_load_response &respon // ThreadPool: THREAD_POOL_META_STATE void bulk_load_service::handle_app_pausing(const bulk_load_response &response, - const host_port &primary_addr) + const host_port &primary) { const std::string &app_name = response.app_name; const gpid &pid = response.pid; @@ -825,7 +825,7 @@ void bulk_load_service::handle_app_pausing(const bulk_load_response &response, if (!response.__isset.is_group_bulk_load_paused) { LOG_WARNING("receive bulk load response from node({}) app({}) partition({}), " "primary_status({}), but is_group_bulk_load_paused is not set", - primary_addr, + primary, app_name, pid, dsn::enum_to_string(response.primary_bulk_load_status)); @@ -836,7 +836,7 @@ void bulk_load_service::handle_app_pausing(const bulk_load_response &response, if (!kv.second.__isset.is_paused) { LOG_WARNING("receive bulk load response from node({}) app({}), partition({}), " "primary_status({}), but node({}) is_paused is not set", - primary_addr, + primary, app_name, pid, dsn::enum_to_string(response.primary_bulk_load_status), @@ -848,7 +848,7 @@ void bulk_load_service::handle_app_pausing(const bulk_load_response &response, bool is_group_paused = response.is_group_bulk_load_paused; LOG_INFO("receive bulk load response from node({}) app({}) partition({}), primary status = {}, " "is_group_bulk_load_paused = {}", - primary_addr, + primary, app_name, pid, dsn::enum_to_string(response.primary_bulk_load_status), @@ -1091,7 +1091,7 @@ void bulk_load_service::update_app_status_on_remote_storage_unlocked( int32_t app_id, bulk_load_status::type new_status, error_code err, bool should_send_request) { FAIL_POINT_INJECT_F("meta_update_app_status_on_remote_storage_unlocked", - [](absl::string_view) {}); + [](std::string_view) {}); app_bulk_load_info ainfo = _app_bulk_load_info[app_id]; auto old_status = ainfo.status; @@ -1186,7 +1186,7 @@ void bulk_load_service::update_app_status_on_remote_storage_reply(const app_bulk } // ThreadPool: THREAD_POOL_META_STATE -bool bulk_load_service::check_ever_ingestion_succeed(const partition_configuration &config, +bool bulk_load_service::check_ever_ingestion_succeed(const partition_configuration &pc, const std::string &app_name, const gpid &pid) { @@ -1201,8 +1201,8 @@ bool bulk_load_service::check_ever_ingestion_succeed(const partition_configurati } std::vector current_nodes; - current_nodes.emplace_back(config.hp_primary); - for (const auto &secondary : config.hp_secondaries) { + current_nodes.emplace_back(pc.hp_primary); + for (const auto &secondary : pc.hp_secondaries) { current_nodes.emplace_back(secondary); } @@ -1223,7 +1223,7 @@ bool bulk_load_service::check_ever_ingestion_succeed(const partition_configurati // ThreadPool: THREAD_POOL_META_STATE void bulk_load_service::partition_ingestion(const std::string &app_name, const gpid &pid) { - FAIL_POINT_INJECT_F("meta_bulk_load_partition_ingestion", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("meta_bulk_load_partition_ingestion", [](std::string_view) {}); auto app_status = get_app_bulk_load_status(pid.get_app_id()); if (app_status != bulk_load_status::BLS_INGESTING) { @@ -1242,7 +1242,7 @@ void bulk_load_service::partition_ingestion(const std::string &app_name, const g return; } - partition_configuration pconfig; + partition_configuration pc; if (!check_partition_status(app_name, pid, true, @@ -1250,16 +1250,16 @@ void bulk_load_service::partition_ingestion(const std::string &app_name, const g this, std::placeholders::_1, std::placeholders::_2), - pconfig)) { + pc)) { return; } - if (check_ever_ingestion_succeed(pconfig, app_name, pid)) { + if (check_ever_ingestion_succeed(pc, app_name, pid)) { return; } auto app = get_app(pid.get_app_id()); - if (!try_partition_ingestion(pconfig, app->helpers->contexts[pid.get_partition_index()])) { + if (!try_partition_ingestion(pc, app->helpers->contexts[pid.get_partition_index()])) { LOG_WARNING( "app({}) partition({}) couldn't execute ingestion, wait and try later", app_name, pid); tasking::enqueue(LPC_META_STATE_NORMAL, @@ -1270,24 +1270,21 @@ void bulk_load_service::partition_ingestion(const std::string &app_name, const g return; } - const auto &primary_addr = pconfig.hp_primary; - ballot meta_ballot = pconfig.ballot; - tasking::enqueue(LPC_BULK_LOAD_INGESTION, - _meta_svc->tracker(), - std::bind(&bulk_load_service::send_ingestion_request, - this, - app_name, - pid, - primary_addr, - meta_ballot), - 0, - std::chrono::seconds(bulk_load_constant::BULK_LOAD_REQUEST_INTERVAL)); + const auto &primary = pc.hp_primary; + ballot meta_ballot = pc.ballot; + tasking::enqueue( + LPC_BULK_LOAD_INGESTION, + _meta_svc->tracker(), + std::bind( + &bulk_load_service::send_ingestion_request, this, app_name, pid, primary, meta_ballot), + 0, + std::chrono::seconds(bulk_load_constant::BULK_LOAD_REQUEST_INTERVAL)); } // ThreadPool: THREAD_POOL_DEFAULT void bulk_load_service::send_ingestion_request(const std::string &app_name, const gpid &pid, - const host_port &primary_addr, + const host_port &primary, const ballot &meta_ballot) { ingestion_request req; @@ -1310,11 +1307,11 @@ void bulk_load_service::send_ingestion_request(const std::string &app_name, dsn::rpc_response_task_ptr rpc_callback = rpc::create_rpc_response_task( msg, _meta_svc->tracker(), - [this, app_name, pid, primary_addr](error_code err, ingestion_response &&resp) { - on_partition_ingestion_reply(err, std::move(resp), app_name, pid, primary_addr); + [this, app_name, pid, primary](error_code err, ingestion_response &&resp) { + on_partition_ingestion_reply(err, std::move(resp), app_name, pid, primary); }); - _meta_svc->send_request(msg, primary_addr, rpc_callback); - LOG_INFO("send ingest_request to node({}), app({}) partition({})", primary_addr, app_name, pid); + _meta_svc->send_request(msg, primary, rpc_callback); + LOG_INFO("send ingest_request to node({}), app({}) partition({})", primary, app_name, pid); } // ThreadPool: THREAD_POOL_DEFAULT @@ -1322,7 +1319,7 @@ void bulk_load_service::on_partition_ingestion_reply(error_code err, const ingestion_response &&resp, const std::string &app_name, const gpid &pid, - const host_port &primary_addr) + const host_port &primary) { if (err != ERR_OK || resp.err != ERR_OK || resp.rocksdb_error != ERR_OK) { finish_ingestion(pid); @@ -1334,7 +1331,7 @@ void bulk_load_service::on_partition_ingestion_reply(error_code err, "repeated request", app_name, pid, - primary_addr); + primary); return; } @@ -1343,7 +1340,7 @@ void bulk_load_service::on_partition_ingestion_reply(error_code err, LOG_ERROR("app({}) partition({}) on node({}) ingestion files failed, error = {}", app_name, pid, - primary_addr, + primary, err); tasking::enqueue( LPC_META_STATE_NORMAL, @@ -1358,7 +1355,7 @@ void bulk_load_service::on_partition_ingestion_reply(error_code err, "{}, retry it later", app_name, pid, - primary_addr, + primary, resp.rocksdb_error); tasking::enqueue(LPC_BULK_LOAD_INGESTION, _meta_svc->tracker(), @@ -1376,7 +1373,7 @@ void bulk_load_service::on_partition_ingestion_reply(error_code err, "error = {}", app_name, pid, - primary_addr, + primary, resp.err, resp.rocksdb_error); @@ -1392,7 +1389,7 @@ void bulk_load_service::on_partition_ingestion_reply(error_code err, LOG_INFO("app({}) partition({}) receive ingestion response from node({}) succeed", app_name, pid, - primary_addr); + primary); } // ThreadPool: THREAD_POOL_META_STATE @@ -1611,13 +1608,13 @@ void bulk_load_service::on_query_bulk_load_status(query_bulk_load_rpc rpc) response.bulk_load_states.resize(partition_count); response.__set_hp_bulk_load_states( std::vector>(partition_count)); - for (const auto & [ pid, pbls_by_hps ] : _partitions_bulk_load_state) { + for (const auto &[pid, pbls_by_hps] : _partitions_bulk_load_state) { if (pid.get_app_id() == app_id) { auto pidx = pid.get_partition_index(); response.hp_bulk_load_states[pidx] = pbls_by_hps; std::map pbls_by_addrs; - for (const auto & [ hp, pbls ] : pbls_by_hps) { + for (const auto &[hp, pbls] : pbls_by_hps) { pbls_by_addrs[dsn::dns_resolver::instance().resolve_address(hp)] = pbls; } response.bulk_load_states[pidx] = pbls_by_addrs; @@ -1662,7 +1659,7 @@ void bulk_load_service::on_clear_bulk_load(clear_bulk_load_rpc rpc) void bulk_load_service::do_clear_app_bulk_load_result(int32_t app_id, clear_bulk_load_rpc rpc) { FAIL_POINT_INJECT_F("meta_do_clear_app_bulk_load_result", - [rpc](absl::string_view) { rpc.response().err = ERR_OK; }); + [rpc](std::string_view) { rpc.response().err = ERR_OK; }); std::string bulk_load_path = get_app_bulk_load_path(app_id); _meta_svc->get_meta_storage()->delete_node_recursively( std::move(bulk_load_path), [this, app_id, bulk_load_path, rpc]() { @@ -1756,7 +1753,7 @@ void bulk_load_service::do_sync_partition(const gpid &pid, std::string &partitio // ThreadPool: THREAD_POOL_META_SERVER void bulk_load_service::try_to_continue_bulk_load() { - FAIL_POINT_INJECT_F("meta_try_to_continue_bulk_load", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("meta_try_to_continue_bulk_load", [](std::string_view) {}); zauto_read_lock l(_lock); for (const auto app_id : _bulk_load_app_id) { app_bulk_load_info ainfo = _app_bulk_load_info[app_id]; diff --git a/src/meta/meta_bulk_load_service.h b/src/meta/meta_bulk_load_service.h index 359b3fa085..a352db2072 100644 --- a/src/meta/meta_bulk_load_service.h +++ b/src/meta/meta_bulk_load_service.h @@ -35,9 +35,9 @@ #include "common/replication_other_types.h" #include "meta/meta_state_service_utils.h" #include "meta_bulk_load_ingestion_context.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_host_port.h" #include "server_state.h" +#include "task/task_tracker.h" #include "utils/error_code.h" #include "utils/flags.h" #include "utils/zlocks.h" @@ -189,7 +189,7 @@ class bulk_load_service const gpid &pid, bool always_unhealthy_check, const std::function &retry_function, - /*out*/ partition_configuration &pconfig); + /*out*/ partition_configuration &pc); void partition_bulk_load(const std::string &app_name, const gpid &pid); @@ -200,15 +200,15 @@ class bulk_load_service // if app is still in bulk load, resend bulk_load_request to primary after interval seconds void try_resend_bulk_load_request(const std::string &app_name, const gpid &pid); - void handle_app_downloading(const bulk_load_response &response, const host_port &primary_addr); + void handle_app_downloading(const bulk_load_response &response, const host_port &primary); - void handle_app_ingestion(const bulk_load_response &response, const host_port &primary_addr); + void handle_app_ingestion(const bulk_load_response &response, const host_port &primary); // when app status is `succeed, `failed`, `canceled`, meta and replica should cleanup bulk load // states - void handle_bulk_load_finish(const bulk_load_response &response, const host_port &primary_addr); + void handle_bulk_load_finish(const bulk_load_response &response, const host_port &primary); - void handle_app_pausing(const bulk_load_response &response, const host_port &primary_addr); + void handle_app_pausing(const bulk_load_response &response, const host_port &primary); // app not existed or not available during bulk load void handle_app_unavailable(int32_t app_id, const std::string &app_name); @@ -223,20 +223,20 @@ class bulk_load_service void send_ingestion_request(const std::string &app_name, const gpid &pid, - const host_port &primary_addr, + const host_port &primary, const ballot &meta_ballot); void on_partition_ingestion_reply(error_code err, const ingestion_response &&resp, const std::string &app_name, const gpid &pid, - const host_port &primary_addr); + const host_port &primary); // Called by `partition_ingestion` // - true : this partition has ever executed ingestion succeed, no need to send ingestion // request // - false: this partition has not executed ingestion or executed ingestion failed - bool check_ever_ingestion_succeed(const partition_configuration &config, + bool check_ever_ingestion_succeed(const partition_configuration &pc, const std::string &app_name, const gpid &pid); @@ -252,9 +252,9 @@ class bulk_load_service /// /// ingestion_context functions /// - bool try_partition_ingestion(const partition_configuration &config, const config_context &cc) + bool try_partition_ingestion(const partition_configuration &pc, const config_context &cc) { - return _ingestion_context->try_partition_ingestion(config, cc); + return _ingestion_context->try_partition_ingestion(pc, cc); } void finish_ingestion(const gpid &pid) { _ingestion_context->remove_partition(pid); } diff --git a/src/meta/meta_data.cpp b/src/meta/meta_data.cpp index cc6e96c01b..8354ef8956 100644 --- a/src/meta/meta_data.cpp +++ b/src/meta/meta_data.cpp @@ -30,10 +30,10 @@ #include "common/gpid.h" #include "common/replication_enums.h" #include "meta_data.h" +#include "rpc/dns_resolver.h" // IWYU pragma: keep +#include "rpc/rpc_address.h" +#include "rpc/rpc_message.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" // IWYU pragma: keep -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_message.h" #include "utils/flags.h" #include "utils/fmt_logging.h" @@ -134,8 +134,9 @@ bool construct_replica(meta_view view, const gpid &pid, int max_replica_count) // when add node to pc.last_drops, we don't remove it from our cc.drop_list CHECK(pc.hp_last_drops.empty(), "last_drops of partition({}) must be empty", pid); for (auto iter = drop_list.rbegin(); iter != drop_list.rend(); ++iter) { - if (pc.hp_last_drops.size() + 1 >= max_replica_count) + if (pc.hp_last_drops.size() + 1 >= max_replica_count) { break; + } // similar to cc.drop_list, pc.last_drop is also a stack structure HEAD_INSERT_IP_AND_HOST_PORT_BY_DNS(pc, last_drops, iter->node); LOG_INFO("construct for ({}), select {} into last_drops, ballot({}), " @@ -303,8 +304,8 @@ void config_context::check_size() { // when add learner, it is possible that replica_count > max_replica_count, so we // need to remove things from dropped only when it's not empty. - while (replica_count(*config_owner) + dropped.size() > - config_owner->max_replica_count + FLAGS_max_reserved_dropped_replicas && + while (replica_count(*pc) + dropped.size() > + pc->max_replica_count + FLAGS_max_reserved_dropped_replicas && !dropped.empty()) { dropped.erase(dropped.begin()); prefered_dropped = (int)dropped.size() - 1; @@ -377,7 +378,7 @@ int config_context::collect_drop_replica(const host_port &node, const replica_in CHECK(!in_dropped, "adjust position of existing node({}) failed, this is a bug, partition({})", node, - config_owner->pid); + pc->pid); return -1; } return in_dropped ? 1 : 0; @@ -391,7 +392,7 @@ bool config_context::check_order() if (dropped_cmp(dropped[i], dropped[i + 1]) > 0) { LOG_ERROR("check dropped order for gpid({}) failed, [{},{},{},{},{}@{}] vs " "[{},{},{},{},{}@{}]", - config_owner->pid, + pc->pid, dropped[i].node, dropped[i].time, dropped[i].ballot, @@ -474,9 +475,9 @@ void app_state_helper::on_init_partitions() context.prefered_dropped = -1; contexts.assign(owner->partition_count, context); - std::vector &partitions = owner->partitions; + auto &pcs = owner->pcs; for (unsigned int i = 0; i != owner->partition_count; ++i) { - contexts[i].config_owner = &(partitions[i]); + contexts[i].pc = &(pcs[i]); } partitions_in_progress.store(owner->partition_count); @@ -525,19 +526,20 @@ app_state::app_state(const app_info &info) : app_info(info), helpers(new app_sta log_name = info.app_name + "(" + boost::lexical_cast(info.app_id) + ")"; helpers->owner = this; - partition_configuration config; - config.ballot = 0; - config.pid.set_app_id(app_id); - config.last_committed_decree = 0; - config.max_replica_count = app_info::max_replica_count; + partition_configuration pc; + pc.ballot = 0; + pc.pid.set_app_id(app_id); + pc.last_committed_decree = 0; + pc.max_replica_count = app_info::max_replica_count; - RESET_IP_AND_HOST_PORT(config, primary); - CLEAR_IP_AND_HOST_PORT(config, secondaries); - CLEAR_IP_AND_HOST_PORT(config, last_drops); + RESET_IP_AND_HOST_PORT(pc, primary); + CLEAR_IP_AND_HOST_PORT(pc, secondaries); + CLEAR_IP_AND_HOST_PORT(pc, last_drops); - partitions.assign(app_info::partition_count, config); - for (int i = 0; i != app_info::partition_count; ++i) - partitions[i].pid.set_partition_index(i); + pcs.assign(app_info::partition_count, pc); + for (int i = 0; i != app_info::partition_count; ++i) { + pcs[i].pid.set_partition_index(i); + } helpers->on_init_partitions(); } diff --git a/src/meta/meta_data.h b/src/meta/meta_data.h index 06ac12c9a7..239c69435c 100644 --- a/src/meta/meta_data.h +++ b/src/meta/meta_data.h @@ -46,9 +46,9 @@ #include "meta/duplication/duplication_info.h" #include "meta_admin_types.h" #include "metadata_types.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/enum_helper.h" @@ -196,7 +196,7 @@ struct serving_replica class config_context { public: - partition_configuration *config_owner; + partition_configuration *pc; config_status stage; // for server state's update config management //[ @@ -264,18 +264,12 @@ class config_context struct partition_configuration_stateless { - partition_configuration &config; - partition_configuration_stateless(partition_configuration &pc) : config(pc) {} - std::vector &workers() { return config.hp_last_drops; } - std::vector &hosts() { return config.hp_secondaries; } - bool is_host(const host_port &node) const - { - return utils::contains(config.hp_secondaries, node); - } - bool is_worker(const host_port &node) const - { - return utils::contains(config.hp_last_drops, node); - } + partition_configuration &pc; + partition_configuration_stateless(partition_configuration &_pc) : pc(_pc) {} + std::vector &workers() { return pc.hp_last_drops; } + std::vector &hosts() { return pc.hp_secondaries; } + bool is_host(const host_port &node) const { return utils::contains(pc.hp_secondaries, node); } + bool is_worker(const host_port &node) const { return utils::contains(pc.hp_last_drops, node); } bool is_member(const host_port &node) const { return is_host(node) || is_worker(node); } }; @@ -362,7 +356,7 @@ class app_state : public app_info public: const char *get_logname() const { return log_name.c_str(); } std::shared_ptr helpers; - std::vector partitions; + std::vector pcs; std::map duplications; static std::shared_ptr create(const app_info &info); @@ -462,7 +456,7 @@ inline const partition_configuration *get_config(const app_mapper &apps, const d auto iter = apps.find(gpid.get_app_id()); if (iter == apps.end() || iter->second->status == app_status::AS_DROPPED) return nullptr; - return &(iter->second->partitions[gpid.get_partition_index()]); + return &(iter->second->pcs[gpid.get_partition_index()]); } inline partition_configuration *get_config(app_mapper &apps, const dsn::gpid &gpid) @@ -470,7 +464,7 @@ inline partition_configuration *get_config(app_mapper &apps, const dsn::gpid &gp auto iter = apps.find(gpid.get_app_id()); if (iter == apps.end() || iter->second->status == app_status::AS_DROPPED) return nullptr; - return &(iter->second->partitions[gpid.get_partition_index()]); + return &(iter->second->pcs[gpid.get_partition_index()]); } inline const config_context *get_config_context(const app_mapper &apps, const dsn::gpid &gpid) @@ -510,29 +504,30 @@ inline health_status partition_health_status(const partition_configuration &pc, int mutation_2pc_min_replica_count) { if (!pc.hp_primary) { - if (pc.hp_secondaries.empty()) + if (pc.hp_secondaries.empty()) { return HS_DEAD; - else - return HS_UNREADABLE; - } else { - int n = pc.hp_secondaries.size() + 1; - if (n < mutation_2pc_min_replica_count) - return HS_UNWRITABLE; - else if (n < pc.max_replica_count) - return HS_WRITABLE_ILL; - else - return HS_HEALTHY; + } + return HS_UNREADABLE; + } + + const auto replica_count = pc.hp_secondaries.size() + 1; + if (replica_count < mutation_2pc_min_replica_count) { + return HS_UNWRITABLE; + } + + if (replica_count < pc.max_replica_count) { + return HS_WRITABLE_ILL; } + return HS_HEALTHY; } inline void for_each_available_app(const app_mapper &apps, const std::function &)> &action) { - for (const auto &p : apps) { - if (p.second->status == app_status::AS_AVAILABLE) { - if (!action(p.second)) - break; + for (const auto &[_, as] : apps) { + if (as->status == app_status::AS_AVAILABLE && !action(as)) { + break; } } } @@ -548,6 +543,7 @@ inline int count_partitions(const app_mapper &apps) void when_update_replicas(config_type::type t, const std::function &func); +// TODO(yingchun): refactor to deal both rpc_address and host_port template void maintain_drops(/*inout*/ std::vector &drops, const T &node, config_type::type t) { diff --git a/src/meta/meta_http_service.cpp b/src/meta/meta_http_service.cpp index 8764096441..efc605d58d 100644 --- a/src/meta/meta_http_service.cpp +++ b/src/meta/meta_http_service.cpp @@ -16,6 +16,7 @@ // under the License. #include +#include #include #include #include @@ -46,8 +47,8 @@ #include "meta_admin_types.h" #include "meta_http_service.h" #include "meta_server_failure_detector.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_host_port.h" #include "server_load_balancer.h" #include "server_state.h" #include "utils/error_code.h" @@ -141,40 +142,33 @@ void meta_http_service::get_app_handler(const http_request &req, http_response & int fully_healthy = 0; int write_unhealthy = 0; int read_unhealthy = 0; - for (const auto &p : response.partitions) { + for (const auto &pc : response.partitions) { int replica_count = 0; - if (p.hp_primary) { + if (pc.hp_primary) { replica_count++; - node_stat[p.hp_primary].first++; + node_stat[pc.hp_primary].first++; total_prim_count++; } - replica_count += p.hp_secondaries.size(); - total_sec_count += p.hp_secondaries.size(); - if (p.hp_primary) { - if (replica_count >= p.max_replica_count) + replica_count += pc.hp_secondaries.size(); + total_sec_count += pc.hp_secondaries.size(); + if (pc.hp_primary) { + if (replica_count >= pc.max_replica_count) { fully_healthy++; - else if (replica_count < 2) + } else if (replica_count < 2) { write_unhealthy++; + } } else { write_unhealthy++; read_unhealthy++; } - tp_details.add_row(p.pid.get_partition_index()); - tp_details.append_data(p.ballot); - std::stringstream oss; - oss << replica_count << "/" << p.max_replica_count; - tp_details.append_data(oss.str()); - tp_details.append_data(p.hp_primary ? p.hp_primary.to_string() : "-"); - oss.str(""); - oss << "["; - for (int j = 0; j < p.hp_secondaries.size(); j++) { - if (j != 0) - oss << ","; - oss << p.hp_secondaries[j]; - node_stat[p.hp_secondaries[j]].second++; + tp_details.add_row(pc.pid.get_partition_index()); + tp_details.append_data(pc.ballot); + tp_details.append_data(fmt::format("{}/{}", replica_count, pc.max_replica_count)); + tp_details.append_data(pc.hp_primary ? pc.hp_primary.to_string() : "-"); + tp_details.append_data(fmt::format("[{}]", fmt::join(pc.hp_secondaries, ","))); + for (const auto &secondary : pc.hp_secondaries) { + node_stat[secondary].second++; } - oss << "]"; - tp_details.append_data(oss.str()); } mtp.add(std::move(tp_details)); @@ -322,18 +316,18 @@ void meta_http_service::list_app_handler(const http_request &req, http_response int fully_healthy = 0; int write_unhealthy = 0; int read_unhealthy = 0; - for (int i = 0; i < response.partitions.size(); i++) { - const dsn::partition_configuration &p = response.partitions[i]; + for (const auto &pc : response.partitions) { int replica_count = 0; - if (p.hp_primary) { + if (pc.hp_primary) { replica_count++; } - replica_count += p.hp_secondaries.size(); - if (p.hp_primary) { - if (replica_count >= p.max_replica_count) + replica_count += pc.hp_secondaries.size(); + if (pc.hp_primary) { + if (replica_count >= pc.max_replica_count) { fully_healthy++; - else if (replica_count < 2) + } else if (replica_count < 2) { write_unhealthy++; + } } else { write_unhealthy++; read_unhealthy++; @@ -413,16 +407,15 @@ void meta_http_service::list_node_handler(const http_request &req, http_response CHECK_EQ(app.app_id, response_app.app_id); CHECK_EQ(app.partition_count, response_app.partition_count); - for (int i = 0; i < response_app.partitions.size(); i++) { - const dsn::partition_configuration &p = response_app.partitions[i]; - if (p.hp_primary) { - auto find = tmp_map.find(p.hp_primary); + for (const auto &pc : response_app.partitions) { + if (pc.hp_primary) { + auto find = tmp_map.find(pc.hp_primary); if (find != tmp_map.end()) { find->second.primary_count++; } } - for (int j = 0; j < p.hp_secondaries.size(); j++) { - auto find = tmp_map.find(p.hp_secondaries[j]); + for (const auto &secondary : pc.hp_secondaries) { + auto find = tmp_map.find(secondary); if (find != tmp_map.end()) { find->second.secondary_count++; } diff --git a/src/meta/meta_rpc_types.h b/src/meta/meta_rpc_types.h index 42539e89b4..200b409e17 100644 --- a/src/meta/meta_rpc_types.h +++ b/src/meta/meta_rpc_types.h @@ -24,7 +24,7 @@ #include "backup_types.h" #include "consensus_types.h" #include "replica_admin_types.h" -#include "runtime/rpc/rpc_holder.h" +#include "rpc/rpc_holder.h" namespace dsn { namespace replication { diff --git a/src/meta/meta_server_failure_detector.cpp b/src/meta/meta_server_failure_detector.cpp index 56cc04e46b..8202fff5dd 100644 --- a/src/meta/meta_server_failure_detector.cpp +++ b/src/meta/meta_server_failure_detector.cpp @@ -30,14 +30,14 @@ #include #include -#include "absl/strings/string_view.h" +#include #include "fd_types.h" #include "meta/meta_options.h" #include "meta/meta_service.h" #include "runtime/app_model.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" #include "runtime/serverlet.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/distributed_lock_service.h" #include "utils/error_code.h" @@ -107,7 +107,7 @@ void meta_server_failure_detector::on_worker_connected(const host_port &node) bool meta_server_failure_detector::get_leader(host_port *leader) { - FAIL_POINT_INJECT_F("meta_server_failure_detector_get_leader", [leader](absl::string_view str) { + FAIL_POINT_INJECT_F("meta_server_failure_detector_get_leader", [leader](std::string_view str) { /// the format of str is : true#{ip}:{port} or false#{ip}:{port} auto pos = str.find("#"); // get leader host_port @@ -341,5 +341,5 @@ meta_server_failure_detector::get_stability_map_for_test() { return &_stablity; } -} -} +} // namespace replication +} // namespace dsn diff --git a/src/meta/meta_server_failure_detector.h b/src/meta/meta_server_failure_detector.h index a4f7b6d122..873b9b8e84 100644 --- a/src/meta/meta_server_failure_detector.h +++ b/src/meta/meta_server_failure_detector.h @@ -33,9 +33,9 @@ #include #include "failure_detector/failure_detector.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task.h" +#include "task/task.h" #include "utils/fmt_logging.h" #include "utils/zlocks.h" @@ -43,10 +43,12 @@ namespace dsn { namespace dist { class distributed_lock_service; } // namespace dist + namespace fd { class beacon_ack; class beacon_msg; } // namespace fd + template class rpc_replier; @@ -58,6 +60,7 @@ class meta_service; namespace test { class test_checker; } + class meta_server_failure_detector : public fd::failure_detector { public: @@ -154,5 +157,5 @@ class meta_server_failure_detector : public fd::failure_detector void set_leader_for_test(const host_port &leader_host_port, bool is_myself_leader); stability_map *get_stability_map_for_test(); }; -} -} +} // namespace replication +} // namespace dsn diff --git a/src/meta/meta_service.cpp b/src/meta/meta_service.cpp index b619bd55b7..6a07e3c91d 100644 --- a/src/meta/meta_service.cpp +++ b/src/meta/meta_service.cpp @@ -24,7 +24,6 @@ * THE SOFTWARE. */ -#include // IWYU pragma: no_include // IWYU pragma: no_include #include @@ -32,6 +31,7 @@ #include #include #include +#include #include #include @@ -57,11 +57,11 @@ #include "partition_split_types.h" #include "ranger/ranger_resource_policy_manager.h" #include "remote_cmd/remote_command.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/task/async_calls.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" #include "server_load_balancer.h" #include "server_state.h" +#include "task/async_calls.h" #include "utils/autoref_ptr.h" #include "utils/command_manager.h" #include "utils/factory_store.h" @@ -756,9 +756,9 @@ void meta_service::on_query_configuration_by_index(configuration_query_by_index_ host_port forward_hp; if (!check_status_and_authz(rpc, &forward_hp)) { if (forward_hp) { - partition_configuration config; - SET_IP_AND_HOST_PORT_BY_DNS(config, primary, forward_hp); - response.partitions.push_back(std::move(config)); + partition_configuration pc; + SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, forward_hp); + response.partitions.push_back(std::move(pc)); } return; } @@ -982,10 +982,11 @@ void meta_service::on_add_duplication(duplication_add_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _dup_svc->add_duplication(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _dup_svc->add_duplication(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_modify_duplication(duplication_modify_rpc rpc) @@ -998,10 +999,11 @@ void meta_service::on_modify_duplication(duplication_modify_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _dup_svc->modify_duplication(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _dup_svc->modify_duplication(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_query_duplication_info(duplication_query_rpc rpc) @@ -1023,16 +1025,17 @@ void meta_service::on_duplication_sync(duplication_sync_rpc rpc) return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { - if (_dup_svc) { - _dup_svc->duplication_sync(std::move(rpc)); - } else { - rpc.response().err = ERR_SERVICE_NOT_ACTIVE; - } - }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { + if (_dup_svc) { + _dup_svc->duplication_sync(std::move(rpc)); + } else { + rpc.response().err = ERR_SERVICE_NOT_ACTIVE; + } + }, + server_state::sStateHash); } void meta_service::recover_duplication_from_meta_state() @@ -1113,10 +1116,11 @@ void meta_service::on_start_partition_split(start_split_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _split_svc->start_partition_split(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _split_svc->start_partition_split(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_control_partition_split(control_split_rpc rpc) @@ -1130,10 +1134,11 @@ void meta_service::on_control_partition_split(control_split_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _split_svc->control_partition_split(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _split_svc->control_partition_split(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_query_partition_split(query_split_rpc rpc) @@ -1156,10 +1161,11 @@ void meta_service::on_register_child_on_meta(register_child_rpc rpc) return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _split_svc->register_child_on_meta(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _split_svc->register_child_on_meta(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_notify_stop_split(notify_stop_split_rpc rpc) @@ -1172,10 +1178,11 @@ void meta_service::on_notify_stop_split(notify_stop_split_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _split_svc->notify_stop_split(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _split_svc->notify_stop_split(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_query_child_state(query_child_state_rpc rpc) @@ -1216,10 +1223,11 @@ void meta_service::on_control_bulk_load(control_bulk_load_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _bulk_load_svc->on_control_bulk_load(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _bulk_load_svc->on_control_bulk_load(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_query_bulk_load_status(query_bulk_load_rpc rpc) @@ -1247,10 +1255,11 @@ void meta_service::on_clear_bulk_load(clear_bulk_load_rpc rpc) rpc.response().err = ERR_SERVICE_NOT_ACTIVE; return; } - tasking::enqueue(LPC_META_STATE_NORMAL, - tracker(), - [this, rpc]() { _bulk_load_svc->on_clear_bulk_load(std::move(rpc)); }, - server_state::sStateHash); + tasking::enqueue( + LPC_META_STATE_NORMAL, + tracker(), + [this, rpc]() { _bulk_load_svc->on_clear_bulk_load(std::move(rpc)); }, + server_state::sStateHash); } void meta_service::on_start_backup_app(start_backup_app_rpc rpc) diff --git a/src/meta/meta_service.h b/src/meta/meta_service.h index ad7afc2355..0630c68131 100644 --- a/src/meta/meta_service.h +++ b/src/meta/meta_service.h @@ -46,17 +46,17 @@ #include "meta_options.h" #include "meta_rpc_types.h" #include "meta_server_failure_detector.h" +#include "rpc/dns_resolver.h" +#include "rpc/network.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "security/access_controller.h" #include "runtime/serverlet.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "security/access_controller.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/enum_helper.h" #include "utils/error_code.h" @@ -72,6 +72,7 @@ namespace ranger { class ranger_resource_policy_manager; } // namespace ranger namespace dist { + class meta_state_service; } // namespace dist @@ -95,6 +96,7 @@ class test_checker; DEFINE_TASK_CODE(LPC_DEFAULT_CALLBACK, TASK_PRIORITY_COMMON, dsn::THREAD_POOL_DEFAULT) enum class meta_op_status + { FREE = 0, RECALL, diff --git a/src/meta/meta_split_service.cpp b/src/meta/meta_split_service.cpp index 00f4449a54..b1e7cd708b 100644 --- a/src/meta/meta_split_service.cpp +++ b/src/meta/meta_split_service.cpp @@ -40,10 +40,10 @@ #include "meta_split_service.h" #include "meta_state_service_utils.h" #include "metadata_types.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/async_calls.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" +#include "task/async_calls.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" @@ -119,15 +119,15 @@ void meta_split_service::do_start_partition_split(std::shared_ptr app app->helpers->split_states.splitting_count = app->partition_count; app->partition_count *= 2; app->helpers->contexts.resize(app->partition_count); - app->partitions.resize(app->partition_count); + app->pcs.resize(app->partition_count); _state->get_table_metric_entities().resize_partitions(app->app_id, app->partition_count); app->envs[replica_envs::SPLIT_VALIDATE_PARTITION_HASH] = "true"; for (int i = 0; i < app->partition_count; ++i) { - app->helpers->contexts[i].config_owner = &app->partitions[i]; + app->helpers->contexts[i].pc = &app->pcs[i]; if (i >= app->partition_count / 2) { // child partitions - app->partitions[i].ballot = invalid_ballot; - app->partitions[i].pid = gpid(app->app_id, i); + app->pcs[i].ballot = invalid_ballot; + app->pcs[i].pid = gpid(app->app_id, i); } else { // parent partitions app->helpers->split_states.status[i] = split_status::SPLITTING; } @@ -162,17 +162,17 @@ void meta_split_service::register_child_on_meta(register_child_rpc rpc) const gpid &parent_gpid = request.parent_config.pid; const gpid &child_gpid = request.child_config.pid; - const auto &parent_config = app->partitions[parent_gpid.get_partition_index()]; - if (request.parent_config.ballot != parent_config.ballot) { + const auto &parent_pc = app->pcs[parent_gpid.get_partition_index()]; + if (request.parent_config.ballot != parent_pc.ballot) { LOG_ERROR("app({}) partition({}) register child({}) failed, request is outdated, request " "parent ballot = {}, local parent ballot = {}", app_name, parent_gpid, child_gpid, request.parent_config.ballot, - parent_config.ballot); + parent_pc.ballot); response.err = ERR_INVALID_VERSION; - response.parent_config = parent_config; + response.parent_config = parent_pc; return; } @@ -192,7 +192,7 @@ void meta_split_service::register_child_on_meta(register_child_rpc rpc) parent_gpid, child_gpid); response.err = ERR_INVALID_STATE; - response.parent_config = parent_config; + response.parent_config = parent_pc; return; } @@ -202,14 +202,14 @@ void meta_split_service::register_child_on_meta(register_child_rpc rpc) "duplicated register request, app({}) child partition({}) has already been registered", app_name, child_gpid); - const auto &child_config = app->partitions[child_gpid.get_partition_index()]; - CHECK_GT_MSG(child_config.ballot, + const auto &child_pc = app->pcs[child_gpid.get_partition_index()]; + CHECK_GT_MSG(child_pc.ballot, 0, "app({}) partition({}) should have been registered", app_name, child_gpid); response.err = ERR_CHILD_REGISTERED; - response.parent_config = parent_config; + response.parent_config = parent_pc; return; } @@ -284,15 +284,15 @@ void meta_split_service::on_add_child_on_remote_storage_reply(error_code ec, (ec == ERR_NODE_ALREADY_EXIST && create_new)) { // retry register child on remote storage bool retry_create_new = (ec == ERR_TIMEOUT) ? create_new : false; int delay = (ec == ERR_TIMEOUT) ? 1 : 0; - parent_context.pending_sync_task = - tasking::enqueue(LPC_META_STATE_HIGH, - nullptr, - [this, parent_context, rpc, retry_create_new]() mutable { - parent_context.pending_sync_task = - add_child_on_remote_storage(rpc, retry_create_new); - }, - 0, - std::chrono::seconds(delay)); + parent_context.pending_sync_task = tasking::enqueue( + LPC_META_STATE_HIGH, + nullptr, + [this, parent_context, rpc, retry_create_new]() mutable { + parent_context.pending_sync_task = + add_child_on_remote_storage(rpc, retry_create_new); + }, + 0, + std::chrono::seconds(delay)); return; } CHECK_EQ_MSG(ec, ERR_OK, "we can't handle this right now"); @@ -307,16 +307,17 @@ void meta_split_service::on_add_child_on_remote_storage_reply(error_code ec, update_child_request->type = config_type::CT_REGISTER_CHILD; SET_OBJ_IP_AND_HOST_PORT(*update_child_request, node, request, primary); - partition_configuration child_config = app->partitions[child_gpid.get_partition_index()]; - child_config.secondaries = request.child_config.secondaries; - child_config.__set_hp_secondaries(request.child_config.hp_secondaries); + // TODO(yingchun): should use conference? + auto child_pc = app->pcs[child_gpid.get_partition_index()]; + child_pc.secondaries = request.child_config.secondaries; + child_pc.__set_hp_secondaries(request.child_config.hp_secondaries); _state->update_configuration_locally(*app, update_child_request); if (parent_context.msg) { response.err = ERR_OK; response.app = *app; - response.parent_config = app->partitions[parent_gpid.get_partition_index()]; - response.child_config = app->partitions[child_gpid.get_partition_index()]; + response.parent_config = app->pcs[parent_gpid.get_partition_index()]; + response.child_config = app->pcs[child_gpid.get_partition_index()]; parent_context.msg = nullptr; } parent_context.pending_sync_task = nullptr; @@ -405,8 +406,8 @@ void meta_split_service::do_control_single(std::shared_ptr app, contr auto iter = app->helpers->split_states.status.find(parent_pidx); if (iter == app->helpers->split_states.status.end()) { - response.err = - control_type == split_control_type::PAUSE ? ERR_CHILD_REGISTERED : ERR_INVALID_STATE; + response.err = control_type == split_control_type::PAUSE ? ERR_CHILD_REGISTERED + : ERR_INVALID_STATE; response.__set_hint_msg(fmt::format("partition[{}] is not splitting", parent_pidx)); LOG_ERROR("{} split for app({}) failed, {}", control_type_str(control_type), @@ -562,7 +563,7 @@ void meta_split_service::do_cancel_partition_split(std::shared_ptr ap app->partition_count /= 2; app->helpers->contexts.resize(app->partition_count); - app->partitions.resize(app->partition_count); + app->pcs.resize(app->partition_count); _state->get_table_metric_entities().resize_partitions(app->app_id, app->partition_count); }; @@ -597,7 +598,7 @@ void meta_split_service::query_child_state(query_child_state_rpc rpc) app_name); auto child_pidx = parent_pid.get_partition_index() + request.partition_count; - if (app->partitions[child_pidx].ballot == invalid_ballot) { + if (app->pcs[child_pidx].ballot == invalid_ballot) { response.err = ERR_INVALID_STATE; LOG_ERROR("app({}) parent partition({}) split has been canceled", app_name, parent_pid); return; @@ -606,7 +607,7 @@ void meta_split_service::query_child_state(query_child_state_rpc rpc) "app({}) child partition({}.{}) is ready", app_name, parent_pid.get_app_id(), child_pidx); response.err = ERR_OK; response.__set_partition_count(app->partition_count); - response.__set_child_config(app->partitions[child_pidx]); + response.__set_child_config(app->pcs[child_pidx]); } } // namespace replication diff --git a/src/meta/meta_split_service.h b/src/meta/meta_split_service.h index d3103ee474..870f01c0ac 100644 --- a/src/meta/meta_split_service.h +++ b/src/meta/meta_split_service.h @@ -20,7 +20,7 @@ #include "common/partition_split_common.h" #include "partition_split_types.h" -#include "runtime/task/task.h" +#include "task/task.h" #include "server_state.h" namespace dsn { diff --git a/src/meta/meta_state_service.h b/src/meta/meta_state_service.h index d79f1b3652..dab812ef98 100644 --- a/src/meta/meta_state_service.h +++ b/src/meta/meta_state_service.h @@ -31,9 +31,9 @@ #include #include -#include "runtime/task/future_types.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/future_types.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/error_code.h" @@ -177,5 +177,5 @@ class meta_state_service const err_stringv_callback &cb_get_children, dsn::task_tracker *tracker = nullptr) = 0; }; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/meta/meta_state_service_simple.cpp b/src/meta/meta_state_service_simple.cpp index aa54612032..f875699d53 100644 --- a/src/meta/meta_state_service_simple.cpp +++ b/src/meta/meta_state_service_simple.cpp @@ -37,8 +37,8 @@ #include "rocksdb/slice.h" #include "rocksdb/status.h" #include "runtime/service_app.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" +#include "task/async_calls.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/binary_reader.h" #include "utils/env.h" @@ -237,8 +237,8 @@ error_code meta_state_service_simple::apply_transaction( error_code meta_state_service_simple::initialize(const std::vector &args) { - const char *work_dir = - args.empty() ? service_app::current_service_app_info().data_dir.c_str() : args[0].c_str(); + const char *work_dir = args.empty() ? service_app::current_service_app_info().data_dir.c_str() + : args[0].c_str(); _offset = 0; std::string log_path = dsn::utils::filesystem::path_combine(work_dir, "meta_state_service.log"); @@ -426,9 +426,10 @@ task_ptr meta_state_service_simple::submit_transaction( CHECK_EQ_MSG(dest - batch.get(), total_size, "memcpy error"); task_ptr task(new error_code_future(cb_code, cb_transaction, 0)); task->set_tracker(tracker); - write_log(blob(batch, total_size), - [this, t_entries] { return apply_transaction(t_entries); }, - task); + write_log( + blob(batch, total_size), + [this, t_entries] { return apply_transaction(t_entries); }, + task); return task; } } @@ -441,9 +442,10 @@ task_ptr meta_state_service_simple::create_node(const std::string &node, { task_ptr task(new error_code_future(cb_code, cb_create, 0)); task->set_tracker(tracker); - write_log(create_node_log::get_log(node, value), - [=] { return create_node_internal(node, value); }, - task); + write_log( + create_node_log::get_log(node, value), + [=] { return create_node_internal(node, value); }, + task); return task; } @@ -455,9 +457,10 @@ task_ptr meta_state_service_simple::delete_node(const std::string &node, { task_ptr task(new error_code_future(cb_code, cb_delete, 0)); task->set_tracker(tracker); - write_log(delete_node_log::get_log(node, recursively_delete), - [=] { return delete_node_internal(node, recursively_delete); }, - task); + write_log( + delete_node_log::get_log(node, recursively_delete), + [=] { return delete_node_internal(node, recursively_delete); }, + task); return task; } diff --git a/src/meta/meta_state_service_simple.h b/src/meta/meta_state_service_simple.h index 6a415531f6..c4e9298ed5 100644 --- a/src/meta/meta_state_service_simple.h +++ b/src/meta/meta_state_service_simple.h @@ -36,12 +36,12 @@ #include #include "meta/meta_state_service.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/future_types.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" -#include "runtime/task/task_tracker.h" +#include "rpc/serialization.h" +#include "task/future_types.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_spec.h" +#include "task/task_tracker.h" #include "utils/binary_writer.h" #include "utils/blob.h" #include "utils/error_code.h" @@ -225,12 +225,12 @@ class meta_state_service_simple : public meta_state_service shared_blob.length() - sizeof(log_header); return shared_blob; } - static void write(binary_writer &writer, const Head &head, const Tail &... tail) + static void write(binary_writer &writer, const Head &head, const Tail &...tail) { marshall(writer, head, DSF_THRIFT_BINARY); log_struct::write(writer, tail...); } - static void parse(binary_reader &reader, Head &head, Tail &... tail) + static void parse(binary_reader &reader, Head &head, Tail &...tail) { unmarshall(reader, head, DSF_THRIFT_BINARY); log_struct::parse(reader, tail...); diff --git a/src/meta/meta_state_service_utils_impl.h b/src/meta/meta_state_service_utils_impl.h index f6a1b75918..c13063bdb0 100644 --- a/src/meta/meta_state_service_utils_impl.h +++ b/src/meta/meta_state_service_utils_impl.h @@ -127,11 +127,12 @@ struct on_create_recursively : operation args->nodes.pop(); } - remote_storage()->create_node(_cur_path, - LPC_META_STATE_HIGH, - [op = *this](error_code ec) mutable { op.on_error(ec); }, - args->nodes.empty() ? args->val : blob(), - tracker()); + remote_storage()->create_node( + _cur_path, + LPC_META_STATE_HIGH, + [op = *this](error_code ec) mutable { op.on_error(ec); }, + args->nodes.empty() ? args->val : blob(), + tracker()); } void on_error(error_code ec) @@ -167,11 +168,12 @@ struct on_create : operation void run() { - remote_storage()->create_node(args->node, - LPC_META_STATE_HIGH, - [op = *this](error_code ec) mutable { op.on_error(ec); }, - args->val, - tracker()); + remote_storage()->create_node( + args->node, + LPC_META_STATE_HIGH, + [op = *this](error_code ec) mutable { op.on_error(ec); }, + args->val, + tracker()); } void on_error(error_code ec) @@ -197,11 +199,12 @@ struct on_delete : operation void run() { - remote_storage()->delete_node(args->node, - args->is_recursively_delete, - LPC_META_STATE_HIGH, - [op = *this](error_code ec) mutable { op.on_error(ec); }, - tracker()); + remote_storage()->delete_node( + args->node, + args->is_recursively_delete, + LPC_META_STATE_HIGH, + [op = *this](error_code ec) mutable { op.on_error(ec); }, + tracker()); } void on_error(error_code ec) @@ -211,8 +214,8 @@ struct on_delete : operation return; } - auto type = - args->is_recursively_delete ? op_type::OP_DELETE_RECURSIVELY : op_type::OP_DELETE; + auto type = args->is_recursively_delete ? op_type::OP_DELETE_RECURSIVELY + : op_type::OP_DELETE; operation::on_error(this, type, ec, args->node); } }; @@ -257,11 +260,12 @@ struct on_set_data : operation void run() { - remote_storage()->set_data(args->node, - args->val, - LPC_META_STATE_HIGH, - [op = *this](error_code ec) mutable { op.on_error(ec); }, - tracker()); + remote_storage()->set_data( + args->node, + args->val, + LPC_META_STATE_HIGH, + [op = *this](error_code ec) mutable { op.on_error(ec); }, + tracker()); } void on_error(error_code ec) diff --git a/src/meta/meta_state_service_zookeeper.cpp b/src/meta/meta_state_service_zookeeper.cpp index c300d8f50a..e9c659b6a5 100644 --- a/src/meta/meta_state_service_zookeeper.cpp +++ b/src/meta/meta_state_service_zookeeper.cpp @@ -58,6 +58,7 @@ class zoo_transaction : public meta_state_service::transaction_entries virtual error_code get_result(unsigned int entry_index) override; std::shared_ptr packet() { return _pkt; } + private: std::shared_ptr _pkt; }; @@ -262,47 +263,49 @@ task_ptr meta_state_service_zookeeper::delete_node(const std::string &node, { error_code_future_ptr tsk(new error_code_future(cb_code, cb_delete, 0)); tsk->set_tracker(tracker); - err_stringv_callback after_get_children = [node, recursively_delete, cb_code, tsk, this]( - error_code err, const std::vector &children) { - if (ERR_OK != err) - tsk->enqueue_with(err); - else if (children.empty()) - delete_empty_node( - node, cb_code, [tsk](error_code err) { tsk->enqueue_with(err); }, &_tracker); - else if (!recursively_delete) - tsk->enqueue_with(ERR_INVALID_PARAMETERS); - else { - std::atomic_int *child_count = new std::atomic_int(); - std::atomic_int *error_count = new std::atomic_int(); - - child_count->store((int)children.size()); - error_count->store(0); - - for (auto &child : children) { - delete_node(node + "/" + child, - true, - cb_code, - [=](error_code err) { - if (ERR_OK != err) - ++(*error_count); - int result = --(*child_count); - if (0 == result) { - if (0 == *error_count) - delete_empty_node( - node, - cb_code, - [tsk](error_code err) { tsk->enqueue_with(err); }, - &_tracker); - else - tsk->enqueue_with(ERR_FILE_OPERATION_FAILED); - delete child_count; - delete error_count; - } - }, - &_tracker); + err_stringv_callback after_get_children = + [node, recursively_delete, cb_code, tsk, this](error_code err, + const std::vector &children) { + if (ERR_OK != err) + tsk->enqueue_with(err); + else if (children.empty()) + delete_empty_node( + node, cb_code, [tsk](error_code err) { tsk->enqueue_with(err); }, &_tracker); + else if (!recursively_delete) + tsk->enqueue_with(ERR_INVALID_PARAMETERS); + else { + std::atomic_int *child_count = new std::atomic_int(); + std::atomic_int *error_count = new std::atomic_int(); + + child_count->store((int)children.size()); + error_count->store(0); + + for (auto &child : children) { + delete_node( + node + "/" + child, + true, + cb_code, + [=](error_code err) { + if (ERR_OK != err) + ++(*error_count); + int result = --(*child_count); + if (0 == result) { + if (0 == *error_count) + delete_empty_node( + node, + cb_code, + [tsk](error_code err) { tsk->enqueue_with(err); }, + &_tracker); + else + tsk->enqueue_with(ERR_FILE_OPERATION_FAILED); + delete child_count; + delete error_count; + } + }, + &_tracker); + } } - } - }; + }; get_children(node, cb_code, after_get_children, &_tracker); return tsk; @@ -429,5 +432,5 @@ void meta_state_service_zookeeper::visit_zookeeper_internal(ref_this, break; } } -} -} +} // namespace dist +} // namespace dsn diff --git a/src/meta/meta_state_service_zookeeper.h b/src/meta/meta_state_service_zookeeper.h index 9c450f543e..f7194e29e1 100644 --- a/src/meta/meta_state_service_zookeeper.h +++ b/src/meta/meta_state_service_zookeeper.h @@ -32,10 +32,10 @@ #include #include "meta/meta_state_service.h" -#include "runtime/task/future_types.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "task/future_types.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/error_code.h" @@ -119,5 +119,5 @@ class meta_state_service_zookeeper : public meta_state_service, public ref_count task_ptr callback, void *result /*zookeeper_session::zoo_opcontext**/); }; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/meta/partition_guardian.cpp b/src/meta/partition_guardian.cpp index 37f627f9ea..440ff96d96 100644 --- a/src/meta/partition_guardian.cpp +++ b/src/meta/partition_guardian.cpp @@ -37,7 +37,7 @@ #include "meta/server_load_balancer.h" #include "meta/server_state.h" #include "meta/table_metrics.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" #include "utils/flags.h" #include "utils/fmt_logging.h" #include "utils/metrics.h" @@ -86,14 +86,15 @@ pc_status partition_guardian::cure(meta_view view, CHECK(acts.empty(), ""); pc_status status; - if (!pc.hp_primary) + if (!pc.hp_primary) { status = on_missing_primary(view, gpid); - else if (static_cast(pc.hp_secondaries.size()) + 1 < pc.max_replica_count) + } else if (static_cast(pc.hp_secondaries.size()) + 1 < pc.max_replica_count) { status = on_missing_secondary(view, gpid); - else if (static_cast(pc.hp_secondaries.size()) >= pc.max_replica_count) + } else if (static_cast(pc.hp_secondaries.size()) >= pc.max_replica_count) { status = on_redundant_secondary(view, gpid); - else + } else { status = pc_status::healthy; + } if (!acts.empty()) { action = *acts.front(); @@ -125,9 +126,9 @@ void partition_guardian::reconfig(meta_view view, const configuration_update_req if (request.type == config_type::CT_DROP_PARTITION) { cc->serving.clear(); - const std::vector &config_dropped = request.config.hp_last_drops; - for (const auto &drop_node : config_dropped) { - cc->record_drop_history(drop_node); + const auto &last_drops = request.config.hp_last_drops; + for (const auto &last_drop : last_drops) { + cc->record_drop_history(last_drop); } } else { when_update_replicas(request.type, [cc, &request](bool is_adding) { @@ -248,9 +249,9 @@ pc_status partition_guardian::on_missing_primary(meta_view &view, const dsn::gpi // try to upgrade a secondary to primary if the primary is missing if (!pc.hp_secondaries.empty()) { RESET_IP_AND_HOST_PORT(action, node); - for (const auto &hp_secondary : pc.hp_secondaries) { - const auto ns = get_node_state(*(view.nodes), hp_secondary, false); - CHECK_NOTNULL(ns, "invalid secondary: {}", hp_secondary); + for (const auto &secondary : pc.hp_secondaries) { + const auto ns = get_node_state(*(view.nodes), secondary, false); + CHECK_NOTNULL(ns, "invalid secondary: {}", secondary); if (dsn_unlikely(!ns->alive())) { continue; } @@ -515,7 +516,7 @@ pc_status partition_guardian::on_missing_secondary(meta_view &view, const dsn::g configuration_proposal_action action; bool is_emergency = false; - if (cc.config_owner->max_replica_count > + if (cc.pc->max_replica_count > _svc->get_options().app_mutation_2pc_min_replica_count(pc.max_replica_count) && replica_count(pc) < _svc->get_options().app_mutation_2pc_min_replica_count(pc.max_replica_count)) { @@ -601,7 +602,7 @@ pc_status partition_guardian::on_missing_secondary(meta_view &view, const dsn::g "gpid({}) refuse to use selected node({}) as it is in black list", gpid, node); } newly_partitions *min_server_np = nullptr; - for (auto & [ _, ns ] : *view.nodes) { + for (auto &[_, ns] : *view.nodes) { if (!ns.alive() || is_member(pc, ns.host_port()) || in_black_list(ns.host_port())) { continue; } diff --git a/src/meta/partition_guardian.h b/src/meta/partition_guardian.h index 4fd3f966de..67667707ef 100644 --- a/src/meta/partition_guardian.h +++ b/src/meta/partition_guardian.h @@ -29,7 +29,7 @@ #include "dsn.layer2_types.h" #include "meta_admin_types.h" #include "meta_data.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/command_manager.h" #include "utils/zlocks.h" diff --git a/src/meta/server_load_balancer.cpp b/src/meta/server_load_balancer.cpp index 1a57858e38..0f8de9010d 100644 --- a/src/meta/server_load_balancer.cpp +++ b/src/meta/server_load_balancer.cpp @@ -32,8 +32,8 @@ #include "dsn.layer2_types.h" #include "meta/meta_data.h" #include "meta_admin_types.h" -#include "runtime/rpc/dns_resolver.h" // IWYU pragma: keep -#include "runtime/rpc/rpc_address.h" +#include "rpc/dns_resolver.h" // IWYU pragma: keep +#include "rpc/rpc_address.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" diff --git a/src/meta/server_load_balancer.h b/src/meta/server_load_balancer.h index 7d03be5b9f..fbce601713 100644 --- a/src/meta/server_load_balancer.h +++ b/src/meta/server_load_balancer.h @@ -35,12 +35,13 @@ #include #include "meta_data.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/extensible_object.h" namespace dsn { namespace replication { class configuration_balancer_request; + class configuration_balancer_response; class meta_service; diff --git a/src/meta/server_state.cpp b/src/meta/server_state.cpp index 09dc7781be..68bc786187 100644 --- a/src/meta/server_state.cpp +++ b/src/meta/server_state.cpp @@ -1,5 +1,4 @@ /* - * The MIT License (MIT) * * Copyright (c) 2015 Microsoft Corporation @@ -38,7 +37,6 @@ #include // IWYU pragma: keep #include #include -// IWYU pragma: no_include #include #include "common/duplication_common.h" @@ -59,18 +57,18 @@ #include "meta_bulk_load_service.h" #include "metadata_types.h" #include "replica_admin_types.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_spec.h" #include "security/access_controller.h" #include "server_load_balancer.h" #include "server_state.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/binary_reader.h" #include "utils/binary_writer.h" @@ -285,7 +283,7 @@ error_code server_state::dump_app_states(const char *local_path, binary_writer writer; dsn::marshall(writer, *app, DSF_THRIFT_BINARY); file->append_buffer(writer.get_buffer()); - for (const partition_configuration &pc : app->partitions) { + for (const auto &pc : app->pcs) { binary_writer pc_writer; dsn::marshall(pc_writer, pc, DSF_THRIFT_BINARY); file->append_buffer(pc_writer.get_buffer()); @@ -372,8 +370,8 @@ error_code server_state::restore_from_local_storage(const char *local_path) ans = file->read_next_buffer(data); binary_reader pc_reader(data); CHECK_EQ_MSG(ans, 1, "unexpect read buffer"); - unmarshall(pc_reader, app->partitions[i], DSF_THRIFT_BINARY); - CHECK_EQ_MSG(app->partitions[i].pid.get_partition_index(), + unmarshall(pc_reader, app->pcs[i], DSF_THRIFT_BINARY); + CHECK_EQ_MSG(app->pcs[i].pid.get_partition_index(), i, "uncorrect partition data, gpid({}.{}), appname({})", app->app_id, @@ -487,10 +485,11 @@ error_code server_state::sync_apps_to_remote_storage() error_code err; dist::meta_state_service *storage = _meta_svc->get_remote_storage(); - auto t = storage->create_node(apps_path, - LPC_META_CALLBACK, - [&err](error_code ec) { err = ec; }, - blob(lock_state, 0, strlen(lock_state))); + auto t = storage->create_node( + apps_path, + LPC_META_CALLBACK, + [&err](error_code ec) { err = ec; }, + blob(lock_state, 0, strlen(lock_state))); t->wait(); if (err != ERR_NODE_ALREADY_EXIST && err != ERR_OK) { @@ -510,19 +509,19 @@ error_code server_state::sync_apps_to_remote_storage() "invalid app status"); blob value = app->to_json(app_status::AS_CREATING == app->status ? app_status::AS_AVAILABLE : app_status::AS_DROPPED); - storage->create_node(path, - LPC_META_CALLBACK, - [&err, path](error_code ec) { - if (ec != ERR_OK && ec != ERR_NODE_ALREADY_EXIST) { - LOG_WARNING( - "create app node failed, path({}) reason({})", path, ec); - err = ec; - } else { - LOG_INFO("create app node {} ok", path); - } - }, - value, - &tracker); + storage->create_node( + path, + LPC_META_CALLBACK, + [&err, path](error_code ec) { + if (ec != ERR_OK && ec != ERR_NODE_ALREADY_EXIST) { + LOG_WARNING("create app node failed, path({}) reason({})", path, ec); + err = ec; + } else { + LOG_INFO("create app node {} ok", path); + } + }, + value, + &tracker); } tracker.wait_outstanding_tasks(); @@ -534,8 +533,8 @@ error_code server_state::sync_apps_to_remote_storage() for (auto &kv : _all_apps) { std::shared_ptr &app = kv.second; for (unsigned int i = 0; i != app->partition_count; ++i) { - task_ptr init_callback = - tasking::create_task(LPC_META_STATE_HIGH, &tracker, [] {}, sStateHash); + task_ptr init_callback = tasking::create_task( + LPC_META_STATE_HIGH, &tracker, [] {}, sStateHash); init_app_partition_node(app, i, init_callback); } } @@ -561,8 +560,9 @@ dsn::error_code server_state::sync_apps_from_remote_storage() dsn::task_tracker tracker; dist::meta_state_service *storage = _meta_svc->get_remote_storage(); - auto sync_partition = [this, storage, &err, &tracker]( - std::shared_ptr &app, int partition_id, const std::string &partition_path) { + auto sync_partition = [this, storage, &err, &tracker](std::shared_ptr &app, + int partition_id, + const std::string &partition_path) { storage->get_data( partition_path, LPC_META_CALLBACK, @@ -583,9 +583,10 @@ dsn::error_code server_state::sync_apps_from_remote_storage() "invalid partition config"); { zauto_write_lock l(_lock); - app->partitions[partition_id] = pc; - for (const auto &hp : pc.hp_last_drops) { - app->helpers->contexts[partition_id].record_drop_history(hp); + app->pcs[partition_id] = pc; + CHECK(pc.__isset.hp_last_drops, ""); + for (const auto &last_drop : pc.hp_last_drops) { + app->helpers->contexts[partition_id].record_drop_history(last_drop); } if (app->status == app_status::AS_CREATING && @@ -622,8 +623,8 @@ dsn::error_code server_state::sync_apps_from_remote_storage() app->helpers->split_states.status[partition_id - app->partition_count / 2] = split_status::SPLITTING; app->helpers->split_states.splitting_count++; - app->partitions[partition_id].ballot = invalid_ballot; - app->partitions[partition_id].pid = gpid(app->app_id, partition_id); + app->pcs[partition_id].ballot = invalid_ballot; + app->pcs[partition_id].pid = gpid(app->app_id, partition_id); process_one_partition(app); } @@ -728,15 +729,15 @@ void server_state::initialize_node_state() zauto_write_lock l(_lock); for (auto &app_pair : _all_apps) { app_state &app = *(app_pair.second); - for (partition_configuration &pc : app.partitions) { + for (const auto &pc : app.pcs) { if (pc.hp_primary) { node_state *ns = get_node_state(_nodes, pc.hp_primary, true); ns->put_partition(pc.pid, true); } - for (auto &ep : pc.hp_secondaries) { - CHECK(ep, "invalid secondary address, addr = {}", ep); - node_state *ns = get_node_state(_nodes, ep, true); + for (const auto &secondary : pc.hp_secondaries) { + CHECK(secondary, "invalid secondary: {}", secondary); + node_state *ns = get_node_state(_nodes, secondary, true); ns->put_partition(pc.pid, false); } } @@ -746,7 +747,7 @@ void server_state::initialize_node_state() } for (auto &app_pair : _all_apps) { app_state &app = *(app_pair.second); - for (const partition_configuration &pc : app.partitions) { + for (const auto &pc : app.pcs) { check_consistency(pc.pid); } } @@ -841,7 +842,7 @@ void server_state::on_config_sync(configuration_query_by_node_rpc rpc) } response.partitions[i].info = *app; - response.partitions[i].config = app->partitions[pid.get_partition_index()]; + response.partitions[i].config = app->pcs[pid.get_partition_index()]; response.partitions[i].host_node = request.node; // set meta_split_status const split_state &app_split_states = app->helpers->split_states; @@ -955,12 +956,12 @@ void server_state::on_config_sync(configuration_query_by_node_rpc rpc) } bool server_state::query_configuration_by_gpid(dsn::gpid id, - /*out*/ partition_configuration &config) + /*out*/ partition_configuration &pc) { zauto_read_lock l(_lock); - const partition_configuration *pc = get_config(_all_apps, id); - if (pc != nullptr) { - config = *pc; + const auto *ppc = get_config(_all_apps, id); + if (ppc != nullptr) { + pc = *ppc; return true; } return false; @@ -1003,11 +1004,13 @@ void server_state::query_configuration_by_index(const query_cfg_request &request response.is_stateful = app->is_stateful; for (const int32_t &index : request.partition_indices) { - if (index >= 0 && index < app->partitions.size()) - response.partitions.push_back(app->partitions[index]); + if (index >= 0 && index < app->pcs.size()) { + response.partitions.push_back(app->pcs[index]); + } + } + if (response.partitions.empty()) { + response.partitions = app->pcs; } - if (response.partitions.empty()) - response.partitions = app->partitions; } void server_state::init_app_partition_node(std::shared_ptr &app, @@ -1044,8 +1047,7 @@ void server_state::init_app_partition_node(std::shared_ptr &app, }; std::string app_partition_path = get_partition_path(*app, pidx); - dsn::blob value = - dsn::json::json_forwarder::encode(app->partitions[pidx]); + dsn::blob value = dsn::json::json_forwarder::encode(app->pcs[pidx]); _meta_svc->get_remote_storage()->create_node( app_partition_path, LPC_META_STATE_HIGH, on_create_app_partition, value); } @@ -1381,8 +1383,8 @@ void server_state::recall_app(dsn::message_ex *msg) if (has_seconds_expired(target_app->expire_second)) { response.err = ERR_APP_NOT_EXIST; } else { - std::string &new_app_name = - (request.new_app_name == "") ? target_app->app_name : request.new_app_name; + std::string &new_app_name = (request.new_app_name == "") ? target_app->app_name + : request.new_app_name; if (_exist_apps.find(new_app_name) != _exist_apps.end()) { response.err = ERR_INVALID_PARAMETERS; } else { @@ -1455,65 +1457,66 @@ void server_state::send_proposal(const configuration_proposal_action &action, send_proposal(target, request); } -void server_state::request_check(const partition_configuration &old, +void server_state::request_check(const partition_configuration &old_pc, const configuration_update_request &request) { - const partition_configuration &new_config = request.config; + const auto &new_pc = request.config; switch (request.type) { case config_type::CT_ASSIGN_PRIMARY: if (request.__isset.hp_node) { - CHECK_NE(old.hp_primary, request.hp_node); - CHECK(!utils::contains(old.hp_secondaries, request.hp_node), ""); + CHECK_NE(old_pc.hp_primary, request.hp_node); + CHECK(!utils::contains(old_pc.hp_secondaries, request.hp_node), ""); } else { - CHECK_NE(old.primary, request.node); - CHECK(!utils::contains(old.secondaries, request.node), ""); + CHECK_NE(old_pc.primary, request.node); + CHECK(!utils::contains(old_pc.secondaries, request.node), ""); } break; case config_type::CT_UPGRADE_TO_PRIMARY: if (request.__isset.hp_node) { - CHECK_NE(old.hp_primary, request.hp_node); - CHECK(utils::contains(old.hp_secondaries, request.hp_node), ""); + CHECK_NE(old_pc.hp_primary, request.hp_node); + CHECK(utils::contains(old_pc.hp_secondaries, request.hp_node), ""); } else { - CHECK_NE(old.primary, request.node); - CHECK(utils::contains(old.secondaries, request.node), ""); + CHECK_NE(old_pc.primary, request.node); + CHECK(utils::contains(old_pc.secondaries, request.node), ""); } break; case config_type::CT_DOWNGRADE_TO_SECONDARY: if (request.__isset.hp_node) { - CHECK_EQ(old.hp_primary, request.hp_node); - CHECK(!utils::contains(old.hp_secondaries, request.hp_node), ""); + CHECK_EQ(old_pc.hp_primary, request.hp_node); + CHECK(!utils::contains(old_pc.hp_secondaries, request.hp_node), ""); } else { - CHECK_EQ(old.primary, request.node); - CHECK(!utils::contains(old.secondaries, request.node), ""); + CHECK_EQ(old_pc.primary, request.node); + CHECK(!utils::contains(old_pc.secondaries, request.node), ""); } break; case config_type::CT_DOWNGRADE_TO_INACTIVE: case config_type::CT_REMOVE: if (request.__isset.hp_node) { - CHECK(old.hp_primary == request.hp_node || - utils::contains(old.hp_secondaries, request.hp_node), + CHECK(old_pc.hp_primary == request.hp_node || + utils::contains(old_pc.hp_secondaries, request.hp_node), ""); } else { - CHECK(old.primary == request.node || utils::contains(old.secondaries, request.node), + CHECK(old_pc.primary == request.node || + utils::contains(old_pc.secondaries, request.node), ""); } break; case config_type::CT_UPGRADE_TO_SECONDARY: if (request.__isset.hp_node) { - CHECK_NE(old.hp_primary, request.hp_node); - CHECK(!utils::contains(old.hp_secondaries, request.hp_node), ""); + CHECK_NE(old_pc.hp_primary, request.hp_node); + CHECK(!utils::contains(old_pc.hp_secondaries, request.hp_node), ""); } else { - CHECK_NE(old.primary, request.node); - CHECK(!utils::contains(old.secondaries, request.node), ""); + CHECK_NE(old_pc.primary, request.node); + CHECK(!utils::contains(old_pc.secondaries, request.node), ""); } break; case config_type::CT_PRIMARY_FORCE_UPDATE_BALLOT: { if (request.__isset.hp_node) { - CHECK_EQ(old.hp_primary, new_config.hp_primary); - CHECK(old.hp_secondaries == new_config.hp_secondaries, ""); + CHECK_EQ(old_pc.hp_primary, new_pc.hp_primary); + CHECK(old_pc.hp_secondaries == new_pc.hp_secondaries, ""); } else { - CHECK_EQ(old.primary, new_config.primary); - CHECK(old.secondaries == new_config.secondaries, ""); + CHECK_EQ(old_pc.primary, new_pc.primary); + CHECK(old_pc.secondaries == new_pc.secondaries, ""); } break; } @@ -1526,22 +1529,22 @@ void server_state::update_configuration_locally( app_state &app, std::shared_ptr &config_request) { dsn::gpid &gpid = config_request->config.pid; - partition_configuration &old_cfg = app.partitions[gpid.get_partition_index()]; - partition_configuration &new_cfg = config_request->config; + partition_configuration &old_pc = app.pcs[gpid.get_partition_index()]; + partition_configuration &new_pc = config_request->config; int min_2pc_count = _meta_svc->get_options().app_mutation_2pc_min_replica_count(app.max_replica_count); - health_status old_health_status = partition_health_status(old_cfg, min_2pc_count); - health_status new_health_status = partition_health_status(new_cfg, min_2pc_count); + health_status old_health_status = partition_health_status(old_pc, min_2pc_count); + health_status new_health_status = partition_health_status(new_pc, min_2pc_count); host_port node; GET_HOST_PORT(*config_request, node, node); if (app.is_stateful) { - CHECK(old_cfg.ballot == invalid_ballot || old_cfg.ballot + 1 == new_cfg.ballot, + CHECK(old_pc.ballot == invalid_ballot || old_pc.ballot + 1 == new_pc.ballot, "invalid configuration update request, old ballot {}, new ballot {}", - old_cfg.ballot, - new_cfg.ballot); + old_pc.ballot, + new_pc.ballot); node_state *ns = nullptr; if (config_request->type != config_type::CT_DROP_PARTITION) { @@ -1549,7 +1552,7 @@ void server_state::update_configuration_locally( CHECK_NOTNULL(ns, "invalid node: {}", node); } #ifndef NDEBUG - request_check(old_cfg, *config_request); + request_check(old_pc, *config_request); #endif switch (config_request->type) { case config_type::CT_ASSIGN_PRIMARY: @@ -1573,30 +1576,39 @@ void server_state::update_configuration_locally( case config_type::CT_PRIMARY_FORCE_UPDATE_BALLOT: break; - case config_type::CT_DROP_PARTITION: - for (const auto &node : new_cfg.hp_last_drops) { - ns = get_node_state(_nodes, node, false); - if (ns != nullptr) + case config_type::CT_DROP_PARTITION: { + for (const auto &last_drop : new_pc.hp_last_drops) { + ns = get_node_state(_nodes, last_drop, false); + if (ns != nullptr) { ns->remove_partition(gpid, false); + } } break; - + } case config_type::CT_ADD_SECONDARY: case config_type::CT_ADD_SECONDARY_FOR_LB: CHECK(false, "invalid execution work flow"); break; case config_type::CT_REGISTER_CHILD: { ns->put_partition(gpid, true); - // TODO(yingchun): optimize this + // TODO(yingchun): optimize the duplicate loops. if (config_request->config.__isset.hp_secondaries) { for (const auto &secondary : config_request->config.hp_secondaries) { - auto secondary_node = get_node_state(_nodes, secondary, false); + auto *secondary_node = get_node_state(_nodes, secondary, false); secondary_node->put_partition(gpid, false); } } else { for (const auto &secondary : config_request->config.secondaries) { - auto secondary_node = - get_node_state(_nodes, host_port::from_address(secondary), false); + const auto hp = host_port::from_address(secondary); + if (!hp) { + LOG_ERROR("The registering secondary {} for pid {} can no be reverse " + "resolved, skip registering it, please check the network " + "configuration", + secondary, + config_request->config.pid); + continue; + } + auto secondary_node = get_node_state(_nodes, hp, false); secondary_node->put_partition(gpid, false); } } @@ -1607,11 +1619,12 @@ void server_state::update_configuration_locally( break; } } else { - CHECK_EQ(old_cfg.ballot, new_cfg.ballot); - + CHECK_EQ(old_pc.ballot, new_pc.ballot); const auto host_node = host_port::from_address(config_request->host_node); - new_cfg = old_cfg; - partition_configuration_stateless pcs(new_cfg); + // The non-stateful app is just for testing, so just check the host_node is resolvable. + CHECK(host_node, "'{}' can not be reverse resolved", config_request->host_node); + new_pc = old_pc; + partition_configuration_stateless pcs(new_pc); if (config_request->type == config_type::type::CT_ADD_SECONDARY) { pcs.hosts().emplace_back(host_node); pcs.workers().emplace_back(node); @@ -1634,8 +1647,8 @@ void server_state::update_configuration_locally( // we assume config in config_request stores the proper new config // as we sync to remote storage according to it - std::string old_config_str = boost::lexical_cast(old_cfg); - old_cfg = config_request->config; + std::string old_config_str = boost::lexical_cast(old_pc); + old_pc = config_request->config; auto find_name = _config_type_VALUES_TO_NAMES.find(config_request->type); if (find_name != _config_type_VALUES_TO_NAMES.end()) { LOG_INFO("meta update config ok: type({}), old_config={}, {}", @@ -1713,15 +1726,14 @@ void server_state::on_update_configuration_on_remote_reply( CHECK(app->status == app_status::AS_AVAILABLE || app->status == app_status::AS_DROPPING, "if app removed, this task should be cancelled"); if (ec == ERR_TIMEOUT) { - cc.pending_sync_task = - tasking::enqueue(LPC_META_STATE_HIGH, - tracker(), - [this, config_request, &cc]() mutable { - cc.pending_sync_task = - update_configuration_on_remote(config_request); - }, - 0, - std::chrono::seconds(1)); + cc.pending_sync_task = tasking::enqueue( + LPC_META_STATE_HIGH, + tracker(), + [this, config_request, &cc]() mutable { + cc.pending_sync_task = update_configuration_on_remote(config_request); + }, + 0, + std::chrono::seconds(1)); } else if (ec == ERR_OK) { update_configuration_locally(*app, config_request); cc.pending_sync_task = nullptr; @@ -1767,7 +1779,7 @@ void server_state::recall_partition(std::shared_ptr &app, int pidx) auto on_recall_partition = [this, app, pidx](dsn::error_code error) mutable { if (error == dsn::ERR_OK) { zauto_write_lock l(_lock); - app->partitions[pidx].partition_flags &= (~pc_flags::dropped); + app->pcs[pidx].partition_flags &= (~pc_flags::dropped); process_one_partition(app); } else if (error == dsn::ERR_TIMEOUT) { tasking::enqueue(LPC_META_STATE_HIGH, @@ -1780,7 +1792,7 @@ void server_state::recall_partition(std::shared_ptr &app, int pidx) } }; - partition_configuration &pc = app->partitions[pidx]; + partition_configuration &pc = app->pcs[pidx]; CHECK((pc.partition_flags & pc_flags::dropped), ""); pc.partition_flags = 0; @@ -1792,7 +1804,7 @@ void server_state::recall_partition(std::shared_ptr &app, int pidx) void server_state::drop_partition(std::shared_ptr &app, int pidx) { - partition_configuration &pc = app->partitions[pidx]; + partition_configuration &pc = app->pcs[pidx]; config_context &cc = app->helpers->contexts[pidx]; std::shared_ptr req = @@ -1804,11 +1816,11 @@ void server_state::drop_partition(std::shared_ptr &app, int pidx) SET_OBJ_IP_AND_HOST_PORT(request, node, pc, primary); request.config = pc; - for (auto &node : pc.hp_secondaries) { - maintain_drops(request.config.hp_last_drops, node, request.type); + for (const auto &secondary : pc.hp_secondaries) { + maintain_drops(request.config.hp_last_drops, secondary, request.type); } - for (auto &node : pc.secondaries) { - maintain_drops(request.config.last_drops, node, request.type); + for (const auto &secondary : pc.secondaries) { + maintain_drops(request.config.last_drops, secondary, request.type); } if (pc.hp_primary) { maintain_drops(request.config.hp_last_drops, pc.hp_primary, request.type); @@ -1845,7 +1857,7 @@ void server_state::drop_partition(std::shared_ptr &app, int pidx) void server_state::downgrade_primary_to_inactive(std::shared_ptr &app, int pidx) { - partition_configuration &pc = app->partitions[pidx]; + partition_configuration &pc = app->pcs[pidx]; config_context &cc = app->helpers->contexts[pidx]; if (config_status::pending_remote_sync == cc.stage) { @@ -1890,7 +1902,7 @@ void server_state::downgrade_secondary_to_inactive(std::shared_ptr &a int pidx, const host_port &node) { - partition_configuration &pc = app->partitions[pidx]; + partition_configuration &pc = app->pcs[pidx]; config_context &cc = app->helpers->contexts[pidx]; CHECK(pc.hp_primary, "this shouldn't be called if the primary is invalid"); @@ -1913,31 +1925,31 @@ void server_state::downgrade_secondary_to_inactive(std::shared_ptr &a void server_state::downgrade_stateless_nodes(std::shared_ptr &app, int pidx, - const host_port &address) + const host_port &node) { - std::shared_ptr req = - std::make_shared(); + auto req = std::make_shared(); req->info = *app; req->type = config_type::CT_REMOVE; - req->host_node = dsn::dns_resolver::instance().resolve_address(address); + req->host_node = dsn::dns_resolver::instance().resolve_address(node); RESET_IP_AND_HOST_PORT(*req, node); - req->config = app->partitions[pidx]; + req->config = app->pcs[pidx]; config_context &cc = app->helpers->contexts[pidx]; partition_configuration &pc = req->config; unsigned i = 0; for (; i < pc.hp_secondaries.size(); ++i) { - if (pc.hp_secondaries[i] == address) { + if (pc.hp_secondaries[i] == node) { SET_OBJ_IP_AND_HOST_PORT(*req, node, pc, last_drops[i]); break; } } - host_port node; - GET_HOST_PORT(*req, node, node); - CHECK(node, "invalid node: {}", node); + host_port req_node; + GET_HOST_PORT(*req, node, req_node); + CHECK(req_node, "invalid node: {}", req_node); // remove host_node & node from secondaries/last_drops, as it will be sync to remote // storage + CHECK(pc.__isset.hp_secondaries, "hp_secondaries not set"); for (++i; i < pc.hp_secondaries.size(); ++i) { pc.secondaries[i - 1] = pc.secondaries[i]; pc.last_drops[i - 1] = pc.last_drops[i]; @@ -1954,7 +1966,7 @@ void server_state::downgrade_stateless_nodes(std::shared_ptr &app, "removing host({}) worker({})", pc.pid, req->host_node, - node); + req_node); cc.cancel_sync(); } cc.stage = config_status::pending_remote_sync; @@ -1970,7 +1982,7 @@ void server_state::on_update_configuration( zauto_write_lock l(_lock); dsn::gpid &gpid = cfg_request->config.pid; std::shared_ptr app = get_app(gpid.get_app_id()); - partition_configuration &pc = app->partitions[gpid.get_partition_index()]; + partition_configuration &pc = app->pcs[gpid.get_partition_index()]; config_context &cc = app->helpers->contexts[gpid.get_partition_index()]; configuration_update_response response; response.err = ERR_IO_PENDING; @@ -2034,53 +2046,61 @@ void server_state::on_update_configuration( void server_state::on_partition_node_dead(std::shared_ptr &app, int pidx, - const dsn::host_port &address) -{ - partition_configuration &pc = app->partitions[pidx]; - if (app->is_stateful) { - if (is_primary(pc, address)) - downgrade_primary_to_inactive(app, pidx); - else if (is_secondary(pc, address)) { - if (pc.hp_primary) - downgrade_secondary_to_inactive(app, pidx, address); - else if (is_secondary(pc, address)) { - LOG_INFO("gpid({}): secondary({}) is down, ignored it due to no primary for this " - "partition available", - pc.pid, - address); - } else { - CHECK(false, "no primary/secondary on this node, node address = {}", address); - } - } - } else { - downgrade_stateless_nodes(app, pidx, address); + const dsn::host_port &node) +{ + const auto &pc = app->pcs[pidx]; + if (!app->is_stateful) { + downgrade_stateless_nodes(app, pidx, node); + return; } + + if (is_primary(pc, node)) { + downgrade_primary_to_inactive(app, pidx); + return; + } + + if (!is_secondary(pc, node)) { + return; + } + + if (pc.hp_primary) { + downgrade_secondary_to_inactive(app, pidx, node); + return; + } + + CHECK(is_secondary(pc, node), ""); + LOG_INFO("gpid({}): secondary({}) is down, ignored it due to no primary for this partition " + "available", + pc.pid, + node); } void server_state::on_change_node_state(const host_port &node, bool is_alive) { LOG_DEBUG("change node({}) state to {}", node, is_alive ? "alive" : "dead"); zauto_write_lock l(_lock); - if (!is_alive) { - auto iter = _nodes.find(node); - if (iter == _nodes.end()) { - LOG_INFO("node({}) doesn't exist in the node state, just ignore", node); - } else { - node_state &ns = iter->second; - ns.set_alive(false); - ns.set_replicas_collect_flag(false); - ns.for_each_partition([&, this](const dsn::gpid &pid) { - std::shared_ptr app = get_app(pid.get_app_id()); - CHECK(app != nullptr && app->status != app_status::AS_DROPPED, - "invalid app, app_id = {}", - pid.get_app_id()); - on_partition_node_dead(app, pid.get_partition_index(), node); - return true; - }); - } - } else { + if (is_alive) { get_node_state(_nodes, node, true)->set_alive(true); + return; + } + + auto iter = _nodes.find(node); + if (iter == _nodes.end()) { + LOG_INFO("node({}) doesn't exist in the node state, just ignore", node); + return; } + + node_state &ns = iter->second; + ns.set_alive(false); + ns.set_replicas_collect_flag(false); + ns.for_each_partition([&, this](const dsn::gpid &pid) { + std::shared_ptr app = get_app(pid.get_app_id()); + CHECK(app != nullptr && app->status != app_status::AS_DROPPED, + "invalid app, app_id = {}", + pid.get_app_id()); + on_partition_node_dead(app, pid.get_partition_index(), node); + return true; + }); } void server_state::on_propose_balancer(const configuration_balancer_request &request, @@ -2090,19 +2110,21 @@ void server_state::on_propose_balancer(const configuration_balancer_request &req std::shared_ptr app = get_app(request.gpid.get_app_id()); if (app == nullptr || app->status != app_status::AS_AVAILABLE || request.gpid.get_partition_index() < 0 || - request.gpid.get_partition_index() >= app->partition_count) + request.gpid.get_partition_index() >= app->partition_count) { response.err = ERR_INVALID_PARAMETERS; - else { - if (request.force) { - partition_configuration &pc = *get_config(_all_apps, request.gpid); - for (const configuration_proposal_action &act : request.action_list) { - send_proposal(act, pc, *app); - } - response.err = ERR_OK; - } else { - _meta_svc->get_balancer()->register_proposals({&_all_apps, &_nodes}, request, response); + return; + } + + if (request.force) { + const auto &pc = *get_config(_all_apps, request.gpid); + for (const auto &act : request.action_list) { + send_proposal(act, pc, *app); } + response.err = ERR_OK; + return; } + + _meta_svc->get_balancer()->register_proposals({&_all_apps, &_nodes}, request, response); } error_code @@ -2235,7 +2257,7 @@ error_code server_state::construct_partitions( if (app->status == app_status::AS_DROPPING) { LOG_INFO("ignore constructing partitions for dropping app({})", app->app_id); } else { - for (partition_configuration &pc : app->partitions) { + for (const auto &pc : app->pcs) { bool is_succeed = construct_replica({&_all_apps, &_nodes}, pc.pid, app->max_replica_count); if (is_succeed) { @@ -2244,12 +2266,12 @@ error_code server_state::construct_partitions( pc.pid.get_partition_index(), boost::lexical_cast(pc)); if (pc.hp_last_drops.size() + 1 < pc.max_replica_count) { - std::ostringstream oss; - oss << "WARNING: partition(" << app->app_id << "." - << pc.pid.get_partition_index() << ") only collects " - << (pc.hp_last_drops.size() + 1) << "/" << pc.max_replica_count - << " of replicas, may lost data" << std::endl; - hint_message += oss.str(); + hint_message += fmt::format("WARNING: partition({}.{}) only collects {}/{} " + "of replicas, may lost data", + app->app_id, + pc.pid.get_partition_index(), + pc.hp_last_drops.size() + 1, + pc.max_replica_count); } succeed_count++; } else { @@ -2259,8 +2281,9 @@ error_code server_state::construct_partitions( std::ostringstream oss; if (skip_lost_partitions) { oss << "WARNING: partition(" << app->app_id << "." - << pc.pid.get_partition_index() << ") has no replica collected, force " - "recover the lost partition to empty" + << pc.pid.get_partition_index() + << ") has no replica collected, force " + "recover the lost partition to empty" << std::endl; } else { oss << "ERROR: partition(" << app->app_id << "." @@ -2485,8 +2508,9 @@ void server_state::update_partition_metrics() int min_2pc_count = _meta_svc->get_options().app_mutation_2pc_min_replica_count(app->max_replica_count); - for (unsigned int i = 0; i != app->partition_count; ++i) { - health_status st = partition_health_status(app->partitions[i], min_2pc_count); + CHECK_EQ(app->partition_count, app->pcs.size()); + for (const auto &pc : app->pcs) { + health_status st = partition_health_status(pc, min_2pc_count); counters[st]++; } @@ -2540,8 +2564,8 @@ bool server_state::check_all_partitions() continue; } for (unsigned int i = 0; i != app->partition_count; ++i) { - partition_configuration &pc = app->partitions[i]; - config_context &cc = app->helpers->contexts[i]; + const auto &pc = app->pcs[i]; + const auto &cc = app->helpers->contexts[i]; // partition is under re-configuration or is child partition if (cc.stage != config_status::pending_remote_sync && pc.ballot != invalid_ballot) { configuration_proposal_action action; @@ -2573,18 +2597,17 @@ bool server_state::check_all_partitions() // assign secondary for urgent for (int i = 0; i < add_secondary_actions.size(); ++i) { gpid &pid = add_secondary_gpids[i]; - partition_configuration &pc = *get_config(_all_apps, pid); - if (!add_secondary_proposed[i] && pc.hp_secondaries.empty()) { + const auto *pc = get_config(_all_apps, pid); + if (!add_secondary_proposed[i] && pc->hp_secondaries.empty()) { const auto &action = add_secondary_actions[i]; CHECK(action.hp_node, ""); - if (_add_secondary_enable_flow_control && - add_secondary_running_nodes[action.hp_node] >= - _add_secondary_max_count_for_one_node) { + if (_add_secondary_enable_flow_control && add_secondary_running_nodes[action.hp_node] >= + _add_secondary_max_count_for_one_node) { // ignore continue; } std::shared_ptr app = get_app(pid.get_app_id()); - send_proposal(action, pc, *app); + send_proposal(action, *pc, *app); send_proposal_count++; add_secondary_proposed[i] = true; add_secondary_running_nodes[action.hp_node]++; @@ -2597,20 +2620,19 @@ bool server_state::check_all_partitions() const auto &action = add_secondary_actions[i]; CHECK(action.hp_node, ""); gpid pid = add_secondary_gpids[i]; - partition_configuration &pc = *get_config(_all_apps, pid); - if (_add_secondary_enable_flow_control && - add_secondary_running_nodes[action.hp_node] >= - _add_secondary_max_count_for_one_node) { + const auto *pc = get_config(_all_apps, pid); + if (_add_secondary_enable_flow_control && add_secondary_running_nodes[action.hp_node] >= + _add_secondary_max_count_for_one_node) { LOG_INFO("do not send {} proposal for gpid({}) for flow control reason, target = " "{}, node = {}", ::dsn::enum_to_string(action.type), - pc.pid, + pc->pid, FMT_HOST_PORT_AND_IP(action, target), FMT_HOST_PORT_AND_IP(action, node)); continue; } std::shared_ptr app = get_app(pid.get_app_id()); - send_proposal(action, pc, *app); + send_proposal(action, *pc, *app); send_proposal_count++; add_secondary_proposed[i] = true; add_secondary_running_nodes[action.hp_node]++; @@ -2695,33 +2717,33 @@ void server_state::check_consistency(const dsn::gpid &gpid) auto iter = _all_apps.find(gpid.get_app_id()); CHECK(iter != _all_apps.end(), "invalid gpid({})", gpid); - app_state &app = *(iter->second); - partition_configuration &config = app.partitions[gpid.get_partition_index()]; + auto &app = *(iter->second); + auto &pc = app.pcs[gpid.get_partition_index()]; if (app.is_stateful) { - if (config.hp_primary) { - auto it = _nodes.find(config.hp_primary); - CHECK(it != _nodes.end(), "invalid primary address, address = {}", config.hp_primary); + if (pc.hp_primary) { + const auto it = _nodes.find(pc.hp_primary); + CHECK(it != _nodes.end(), "invalid primary: {}", pc.hp_primary); CHECK_EQ(it->second.served_as(gpid), partition_status::PS_PRIMARY); - CHECK(!utils::contains(config.hp_last_drops, config.hp_primary), - "primary shouldn't appear in last_drops, address = {}", - config.hp_primary); + CHECK(!utils::contains(pc.hp_last_drops, pc.hp_primary), + "primary({}) shouldn't appear in last_drops", + pc.hp_primary); } - for (auto &ep : config.hp_secondaries) { - auto it = _nodes.find(ep); - CHECK(it != _nodes.end(), "invalid secondary address, address = {}", ep); + for (const auto &secondary : pc.hp_secondaries) { + const auto it = _nodes.find(secondary); + CHECK(it != _nodes.end(), "invalid secondary: {}", secondary); CHECK_EQ(it->second.served_as(gpid), partition_status::PS_SECONDARY); - CHECK(!utils::contains(config.hp_last_drops, ep), - "secondary shouldn't appear in last_drops, address = {}", - ep); + CHECK(!utils::contains(pc.hp_last_drops, secondary), + "secondary({}) shouldn't appear in last_drops", + secondary); } } else { - partition_configuration_stateless pcs(config); + partition_configuration_stateless pcs(pc); CHECK_EQ(pcs.hosts().size(), pcs.workers().size()); - for (auto &ep : pcs.hosts()) { - auto it = _nodes.find(ep); - CHECK(it != _nodes.end(), "invalid host, address = {}", ep); + for (const auto &secondary : pcs.hosts()) { + auto it = _nodes.find(secondary); + CHECK(it != _nodes.end(), "invalid secondary: {}", secondary); CHECK_EQ(it->second.served_as(gpid), partition_status::PS_SECONDARY); } } @@ -2745,8 +2767,7 @@ void server_state::do_update_app_info(const std::string &app_path, { // persistent envs to zookeeper blob value = dsn::json::json_forwarder::encode(info); - auto new_cb = [ this, app_path, info, user_cb = std::move(cb) ](error_code ec) - { + auto new_cb = [this, app_path, info, user_cb = std::move(cb)](error_code ec) { if (ec == ERR_OK) { user_cb(ec); } else if (ec == ERR_TIMEOUT) { @@ -3236,18 +3257,17 @@ template bool server_state::check_max_replica_count_consistent(const std::shared_ptr &app, Response &response) const { - for (int i = 0; i < static_cast(app->partitions.size()); ++i) { - const auto &partition_config = app->partitions[i]; - if (partition_config.max_replica_count == app->max_replica_count) { + for (const auto &pc : app->pcs) { + if (pc.max_replica_count == app->max_replica_count) { continue; } response.err = ERR_INCONSISTENT_STATE; response.hint_message = fmt::format("partition_max_replica_count({}) != " "app_max_replica_count({}) for partition {}", - partition_config.max_replica_count, + pc.max_replica_count, app->max_replica_count, - i); + pc.pid); return false; } @@ -3619,8 +3639,8 @@ void server_state::update_partition_max_replica_count(std::shared_ptr app->partition_count, new_max_replica_count); - const auto &old_partition_config = app->partitions[partition_index]; - const auto old_max_replica_count = old_partition_config.max_replica_count; + const auto &old_pc = app->pcs[partition_index]; + const auto old_max_replica_count = old_pc.max_replica_count; if (new_max_replica_count == old_max_replica_count) { LOG_WARNING("partition-level max_replica_count has been updated: app_name={}, " @@ -3667,23 +3687,23 @@ void server_state::update_partition_max_replica_count(std::shared_ptr context.pending_sync_request.reset(); context.msg = nullptr; - auto new_partition_config = old_partition_config; - new_partition_config.max_replica_count = new_max_replica_count; - ++(new_partition_config.ballot); - context.pending_sync_task = update_partition_max_replica_count_on_remote( - app, new_partition_config, on_partition_updated); + auto new_pc = old_pc; + new_pc.max_replica_count = new_max_replica_count; + ++(new_pc.ballot); + context.pending_sync_task = + update_partition_max_replica_count_on_remote(app, new_pc, on_partition_updated); } // ThreadPool: THREAD_POOL_META_STATE -task_ptr server_state::update_partition_max_replica_count_on_remote( - std::shared_ptr &app, - const partition_configuration &new_partition_config, - partition_callback on_partition_updated) +task_ptr +server_state::update_partition_max_replica_count_on_remote(std::shared_ptr &app, + const partition_configuration &new_pc, + partition_callback on_partition_updated) { - const auto &gpid = new_partition_config.pid; + const auto &gpid = new_pc.pid; const auto partition_index = gpid.get_partition_index(); - const auto new_max_replica_count = new_partition_config.max_replica_count; - const auto new_ballot = new_partition_config.ballot; + const auto new_max_replica_count = new_pc.max_replica_count; + const auto new_ballot = new_pc.ballot; const auto level = _meta_svc->get_function_level(); if (level <= meta_function_level::fl_blind) { @@ -3699,21 +3719,21 @@ task_ptr server_state::update_partition_max_replica_count_on_remote( new_ballot); // NOTICE: pending_sync_task should be reassigned - return tasking::enqueue(LPC_META_STATE_HIGH, - tracker(), - [this, app, new_partition_config, on_partition_updated]() mutable { - const auto &gpid = new_partition_config.pid; - const auto partition_index = gpid.get_partition_index(); + return tasking::enqueue( + LPC_META_STATE_HIGH, + tracker(), + [this, app, new_pc, on_partition_updated]() mutable { + const auto &gpid = new_pc.pid; + const auto partition_index = gpid.get_partition_index(); - zauto_write_lock l(_lock); + zauto_write_lock l(_lock); - auto &context = app->helpers->contexts[partition_index]; - context.pending_sync_task = - update_partition_max_replica_count_on_remote( - app, new_partition_config, on_partition_updated); - }, - server_state::sStateHash, - std::chrono::seconds(1)); + auto &context = app->helpers->contexts[partition_index]; + context.pending_sync_task = + update_partition_max_replica_count_on_remote(app, new_pc, on_partition_updated); + }, + server_state::sStateHash, + std::chrono::seconds(1)); } LOG_INFO("request for updating partition-level max_replica_count on remote storage: " @@ -3725,8 +3745,7 @@ task_ptr server_state::update_partition_max_replica_count_on_remote( new_ballot); auto partition_path = get_partition_path(gpid); - auto json_config = - dsn::json::json_forwarder::encode(new_partition_config); + auto json_config = dsn::json::json_forwarder::encode(new_pc); return _meta_svc->get_remote_storage()->set_data( partition_path, json_config, @@ -3735,7 +3754,7 @@ task_ptr server_state::update_partition_max_replica_count_on_remote( this, std::placeholders::_1, app, - new_partition_config, + new_pc, on_partition_updated), tracker()); } @@ -3744,13 +3763,13 @@ task_ptr server_state::update_partition_max_replica_count_on_remote( void server_state::on_update_partition_max_replica_count_on_remote_reply( error_code ec, std::shared_ptr &app, - const partition_configuration &new_partition_config, + const partition_configuration &new_pc, partition_callback on_partition_updated) { - const auto &gpid = new_partition_config.pid; + const auto &gpid = new_pc.pid; const auto partition_index = gpid.get_partition_index(); - const auto new_max_replica_count = new_partition_config.max_replica_count; - const auto new_ballot = new_partition_config.ballot; + const auto new_max_replica_count = new_pc.max_replica_count; + const auto new_ballot = new_pc.ballot; zauto_write_lock l(_lock); @@ -3767,22 +3786,21 @@ void server_state::on_update_partition_max_replica_count_on_remote_reply( auto &context = app->helpers->contexts[partition_index]; if (ec == ERR_TIMEOUT) { // NOTICE: pending_sync_task need to be reassigned - context.pending_sync_task = - tasking::enqueue(LPC_META_STATE_HIGH, - tracker(), - [this, app, new_partition_config, on_partition_updated]() mutable { - const auto &gpid = new_partition_config.pid; - const auto partition_index = gpid.get_partition_index(); + context.pending_sync_task = tasking::enqueue( + LPC_META_STATE_HIGH, + tracker(), + [this, app, new_pc, on_partition_updated]() mutable { + const auto &gpid = new_pc.pid; + const auto partition_index = gpid.get_partition_index(); - zauto_write_lock l(_lock); + zauto_write_lock l(_lock); - auto &context = app->helpers->contexts[partition_index]; - context.pending_sync_task = - update_partition_max_replica_count_on_remote( - app, new_partition_config, on_partition_updated); - }, - server_state::sStateHash, - std::chrono::seconds(1)); + auto &context = app->helpers->contexts[partition_index]; + context.pending_sync_task = + update_partition_max_replica_count_on_remote(app, new_pc, on_partition_updated); + }, + server_state::sStateHash, + std::chrono::seconds(1)); return; } @@ -3791,7 +3809,7 @@ void server_state::on_update_partition_max_replica_count_on_remote_reply( return; } - update_partition_max_replica_count_locally(app, new_partition_config); + update_partition_max_replica_count_locally(app, new_pc); context.pending_sync_task = nullptr; context.pending_sync_request.reset(); @@ -3802,17 +3820,17 @@ void server_state::on_update_partition_max_replica_count_on_remote_reply( } // ThreadPool: THREAD_POOL_META_STATE -void server_state::update_partition_max_replica_count_locally( - std::shared_ptr &app, const partition_configuration &new_partition_config) +void server_state::update_partition_max_replica_count_locally(std::shared_ptr &app, + const partition_configuration &new_pc) { - const auto &gpid = new_partition_config.pid; + const auto &gpid = new_pc.pid; const auto partition_index = gpid.get_partition_index(); - const auto new_max_replica_count = new_partition_config.max_replica_count; - const auto new_ballot = new_partition_config.ballot; + const auto new_max_replica_count = new_pc.max_replica_count; + const auto new_ballot = new_pc.ballot; - auto &old_partition_config = app->partitions[gpid.get_partition_index()]; - const auto old_max_replica_count = old_partition_config.max_replica_count; - const auto old_ballot = old_partition_config.ballot; + auto &old_pc = app->pcs[gpid.get_partition_index()]; + const auto old_max_replica_count = old_pc.max_replica_count; + const auto old_ballot = old_pc.ballot; CHECK_EQ_MSG(old_ballot + 1, new_ballot, @@ -3827,14 +3845,14 @@ void server_state::update_partition_max_replica_count_locally( old_ballot, new_ballot); - std::string old_config_str(boost::lexical_cast(old_partition_config)); - std::string new_config_str(boost::lexical_cast(new_partition_config)); + std::string old_config_str(boost::lexical_cast(old_pc)); + std::string new_config_str(boost::lexical_cast(new_pc)); - old_partition_config = new_partition_config; + old_pc = new_pc; LOG_INFO("local partition-level max_replica_count has been changed successfully: ", - "app_name={}, app_id={}, partition_id={}, old_partition_config={}, " - "new_partition_config={}", + "app_name={}, app_id={}, partition_id={}, old_pc={}, " + "new_pc={}", app->app_name, app->app_id, partition_index, @@ -3903,7 +3921,7 @@ void server_state::recover_all_partitions_max_replica_count(std::shared_ptrpartition_count; ++i) { zauto_read_lock l(_lock); - auto new_pc = app->partitions[i]; + auto new_pc = app->pcs[i]; if (new_pc.max_replica_count == new_max_replica_count) { LOG_WARNING("no need to recover partition-level max_replica_count since it has been " "updated before: app_name={}, app_id={}, partition_index={}, " @@ -3937,7 +3955,7 @@ void server_state::recover_all_partitions_max_replica_count(std::shared_ptrpartitions[i]; + auto &old_pc = app->pcs[i]; std::string old_pc_str(boost::lexical_cast(old_pc)); std::string new_pc_str(boost::lexical_cast(new_pc)); diff --git a/src/meta/server_state.h b/src/meta/server_state.h index ad3664cf15..a787328f73 100644 --- a/src/meta/server_state.h +++ b/src/meta/server_state.h @@ -44,8 +44,8 @@ #include "dsn.layer2_types.h" #include "meta/meta_rpc_types.h" #include "meta_data.h" -#include "runtime/task/task.h" -#include "runtime/task/task_tracker.h" +#include "task/task.h" +#include "task/task_tracker.h" #include "table_metrics.h" #include "utils/error_code.h" #include "utils/zlocks.h" @@ -157,7 +157,7 @@ class server_state void query_configuration_by_index(const query_cfg_request &request, /*out*/ query_cfg_response &response); - bool query_configuration_by_gpid(const dsn::gpid id, /*out*/ partition_configuration &config); + bool query_configuration_by_gpid(const dsn::gpid id, /*out*/ partition_configuration &pc); // app options void create_app(dsn::message_ex *msg); @@ -276,7 +276,7 @@ class server_state void update_configuration_locally(app_state &app, std::shared_ptr &config_request); - void request_check(const partition_configuration &old, + void request_check(const partition_configuration &old_pc, const configuration_update_request &request); void recall_partition(std::shared_ptr &app, int pidx); void drop_partition(std::shared_ptr &app, int pidx); @@ -285,11 +285,9 @@ class server_state int pidx, const host_port &node); void - downgrade_stateless_nodes(std::shared_ptr &app, int pidx, const host_port &address); - - void on_partition_node_dead(std::shared_ptr &app, - int pidx, - const dsn::host_port &address); + downgrade_stateless_nodes(std::shared_ptr &app, int pidx, const host_port &node); + void + on_partition_node_dead(std::shared_ptr &app, int pidx, const dsn::host_port &node); void send_proposal(const host_port &target, const configuration_update_request &proposal); void send_proposal(const configuration_proposal_action &action, const partition_configuration &pc, @@ -347,18 +345,16 @@ class server_state int32_t partition_index, int32_t new_max_replica_count, partition_callback on_partition_updated); - task_ptr update_partition_max_replica_count_on_remote( - std::shared_ptr &app, - const partition_configuration &new_partition_config, - partition_callback on_partition_updated); - void on_update_partition_max_replica_count_on_remote_reply( - error_code ec, - std::shared_ptr &app, - const partition_configuration &new_partition_config, - partition_callback on_partition_updated); + task_ptr update_partition_max_replica_count_on_remote(std::shared_ptr &app, + const partition_configuration &new_pc, + partition_callback on_partition_updated); void - update_partition_max_replica_count_locally(std::shared_ptr &app, - const partition_configuration &new_partition_config); + on_update_partition_max_replica_count_on_remote_reply(error_code ec, + std::shared_ptr &app, + const partition_configuration &new_pc, + partition_callback on_partition_updated); + void update_partition_max_replica_count_locally(std::shared_ptr &app, + const partition_configuration &new_pc); void recover_all_partitions_max_replica_count(std::shared_ptr &app, int32_t max_replica_count, diff --git a/src/meta/server_state_restore.cpp b/src/meta/server_state_restore.cpp index ef99113bef..746a49b553 100644 --- a/src/meta/server_state_restore.cpp +++ b/src/meta/server_state_restore.cpp @@ -40,12 +40,12 @@ #include "meta/table_metrics.h" #include "meta_admin_types.h" #include "meta_service.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "server_state.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/error_code.h" @@ -250,8 +250,8 @@ void server_state::on_query_restore_status(configuration_query_restore_rpc rpc) response.restore_status.resize(app->partition_count, ERR_OK); for (int32_t i = 0; i < app->partition_count; i++) { const auto &r_state = app->helpers->restore_states[i]; - const auto &p = app->partitions[i]; - if (p.hp_primary || !p.hp_secondaries.empty()) { + const auto &pc = app->pcs[i]; + if (pc.hp_primary || !pc.hp_secondaries.empty()) { // already have primary, restore succeed continue; } diff --git a/src/meta/table_metrics.cpp b/src/meta/table_metrics.cpp index 7b2a21af5c..2bacf28ac6 100644 --- a/src/meta/table_metrics.cpp +++ b/src/meta/table_metrics.cpp @@ -17,7 +17,7 @@ #include "table_metrics.h" -#include +#include // IWYU pragma: no_include #include #include diff --git a/src/meta/test/backup_test.cpp b/src/meta/test/backup_test.cpp index f436514b60..367757ab2d 100644 --- a/src/meta/test/backup_test.cpp +++ b/src/meta/test/backup_test.cpp @@ -42,15 +42,15 @@ #include "meta/test/misc/misc.h" #include "meta_service_test_app.h" #include "meta_test_base.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/chrono_literals.h" #include "utils/error_code.h" @@ -504,7 +504,7 @@ TEST_F(policy_context_test, test_app_dropped_during_backup) app_state *app = state->_all_apps[3].get(); app->status = dsn::app_status::AS_AVAILABLE; - for (partition_configuration &pc : app->partitions) { + for (auto &pc : app->pcs) { SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, node_list[0]); SET_IPS_AND_HOST_PORTS_BY_DNS(pc, secondaries, node_list[1], node_list[2]); } diff --git a/src/meta/test/balancer_simulator/balancer_simulator.cpp b/src/meta/test/balancer_simulator/balancer_simulator.cpp index 16b50407e4..c22d2040fb 100644 --- a/src/meta/test/balancer_simulator/balancer_simulator.cpp +++ b/src/meta/test/balancer_simulator/balancer_simulator.cpp @@ -41,10 +41,10 @@ #include "meta/server_load_balancer.h" #include "meta/test/misc/misc.h" #include "meta_admin_types.h" +#include "rpc/dns_resolver.h" // IWYU pragma: keep +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" #include "runtime/app_model.h" -#include "runtime/rpc/dns_resolver.h" // IWYU pragma: keep -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" #include "utils/fmt_logging.h" using namespace dsn::replication; @@ -72,6 +72,7 @@ class simple_priority_queue } const dsn::host_port &top() const { return container.front(); } bool empty() const { return container.empty(); } + private: std::vector container; server_load_balancer::node_comparator cmp; @@ -95,11 +96,11 @@ void generate_balanced_apps(/*out*/ app_mapper &apps, info.partition_count = partitions_per_node * node_list.size(); info.max_replica_count = 3; - std::shared_ptr the_app = app_state::create(info); + std::shared_ptr app = app_state::create(info); simple_priority_queue pq1(node_list, server_load_balancer::primary_comparator(nodes)); // generate balanced primary - for (dsn::partition_configuration &pc : the_app->partitions) { + for (auto &pc : app->pcs) { const auto &n = pq1.pop(); nodes[n].put_partition(pc.pid, true); SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, n); @@ -110,7 +111,7 @@ void generate_balanced_apps(/*out*/ app_mapper &apps, simple_priority_queue pq2(node_list, server_load_balancer::partition_comparator(nodes)); std::vector temp; - for (dsn::partition_configuration &pc : the_app->partitions) { + for (auto &pc : app->pcs) { temp.clear(); while (pc.hp_secondaries.size() + 1 < pc.max_replica_count) { const auto &n = pq2.pop(); @@ -126,7 +127,7 @@ void generate_balanced_apps(/*out*/ app_mapper &apps, // check if balanced int pri_min, part_min; - pri_min = part_min = the_app->partition_count + 1; + pri_min = part_min = app->partition_count + 1; int pri_max, part_max; pri_max = part_max = -1; @@ -141,7 +142,7 @@ void generate_balanced_apps(/*out*/ app_mapper &apps, part_min = kv.second.partition_count(); } - apps.emplace(the_app->app_id, the_app); + apps.emplace(app->app_id, app); CHECK_LE(pri_max - pri_min, 1); CHECK_LE(part_max - part_min, 1); @@ -149,9 +150,9 @@ void generate_balanced_apps(/*out*/ app_mapper &apps, void random_move_primary(app_mapper &apps, node_mapper &nodes, int primary_move_ratio) { - app_state &the_app = *(apps[0]); - int space_size = the_app.partition_count * 100; - for (dsn::partition_configuration &pc : the_app.partitions) { + app_state &app = *(apps[0]); + int space_size = app.partition_count * 100; + for (auto &pc : app.pcs) { int n = random32(1, space_size) / 100; if (n < primary_move_ratio) { int indice = random32(0, 1); diff --git a/src/meta/test/balancer_validator.cpp b/src/meta/test/balancer_validator.cpp index 9b610f003a..2543b28935 100644 --- a/src/meta/test/balancer_validator.cpp +++ b/src/meta/test/balancer_validator.cpp @@ -47,8 +47,8 @@ #include "meta_admin_types.h" #include "meta_service_test_app.h" #include "metadata_types.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" #include "utils/fmt_logging.h" namespace dsn { @@ -165,14 +165,14 @@ void meta_service_test_app::balancer_validator() iter.second.partition_count()); } - std::shared_ptr &the_app = apps[1]; - for (::dsn::partition_configuration &pc : the_app->partitions) { + const auto &app = apps[1]; + for (const auto &pc : app->pcs) { CHECK(pc.hp_primary, ""); CHECK_GE(pc.secondaries.size(), pc.max_replica_count - 1); } // now test the cure - ::dsn::partition_configuration &pc = the_app->partitions[0]; + auto &pc = app->pcs[0]; nodes[pc.hp_primary].remove_partition(pc.pid, false); for (const auto &hp : pc.hp_secondaries) { nodes[hp].remove_partition(pc.pid, false); @@ -218,11 +218,11 @@ static void load_apps_and_nodes(const char *file, app_mapper &apps, node_mapper infile >> n; infile >> ip_port; const auto primary = host_port::from_string(ip_port); - SET_IP_AND_HOST_PORT_BY_DNS(app->partitions[j], primary, primary); + SET_IP_AND_HOST_PORT_BY_DNS(app->pcs[j], primary, primary); for (int k = 1; k < n; ++k) { infile >> ip_port; const auto secondary = host_port::from_string(ip_port); - ADD_IP_AND_HOST_PORT_BY_DNS(app->partitions[j], secondaries, secondary); + ADD_IP_AND_HOST_PORT_BY_DNS(app->pcs[j], secondaries, secondary); } } } diff --git a/src/meta/test/cluster_balance_policy_test.cpp b/src/meta/test/cluster_balance_policy_test.cpp index 14ae55632b..fd1185f8cc 100644 --- a/src/meta/test/cluster_balance_policy_test.cpp +++ b/src/meta/test/cluster_balance_policy_test.cpp @@ -30,14 +30,15 @@ #include "common/gpid.h" #include "dsn.layer2_types.h" #include "gtest/gtest.h" +#include "gutil/map_util.h" #include "meta/cluster_balance_policy.h" #include "meta/load_balance_policy.h" #include "meta/meta_data.h" #include "meta/meta_service.h" #include "meta_admin_types.h" #include "metadata_types.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" #include "utils/defer.h" #include "utils/fail_point.h" @@ -119,7 +120,7 @@ TEST(cluster_balance_policy, get_app_migration_info) info.app_name = appname; info.partition_count = 1; auto app = std::make_shared(info); - SET_IP_AND_HOST_PORT_BY_DNS(app->partitions[0], primary, hp); + SET_IP_AND_HOST_PORT_BY_DNS(app->pcs[0], primary, hp); node_state ns; ns.set_hp(hp); @@ -129,14 +130,14 @@ TEST(cluster_balance_policy, get_app_migration_info) cluster_balance_policy::app_migration_info migration_info; { - app->partitions[0].max_replica_count = 100; + app->pcs[0].max_replica_count = 100; auto res = policy.get_app_migration_info(app, nodes, balance_type::COPY_PRIMARY, migration_info); ASSERT_FALSE(res); } { - app->partitions[0].max_replica_count = 1; + app->pcs[0].max_replica_count = 1; auto res = policy.get_app_migration_info(app, nodes, balance_type::COPY_PRIMARY, migration_info); ASSERT_TRUE(res); @@ -162,15 +163,15 @@ TEST(cluster_balance_policy, get_node_migration_info) info.app_name = appname; info.partition_count = 1; auto app = std::make_shared(info); - SET_IP_AND_HOST_PORT_BY_DNS(app->partitions[0], primary, hp); + SET_IP_AND_HOST_PORT_BY_DNS(app->pcs[0], primary, hp); serving_replica sr; sr.node = hp; std::string disk_tag = "disk1"; sr.disk_tag = disk_tag; config_context context; - context.config_owner = new partition_configuration(); - auto cleanup = dsn::defer([&context]() { delete context.config_owner; }); - context.config_owner->pid = gpid(appid, 0); + context.pc = new partition_configuration(); + auto cleanup = dsn::defer([&context]() { delete context.pc; }); + context.pc->pid = gpid(appid, 0); context.serving.emplace_back(std::move(sr)); app->helpers->contexts.emplace_back(std::move(context)); @@ -186,9 +187,10 @@ TEST(cluster_balance_policy, get_node_migration_info) policy.get_node_migration_info(ns, all_apps, migration_info); ASSERT_EQ(migration_info.hp, hp); - ASSERT_NE(migration_info.partitions.find(disk_tag), migration_info.partitions.end()); - ASSERT_EQ(migration_info.partitions.at(disk_tag).size(), 1); - ASSERT_EQ(*migration_info.partitions.at(disk_tag).begin(), pid); + const auto *ps = gutil::FindOrNull(migration_info.partitions, disk_tag); + ASSERT_NE(ps, nullptr); + ASSERT_EQ(1, ps->size()); + ASSERT_EQ(pid, *ps->begin()); } TEST(cluster_balance_policy, get_min_max_set) @@ -517,8 +519,8 @@ TEST(cluster_balance_policy, calc_potential_moving) partition_configuration pc; SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, hp1); SET_IPS_AND_HOST_PORTS_BY_DNS(pc, secondaries, hp2, hp3); - app->partitions[0] = pc; - app->partitions[1] = pc; + app->pcs[0] = pc; + app->pcs[1] = pc; app_mapper apps; apps[app_id] = app; diff --git a/src/meta/test/copy_replica_operation_test.cpp b/src/meta/test/copy_replica_operation_test.cpp index 2c0045cdf9..761a27a7a4 100644 --- a/src/meta/test/copy_replica_operation_test.cpp +++ b/src/meta/test/copy_replica_operation_test.cpp @@ -30,7 +30,7 @@ #include "meta/app_balance_policy.h" #include "meta/load_balance_policy.h" #include "meta/meta_data.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/fail_point.h" namespace dsn { diff --git a/src/meta/test/ford_fulkerson_test.cpp b/src/meta/test/ford_fulkerson_test.cpp index bb291c8bce..68ae88f7df 100644 --- a/src/meta/test/ford_fulkerson_test.cpp +++ b/src/meta/test/ford_fulkerson_test.cpp @@ -27,9 +27,9 @@ #include "gtest/gtest.h" #include "meta/load_balance_policy.h" #include "meta/meta_data.h" -#include "runtime/rpc/dns_resolver.h" // IWYU pragma: keep -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/dns_resolver.h" // IWYU pragma: keep +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" namespace dsn { namespace replication { @@ -99,8 +99,8 @@ TEST(ford_fulkerson, update_decree) std::shared_ptr app = app_state::create(info); partition_configuration pc; SET_IPS_AND_HOST_PORTS_BY_DNS(pc, secondaries, hp2, hp3); - app->partitions.push_back(pc); - app->partitions.push_back(pc); + app->pcs.push_back(pc); + app->pcs.push_back(pc); node_mapper nodes; node_state ns; @@ -137,8 +137,8 @@ TEST(ford_fulkerson, find_shortest_path) partition_configuration pc; SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, hp1); SET_IPS_AND_HOST_PORTS_BY_DNS(pc, secondaries, hp2, hp3); - app->partitions[0] = pc; - app->partitions[1] = pc; + app->pcs[0] = pc; + app->pcs[1] = pc; node_mapper nodes; node_state ns1; diff --git a/src/meta/test/json_compacity.cpp b/src/meta/test/json_compacity.cpp index de01ec52a5..30d5da4d47 100644 --- a/src/meta/test/json_compacity.cpp +++ b/src/meta/test/json_compacity.cpp @@ -38,8 +38,8 @@ #include "gtest/gtest.h" #include "meta/meta_backup_service.h" #include "meta_service_test_app.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" #include "utils/blob.h" namespace dsn { diff --git a/src/meta/test/main.cpp b/src/meta/test/main.cpp index 283282e2de..d3125c4c3a 100644 --- a/src/meta/test/main.cpp +++ b/src/meta/test/main.cpp @@ -29,7 +29,7 @@ #include "meta_service_test_app.h" #include "runtime/app_model.h" #include "runtime/service_app.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/error_code.h" #include "utils/flags.h" #include "utils/fmt_logging.h" diff --git a/src/meta/test/meta_app_operation_test.cpp b/src/meta/test/meta_app_operation_test.cpp index 8cd472aae6..f8b399d4c3 100644 --- a/src/meta/test/meta_app_operation_test.cpp +++ b/src/meta/test/meta_app_operation_test.cpp @@ -41,9 +41,9 @@ #include "meta_service_test_app.h" #include "meta_test_base.h" #include "misc/misc.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "task/task_tracker.h" #include "utils/defer.h" #include "utils/error_code.h" #include "utils/errors.h" @@ -154,8 +154,8 @@ class meta_app_operation_test : public meta_test_base auto app = find_app(app_name); CHECK(app, "app({}) does not exist", app_name); - auto &partition_config = app->partitions[partition_index]; - partition_config.max_replica_count = max_replica_count; + auto &pc = app->pcs[partition_index]; + pc.max_replica_count = max_replica_count; } void set_max_replica_count_env(const std::string &app_name, const std::string &env) @@ -174,11 +174,12 @@ class meta_app_operation_test : public meta_test_base auto ainfo = *(reinterpret_cast(app.get())); auto json_config = dsn::json::json_forwarder::encode(ainfo); dsn::task_tracker tracker; - _ms->get_remote_storage()->set_data(app_path, - json_config, - LPC_META_STATE_HIGH, - [](dsn::error_code ec) { ASSERT_EQ(ec, ERR_OK); }, - &tracker); + _ms->get_remote_storage()->set_data( + app_path, + json_config, + LPC_META_STATE_HIGH, + [](dsn::error_code ec) { ASSERT_EQ(ec, ERR_OK); }, + &tracker); tracker.wait_outstanding_tasks(); } @@ -216,22 +217,22 @@ class meta_app_operation_test : public meta_test_base auto app = find_app(app_name); CHECK(app, "app({}) does not exist", app_name); - auto partition_size = static_cast(app->partitions.size()); + auto partition_size = static_cast(app->pcs.size()); for (int i = 0; i < partition_size; ++i) { // set local max_replica_count of each partition - auto &partition_config = app->partitions[i]; - partition_config.max_replica_count = max_replica_count; + auto &pc = app->pcs[i]; + pc.max_replica_count = max_replica_count; // set remote max_replica_count of each partition - auto partition_path = _ss->get_partition_path(partition_config.pid); - auto json_config = - dsn::json::json_forwarder::encode(partition_config); + auto partition_path = _ss->get_partition_path(pc.pid); + auto json_config = dsn::json::json_forwarder::encode(pc); dsn::task_tracker tracker; - _ms->get_remote_storage()->set_data(partition_path, - json_config, - LPC_META_STATE_HIGH, - [](dsn::error_code ec) { ASSERT_EQ(ec, ERR_OK); }, - &tracker); + _ms->get_remote_storage()->set_data( + partition_path, + json_config, + LPC_META_STATE_HIGH, + [](dsn::error_code ec) { ASSERT_EQ(ec, ERR_OK); }, + &tracker); tracker.wait_outstanding_tasks(); } @@ -243,11 +244,12 @@ class meta_app_operation_test : public meta_test_base auto ainfo = *(reinterpret_cast(app.get())); auto json_config = dsn::json::json_forwarder::encode(ainfo); dsn::task_tracker tracker; - _ms->get_remote_storage()->set_data(app_path, - json_config, - LPC_META_STATE_HIGH, - [](dsn::error_code ec) { ASSERT_EQ(ec, ERR_OK); }, - &tracker); + _ms->get_remote_storage()->set_data( + app_path, + json_config, + LPC_META_STATE_HIGH, + [](dsn::error_code ec) { ASSERT_EQ(ec, ERR_OK); }, + &tracker); tracker.wait_outstanding_tasks(); } @@ -257,28 +259,27 @@ class meta_app_operation_test : public meta_test_base auto app = find_app(app_name); CHECK(app, "app({}) does not exist", app_name); - auto partition_size = static_cast(app->partitions.size()); + auto partition_size = static_cast(app->pcs.size()); for (int i = 0; i < partition_size; ++i) { // verify local max_replica_count of each partition - auto &partition_config = app->partitions[i]; - ASSERT_EQ(partition_config.max_replica_count, expected_max_replica_count); + auto &pc = app->pcs[i]; + ASSERT_EQ(pc.max_replica_count, expected_max_replica_count); // verify remote max_replica_count of each partition - auto partition_path = _ss->get_partition_path(partition_config.pid); + auto partition_path = _ss->get_partition_path(pc.pid); dsn::task_tracker tracker; _ms->get_remote_storage()->get_data( partition_path, LPC_META_CALLBACK, - [ expected_pid = partition_config.pid, - expected_max_replica_count ](error_code ec, const blob &value) { + [expected_pid = pc.pid, expected_max_replica_count](error_code ec, + const blob &value) { ASSERT_EQ(ec, ERR_OK); - partition_configuration partition_config; - dsn::json::json_forwarder::decode(value, - partition_config); + partition_configuration pc; + dsn::json::json_forwarder::decode(value, pc); - ASSERT_EQ(partition_config.pid, expected_pid); - ASSERT_EQ(partition_config.max_replica_count, expected_max_replica_count); + ASSERT_EQ(pc.pid, expected_pid); + ASSERT_EQ(pc.max_replica_count, expected_max_replica_count); }, &tracker); tracker.wait_outstanding_tasks(); @@ -725,10 +726,9 @@ TEST_F(meta_app_operation_test, get_max_replica_count) auto partition_index = static_cast(random32(0, partition_count - 1)); set_partition_max_replica_count(test.app_name, partition_index, 2); recover_partition_max_replica_count = - [ this, app_name = test.app_name, partition_index ]() - { - set_partition_max_replica_count(app_name, partition_index, 3); - }; + [this, app_name = test.app_name, partition_index]() { + set_partition_max_replica_count(app_name, partition_index, 3); + }; } const auto resp = get_max_replica_count(test.app_name); @@ -878,15 +878,14 @@ TEST_F(meta_app_operation_test, set_max_replica_count) // recover automatically the original FLAGS_min_live_node_count_for_unfreeze, // FLAGS_min_allowed_replica_count and FLAGS_max_allowed_replica_count - auto recover = defer([ - reserved_min_live_node_count_for_unfreeze = FLAGS_min_live_node_count_for_unfreeze, - reserved_min_allowed_replica_count = FLAGS_min_allowed_replica_count, - reserved_max_allowed_replica_count = FLAGS_max_allowed_replica_count - ]() { - FLAGS_max_allowed_replica_count = reserved_max_allowed_replica_count; - FLAGS_min_allowed_replica_count = reserved_min_allowed_replica_count; - FLAGS_min_live_node_count_for_unfreeze = reserved_min_live_node_count_for_unfreeze; - }); + auto recover = defer( + [reserved_min_live_node_count_for_unfreeze = FLAGS_min_live_node_count_for_unfreeze, + reserved_min_allowed_replica_count = FLAGS_min_allowed_replica_count, + reserved_max_allowed_replica_count = FLAGS_max_allowed_replica_count]() { + FLAGS_max_allowed_replica_count = reserved_max_allowed_replica_count; + FLAGS_min_allowed_replica_count = reserved_min_allowed_replica_count; + FLAGS_min_live_node_count_for_unfreeze = reserved_min_live_node_count_for_unfreeze; + }); FLAGS_min_live_node_count_for_unfreeze = test.min_live_node_count_for_unfreeze; FLAGS_min_allowed_replica_count = test.min_allowed_replica_count; FLAGS_max_allowed_replica_count = test.max_allowed_replica_count; diff --git a/src/meta/test/meta_backup_test.cpp b/src/meta/test/meta_backup_test.cpp index 5a77b13bc6..1be550f4be 100644 --- a/src/meta/test/meta_backup_test.cpp +++ b/src/meta/test/meta_backup_test.cpp @@ -19,6 +19,7 @@ #include #include #include +// IWYU pragma: no_include #include #include @@ -34,8 +35,8 @@ #include "meta/meta_service.h" #include "meta/server_state.h" #include "meta_test_base.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_host_port.h" #include "utils/env.h" #include "utils/error_code.h" #include "utils/fail_point.h" @@ -204,6 +205,31 @@ TEST_F(backup_service_test, test_query_backup_status) ASSERT_EQ(1, resp.backup_items.size()); } +TEST_F(backup_service_test, test_valid_policy_name) +{ + std::string hint_message; + ASSERT_FALSE(_backup_service->is_valid_policy_name_unlocked(cold_backup_constant::BACKUP_INFO, + hint_message)); + ASSERT_EQ("policy name is reserved", hint_message); + + ASSERT_FALSE(_backup_service->is_valid_policy_name_unlocked("bad-policy-name", hint_message)); + ASSERT_EQ("policy name should match regex '[a-zA-Z_:][a-zA-Z0-9_:]*' when act as a metric name " + "in prometheus", + hint_message); + + ASSERT_FALSE(_backup_service->is_valid_policy_name_unlocked("bad_policy_name:", hint_message)); + ASSERT_EQ("policy name should match regex '[a-zA-Z_][a-zA-Z0-9_]*' when act as a metric label " + "in prometheus", + hint_message); + + _backup_service->_policy_states.insert(std::make_pair("exist_policy_name", nullptr)); + ASSERT_FALSE(_backup_service->is_valid_policy_name_unlocked("exist_policy_name", hint_message)); + ASSERT_EQ("policy name is already exist", hint_message); + + ASSERT_TRUE(_backup_service->is_valid_policy_name_unlocked("new_policy_name0", hint_message)); + ASSERT_TRUE(hint_message.empty()); +} + class backup_engine_test : public meta_test_base { public: diff --git a/src/meta/test/meta_bulk_load_ingestion_test.cpp b/src/meta/test/meta_bulk_load_ingestion_test.cpp index 8d5e434dc8..f52a324979 100644 --- a/src/meta/test/meta_bulk_load_ingestion_test.cpp +++ b/src/meta/test/meta_bulk_load_ingestion_test.cpp @@ -24,11 +24,12 @@ #include "common/gpid.h" #include "dsn.layer2_types.h" #include "gtest/gtest.h" +#include "gutil/map_util.h" #include "meta/meta_bulk_load_ingestion_context.h" #include "meta/meta_data.h" #include "meta_test_base.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" #include "utils/fail_point.h" namespace dsn { @@ -64,10 +65,11 @@ class node_context_test : public meta_test_base uint32_t get_disk_count(const std::string &disk_tag) { - if (_context.disk_ingesting_counts.find(disk_tag) == _context.disk_ingesting_counts.end()) { + const auto *count = gutil::FindOrNull(_context.disk_ingesting_counts, disk_tag); + if (count == nullptr) { return -1; } - return _context.disk_ingesting_counts[disk_tag]; + return *count; } void mock_get_max_disk_ingestion_count(const uint32_t node_min_disk_count, @@ -199,39 +201,27 @@ class ingestion_context_test : public meta_test_base ainfo.app_id = APP_ID; ainfo.partition_count = PARTITION_COUNT; _app = std::make_shared(ainfo); - _app->partitions.reserve(PARTITION_COUNT); + _app->pcs.reserve(PARTITION_COUNT); _app->helpers->contexts.reserve(PARTITION_COUNT); - mock_partition(0, - {NODE1, NODE2, NODE3}, - {TAG1, TAG1, TAG2}, - _app->partitions[0], - _app->helpers->contexts[0]); - mock_partition(1, - {NODE4, NODE1, NODE2}, - {TAG2, TAG1, TAG2}, - _app->partitions[1], - _app->helpers->contexts[1]); - mock_partition(2, - {NODE3, NODE1, NODE4}, - {TAG1, TAG2, TAG1}, - _app->partitions[2], - _app->helpers->contexts[2]); - mock_partition(3, - {NODE2, NODE3, NODE4}, - {TAG1, TAG1, TAG2}, - _app->partitions[3], - _app->helpers->contexts[3]); + mock_partition( + 0, {NODE1, NODE2, NODE3}, {TAG1, TAG1, TAG2}, _app->pcs[0], _app->helpers->contexts[0]); + mock_partition( + 1, {NODE4, NODE1, NODE2}, {TAG2, TAG1, TAG2}, _app->pcs[1], _app->helpers->contexts[1]); + mock_partition( + 2, {NODE3, NODE1, NODE4}, {TAG1, TAG2, TAG1}, _app->pcs[2], _app->helpers->contexts[2]); + mock_partition( + 3, {NODE2, NODE3, NODE4}, {TAG1, TAG1, TAG2}, _app->pcs[3], _app->helpers->contexts[3]); } void mock_partition(const uint32_t pidx, std::vector nodes, const std::vector tags, - partition_configuration &config, + partition_configuration &pc, config_context &cc) { - config.pid = gpid(APP_ID, pidx); - SET_IP_AND_HOST_PORT_BY_DNS(config, primary, nodes[0]); - SET_IPS_AND_HOST_PORTS_BY_DNS(config, secondaries, nodes[1], nodes[2]); + pc.pid = gpid(APP_ID, pidx); + SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, nodes[0]); + SET_IPS_AND_HOST_PORTS_BY_DNS(pc, secondaries, nodes[1], nodes[2]); auto count = nodes.size(); for (auto i = 0; i < count; i++) { @@ -253,14 +243,13 @@ class ingestion_context_test : public meta_test_base bool try_partition_ingestion(const uint32_t pidx) { - return _context->try_partition_ingestion(_app->partitions[pidx], - _app->helpers->contexts[pidx]); + return _context->try_partition_ingestion(_app->pcs[pidx], _app->helpers->contexts[pidx]); } void add_partition(const uint32_t pidx) { - auto pinfo = ingestion_context::partition_node_info(_app->partitions[pidx], - _app->helpers->contexts[pidx]); + const auto pinfo = + ingestion_context::partition_node_info(_app->pcs[pidx], _app->helpers->contexts[pidx]); _context->add_partition(pinfo); } @@ -268,8 +257,7 @@ class ingestion_context_test : public meta_test_base bool is_partition_ingesting(const uint32_t pidx) const { - return _context->_running_partitions.find(gpid(APP_ID, pidx)) != - _context->_running_partitions.end(); + return gutil::ContainsKey(_context->_running_partitions, gpid(APP_ID, pidx)); } uint32_t get_app_ingesting_count() const { return _context->get_app_ingesting_count(APP_ID); } diff --git a/src/meta/test/meta_bulk_load_service_test.cpp b/src/meta/test/meta_bulk_load_service_test.cpp index 2cd4e3c644..b685bcaed8 100644 --- a/src/meta/test/meta_bulk_load_service_test.cpp +++ b/src/meta/test/meta_bulk_load_service_test.cpp @@ -40,6 +40,7 @@ #include "common/replication_other_types.h" #include "dsn.layer2_types.h" #include "gtest/gtest.h" +#include "gutil/map_util.h" #include "meta/meta_bulk_load_service.h" #include "meta/meta_data.h" #include "meta/meta_server_failure_detector.h" @@ -50,9 +51,9 @@ #include "meta_service_test_app.h" #include "meta_test_base.h" #include "metadata_types.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/fail_point.h" @@ -174,16 +175,16 @@ class bulk_load_service_test : public meta_test_base gpid before_check_partition_status(bulk_load_status::type status) { std::shared_ptr app = find_app(APP_NAME); - partition_configuration config; - config.pid = gpid(app->app_id, 0); - config.max_replica_count = 3; - config.ballot = BALLOT; - SET_IP_AND_HOST_PORT_BY_DNS(config, primary, PRIMARY_HP); - SET_IPS_AND_HOST_PORTS_BY_DNS(config, secondaries, SECONDARY1_HP, SECONDARY2_HP); - app->partitions.clear(); - app->partitions.emplace_back(config); + partition_configuration pc; + pc.pid = gpid(app->app_id, 0); + pc.max_replica_count = 3; + pc.ballot = BALLOT; + SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, PRIMARY_HP); + SET_IPS_AND_HOST_PORTS_BY_DNS(pc, secondaries, SECONDARY1_HP, SECONDARY2_HP); + app->pcs.clear(); + app->pcs.emplace_back(pc); mock_meta_bulk_load_context(app->app_id, app->partition_count, status); - return config.pid; + return pc.pid; } bool check_partition_status(const std::string name, @@ -194,18 +195,18 @@ class bulk_load_service_test : public meta_test_base { std::shared_ptr app = find_app(name); if (mock_primary_invalid) { - RESET_IP_AND_HOST_PORT(app->partitions[pid.get_partition_index()], primary); + RESET_IP_AND_HOST_PORT(app->pcs[pid.get_partition_index()], primary); } if (mock_lack_secondary) { - CLEAR_IP_AND_HOST_PORT(app->partitions[pid.get_partition_index()], secondaries); + CLEAR_IP_AND_HOST_PORT(app->pcs[pid.get_partition_index()], secondaries); } - partition_configuration pconfig; + partition_configuration pc; bool flag = bulk_svc().check_partition_status( name, pid, always_unhealthy_check, std::bind(&bulk_load_service_test::mock_partition_bulk_load, this, name, pid), - pconfig); + pc); wait_all(); return flag; } @@ -233,22 +234,22 @@ class bulk_load_service_test : public meta_test_base bool same) { set_partition_bulk_load_info(pid, ever_ingest_succeed); - partition_configuration config; - config.pid = pid; - SET_IP_AND_HOST_PORT_BY_DNS(config, primary, PRIMARY_HP); + partition_configuration pc; + pc.pid = pid; + SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, PRIMARY_HP); if (same) { - ADD_IP_AND_HOST_PORT_BY_DNS(config, secondaries, SECONDARY1_HP); - ADD_IP_AND_HOST_PORT_BY_DNS(config, secondaries, SECONDARY2_HP); + ADD_IP_AND_HOST_PORT_BY_DNS(pc, secondaries, SECONDARY1_HP); + ADD_IP_AND_HOST_PORT_BY_DNS(pc, secondaries, SECONDARY2_HP); } else { - ADD_IP_AND_HOST_PORT_BY_DNS(config, secondaries, SECONDARY1_HP); + ADD_IP_AND_HOST_PORT_BY_DNS(pc, secondaries, SECONDARY1_HP); if (secondary_count == 2) { - ADD_IP_AND_HOST_PORT_BY_DNS(config, secondaries, SECONDARY3_HP); + ADD_IP_AND_HOST_PORT_BY_DNS(pc, secondaries, SECONDARY3_HP); } else if (secondary_count >= 3) { - ADD_IP_AND_HOST_PORT_BY_DNS(config, secondaries, SECONDARY2_HP); - ADD_IP_AND_HOST_PORT_BY_DNS(config, secondaries, SECONDARY3_HP); + ADD_IP_AND_HOST_PORT_BY_DNS(pc, secondaries, SECONDARY2_HP); + ADD_IP_AND_HOST_PORT_BY_DNS(pc, secondaries, SECONDARY3_HP); } } - auto flag = bulk_svc().check_ever_ingestion_succeed(config, APP_NAME, pid); + auto flag = bulk_svc().check_ever_ingestion_succeed(pc, APP_NAME, pid); wait_all(); return flag; } @@ -315,9 +316,9 @@ class bulk_load_service_test : public meta_test_base fail::cfg("ingestion_try_partition_ingestion", "return()"); config_context cc; for (auto i = 0; i < count; i++) { - partition_configuration config; - config.pid = gpid(app_id, i); - bulk_svc().try_partition_ingestion(config, cc); + partition_configuration pc; + pc.pid = gpid(app_id, i); + bulk_svc().try_partition_ingestion(pc, cc); } } @@ -382,14 +383,12 @@ class bulk_load_service_test : public meta_test_base &partition_bulk_load_info_map, &pinfo_map]() { for (const auto app_id : app_id_set) { - auto app_iter = app_bulk_load_info_map.find(app_id); - auto partition_iter = partition_bulk_load_info_map.find(app_id); - if (app_iter != app_bulk_load_info_map.end()) { + const auto *app = gutil::FindOrNull(app_bulk_load_info_map, app_id); + if (app != nullptr) { + const auto *partition = + gutil::FindOrNull(partition_bulk_load_info_map, app_id); mock_app_bulk_load_info_on_remote_storage( - app_iter->second, - partition_iter == partition_bulk_load_info_map.end() - ? pinfo_map - : partition_iter->second); + *app, partition == nullptr ? pinfo_map : *partition); } } }); @@ -452,11 +451,11 @@ class bulk_load_service_test : public meta_test_base [this, &info]() { LOG_INFO("create app({}) app_id={}, dir succeed", info.app_name, info.app_id); for (int i = 0; i < info.partition_count; ++i) { - partition_configuration config; - config.max_replica_count = 3; - config.pid = gpid(info.app_id, i); - config.ballot = BALLOT; - blob v = json::json_forwarder::encode(config); + partition_configuration pc; + pc.max_replica_count = 3; + pc.pid = gpid(info.app_id, i); + pc.ballot = BALLOT; + blob v = json::json_forwarder::encode(pc); _ms->get_meta_storage()->create_node( _app_root + "/" + boost::lexical_cast(info.app_id) + "/" + boost::lexical_cast(i), @@ -492,12 +491,13 @@ class bulk_load_service_test : public meta_test_base bool is_app_bulk_load_states_reset(int32_t app_id) { - return bulk_svc()._bulk_load_app_id.find(app_id) == bulk_svc()._bulk_load_app_id.end(); + return !gutil::ContainsKey(bulk_svc()._bulk_load_app_id, app_id); } meta_op_status get_op_status() { return _ms->get_op_status(); } void unlock_meta_op_status() { return _ms->unlock_meta_op_status(); } + public: int32_t APP_ID = 1; std::string APP_NAME = "bulk_load_test"; @@ -798,7 +798,7 @@ class bulk_load_process_test : public bulk_load_service_test if (!_resp.__isset.hp_group_bulk_load_state) { _resp.__set_hp_group_bulk_load_state({}); } - for (const auto & [ addr_and_hp, state ] : state_by_hosts) { + for (const auto &[addr_and_hp, state] : state_by_hosts) { _resp.group_bulk_load_state[addr_and_hp.first] = state; _resp.hp_group_bulk_load_state[addr_and_hp.second] = state; } diff --git a/src/meta/test/meta_data.cpp b/src/meta/test/meta_data.cpp index c32f33e866..5573989205 100644 --- a/src/meta/test/meta_data.cpp +++ b/src/meta/test/meta_data.cpp @@ -35,9 +35,9 @@ #include "meta/meta_data.h" #include "metadata_types.h" #include "misc/misc.h" -#include "runtime/rpc/dns_resolver.h" // IWYU pragma: keep -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/dns_resolver.h" // IWYU pragma: keep +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" using namespace dsn::replication; @@ -109,7 +109,7 @@ static bool vec_equal(const std::vector &vec1, TEST(meta_data, collect_replica) { - app_mapper app; + app_mapper apps; node_mapper nodes; dsn::app_info info; @@ -120,16 +120,16 @@ TEST(meta_data, collect_replica) info.app_type = "test"; info.max_replica_count = 3; info.partition_count = 1024; - std::shared_ptr the_app = app_state::create(info); - app.emplace(the_app->app_id, the_app); - meta_view view = {&app, &nodes}; + std::shared_ptr app = app_state::create(info); + apps.emplace(app->app_id, app); + meta_view view = {&apps, &nodes}; replica_info rep; rep.app_type = "test"; rep.pid = dsn::gpid(1, 0); - dsn::partition_configuration &pc = *get_config(app, rep.pid); - config_context &cc = *get_config_context(app, rep.pid); + auto &pc = *get_config(apps, rep.pid); + auto &cc = *get_config_context(apps, rep.pid); std::vector node_list; generate_node_list(node_list, 10, 10); @@ -352,7 +352,7 @@ TEST(meta_data, collect_replica) TEST(meta_data, construct_replica) { - app_mapper app; + app_mapper apps; node_mapper nodes; dsn::app_info info; @@ -363,16 +363,16 @@ TEST(meta_data, construct_replica) info.app_type = "test"; info.max_replica_count = 3; info.partition_count = 1024; - std::shared_ptr the_app = app_state::create(info); - app.emplace(the_app->app_id, the_app); - meta_view view = {&app, &nodes}; + std::shared_ptr app = app_state::create(info); + apps.emplace(app->app_id, app); + meta_view view = {&apps, &nodes}; replica_info rep; rep.app_type = "test"; rep.pid = dsn::gpid(1, 0); - dsn::partition_configuration &pc = *get_config(app, rep.pid); - config_context &cc = *get_config_context(app, rep.pid); + dsn::partition_configuration &pc = *get_config(apps, rep.pid); + config_context &cc = *get_config_context(apps, rep.pid); std::vector node_list; generate_node_list(node_list, 10, 10); diff --git a/src/meta/test/meta_duplication_service_test.cpp b/src/meta/test/meta_duplication_service_test.cpp index 78c0de6610..af312fa9e7 100644 --- a/src/meta/test/meta_duplication_service_test.cpp +++ b/src/meta/test/meta_duplication_service_test.cpp @@ -57,8 +57,9 @@ #include "meta/server_state.h" #include "meta/test/misc/misc.h" #include "meta_test_base.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "runtime/api_layer1.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/fail_point.h" @@ -118,6 +119,12 @@ class meta_duplication_service_test : public meta_test_base return create_dup(app_name, remote_cluster, app_name, remote_replica_count); } + duplication_add_response create_dup(const std::string &app_name, + const int32_t remote_replica_count) + { + return create_dup(app_name, kTestRemoteClusterName, remote_replica_count); + } + duplication_add_response create_dup(const std::string &app_name) { return create_dup(app_name, kTestRemoteClusterName, kTestRemoteReplicaCount); @@ -399,7 +406,7 @@ class meta_duplication_service_test : public meta_test_base struct TestData { std::string app_name; - std::string remote; + std::string remote_cluster_name; bool specified; std::string remote_app_name; @@ -414,13 +421,14 @@ class meta_duplication_service_test : public meta_test_base kTestRemoteAppName, kTestRemoteReplicaCount, ERR_OK}, - // A duplication that has been added would be found with its original remote_app_name. + // Add a duplication that has been existing for the same table with the same remote + // cluster. {kTestAppName, kTestRemoteClusterName, - true, + false, kTestRemoteAppName, kTestRemoteReplicaCount, - ERR_OK}, + ERR_DUP_EXIST}, // The general case that duplicating to remote cluster with same remote_app_name. {kTestSameAppName, kTestRemoteClusterName, @@ -437,7 +445,7 @@ class meta_duplication_service_test : public meta_test_base ERR_INVALID_PARAMETERS}, // Duplicating to local cluster is not allowed. {kTestAppName, - get_current_cluster_name(), + get_current_dup_cluster_name(), true, kTestRemoteAppName, kTestRemoteReplicaCount, @@ -477,10 +485,12 @@ class meta_duplication_service_test : public meta_test_base for (auto test : tests) { duplication_add_response resp; if (test.specified) { - resp = create_dup( - test.app_name, test.remote, test.remote_app_name, test.remote_replica_count); + resp = create_dup(test.app_name, + test.remote_cluster_name, + test.remote_app_name, + test.remote_replica_count); } else { - resp = create_dup_unspecified(test.app_name, test.remote); + resp = create_dup_unspecified(test.app_name, test.remote_cluster_name); } ASSERT_EQ(test.wec, resp.err); @@ -494,7 +504,7 @@ class meta_duplication_service_test : public meta_test_base ASSERT_TRUE(dup != nullptr); ASSERT_EQ(app->app_id, dup->app_id); ASSERT_EQ(duplication_status::DS_PREPARE, dup->_status); - ASSERT_EQ(test.remote, dup->remote_cluster_name); + ASSERT_EQ(test.remote_cluster_name, dup->remote_cluster_name); ASSERT_EQ(test.remote_app_name, resp.remote_app_name); ASSERT_EQ(test.remote_app_name, dup->remote_app_name); ASSERT_EQ(test.remote_replica_count, resp.remote_replica_count); @@ -524,23 +534,24 @@ TEST_F(meta_duplication_service_test, dup_op_upon_unavail_app) create_app(test_app_unavail); find_app(test_app_unavail)->status = app_status::AS_DROPPED; - dupid_t test_dup = create_dup(kTestAppName).dupid; - struct TestData { std::string app; - error_code wec; } tests[] = { {test_app_not_exist, ERR_APP_NOT_EXIST}, {test_app_unavail, ERR_APP_NOT_EXIST}, - {kTestAppName, ERR_OK}, }; for (auto test : tests) { + const auto &resp = create_dup(test.app); + ASSERT_EQ(test.wec, resp.err); + ASSERT_EQ(test.wec, query_dup_info(test.app).err); - ASSERT_EQ(test.wec, create_dup(test.app).err); + + // For the response with some error, `dupid` doesn't matter. + dupid_t test_dup = test.wec == ERR_OK ? resp.dupid : static_cast(dsn_now_s()); ASSERT_EQ(test.wec, change_dup_status(test.app, test_dup, duplication_status::DS_REMOVED).err); } @@ -669,7 +680,7 @@ TEST_F(meta_duplication_service_test, duplication_sync) auto app = find_app(test_app); // generate all primaries on node[0] - for (partition_configuration &pc : app->partitions) { + for (auto &pc : app->pcs) { pc.ballot = random32(1, 10000); SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, server_nodes[0]); SET_IPS_AND_HOST_PORTS_BY_DNS(pc, secondaries, server_nodes[1], server_nodes[2]); @@ -892,7 +903,7 @@ TEST_F(meta_duplication_service_test, fail_mode) // ensure dup_sync will synchronize fail_mode const auto hp = generate_node_list(3)[0]; - for (partition_configuration &pc : app->partitions) { + for (auto &pc : app->pcs) { SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, hp); } initialize_node_state(); @@ -958,39 +969,99 @@ TEST_F(meta_duplication_service_test, check_follower_app_if_create_completed) { struct test_case { + int32_t remote_replica_count; std::vector fail_cfg_name; std::vector fail_cfg_action; bool is_altering; duplication_status::type cur_status; duplication_status::type next_status; - } test_cases[] = {{{"create_app_ok"}, - {"void()"}, + } test_cases[] = {// 3 remote replicas with both primary and secondaries valid. + {3, + {"create_app_ok"}, + {"void(true,2,0)"}, false, duplication_status::DS_LOG, duplication_status::DS_INIT}, - // the case just `palace holder`, actually - // `check_follower_app_if_create_completed` is failed by default in unit test - {{"create_app_failed"}, + // 3 remote replicas with primary invalid and all secondaries valid. + {3, + {"create_app_ok"}, + {"void(false,2,0)"}, + false, + duplication_status::DS_APP, + duplication_status::DS_INIT}, + // 3 remote replicas with primary valid and only one secondary present + // and valid. + {3, + {"create_app_ok"}, + {"void(true,1,0)"}, + false, + duplication_status::DS_LOG, + duplication_status::DS_INIT}, + // 3 remote replicas with primary valid and one secondary invalid. + {3, + {"create_app_ok"}, + {"void(true,1,1)"}, + false, + duplication_status::DS_APP, + duplication_status::DS_INIT}, + // 3 remote replicas with primary valid and only one secondary present + // and invalid. + {3, + {"create_app_ok"}, + {"void(true,0,1)"}, + false, + duplication_status::DS_APP, + duplication_status::DS_INIT}, + // 3 remote replicas with primary valid and both secondaries absent. + {3, + {"create_app_ok"}, + {"void(true,0,0)"}, + false, + duplication_status::DS_APP, + duplication_status::DS_INIT}, + // 1 remote replicas with primary valid. + {1, + {"create_app_ok"}, + {"void(true,0,0)"}, + false, + duplication_status::DS_LOG, + duplication_status::DS_INIT}, + // 1 remote replicas with primary invalid. + {1, + {"create_app_ok"}, + {"void(false,0,0)"}, + false, + duplication_status::DS_APP, + duplication_status::DS_INIT}, + // The case is just a "palace holder", actually + // `check_follower_app_if_create_completed` would fail by default + // in unit test. + {3, + {"create_app_failed"}, {"off()"}, false, duplication_status::DS_APP, duplication_status::DS_INIT}, - {{"create_app_ok", "persist_dup_status_failed"}, - {"void()", "return()"}, + {3, + {"create_app_ok", "persist_dup_status_failed"}, + {"void(true,2,0)", "return()"}, true, duplication_status::DS_APP, duplication_status::DS_LOG}}; + size_t i = 0; for (const auto &test : test_cases) { - const auto test_app = fmt::format("{}{}", test.fail_cfg_name[0], test.fail_cfg_name.size()); - create_app(test_app); - auto app = find_app(test_app); + const auto &app_name = fmt::format("check_follower_app_if_create_completed_test_{}", i++); + create_app(app_name); - auto dup_add_resp = create_dup(test_app); + auto app = find_app(app_name); + auto dup_add_resp = create_dup(app_name, test.remote_replica_count); auto dup = app->duplications[dup_add_resp.dupid]; + // 'check_follower_app_if_create_completed' must execute under duplication_status::DS_APP, - // so force update it + // so force update it. force_update_dup_status(dup, duplication_status::DS_APP); + fail::setup(); for (int i = 0; i < test.fail_cfg_name.size(); i++) { fail::cfg(test.fail_cfg_name[i], test.fail_cfg_action[i]); @@ -998,6 +1069,7 @@ TEST_F(meta_duplication_service_test, check_follower_app_if_create_completed) check_follower_app_if_create_completed(dup); wait_all(); fail::teardown(); + ASSERT_EQ(dup->is_altering(), test.is_altering); ASSERT_EQ(next_status(dup), test.next_status); ASSERT_EQ(dup->status(), test.cur_status); diff --git a/src/meta/test/meta_http_service_test.cpp b/src/meta/test/meta_http_service_test.cpp index cadcdffc63..940d912708 100644 --- a/src/meta/test/meta_http_service_test.cpp +++ b/src/meta/test/meta_http_service_test.cpp @@ -39,10 +39,10 @@ #include "meta/meta_state_service.h" #include "meta_service_test_app.h" #include "meta_test_base.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_message.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/chrono_literals.h" diff --git a/src/meta/test/meta_mauanl_compaction_test.cpp b/src/meta/test/meta_mauanl_compaction_test.cpp index 6853d34e4f..02c7c82b1e 100644 --- a/src/meta/test/meta_mauanl_compaction_test.cpp +++ b/src/meta/test/meta_mauanl_compaction_test.cpp @@ -51,7 +51,7 @@ class meta_app_compaction_test : public meta_test_base { create_app(APP_NAME, PARTITION_COUNT); auto app = find_app(APP_NAME); - app->partitions.resize(PARTITION_COUNT); + app->pcs.resize(PARTITION_COUNT); app->helpers->contexts.resize(PARTITION_COUNT); for (auto i = 0; i < PARTITION_COUNT; ++i) { serving_replica rep; diff --git a/src/meta/test/meta_partition_guardian_test.cpp b/src/meta/test/meta_partition_guardian_test.cpp index dbc3bf08e1..931433a754 100644 --- a/src/meta/test/meta_partition_guardian_test.cpp +++ b/src/meta/test/meta_partition_guardian_test.cpp @@ -24,6 +24,7 @@ * THE SOFTWARE. */ +// IWYU pragma: no_include #include #include #include @@ -54,13 +55,13 @@ #include "meta_service_test_app.h" #include "meta_test_base.h" #include "metadata_types.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" +#include "task/async_calls.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/filesystem.h" @@ -207,8 +208,8 @@ void meta_partition_guardian_test::cure_test() std::vector nodes; generate_node_list(nodes, 4, 4); - dsn::partition_configuration &pc = app->partitions[0]; - config_context &cc = *get_config_context(state->_all_apps, dsn::gpid(1, 0)); + auto &pc = app->pcs[0]; + auto &cc = *get_config_context(state->_all_apps, dsn::gpid(1, 0)); #define PROPOSAL_FLAG_CHECK \ ASSERT_TRUE(proposal_sent); \ @@ -797,7 +798,7 @@ void meta_partition_guardian_test::cure() std::vector nodes_list; generate_node_list(nodes_list, 20, 100); - app_mapper app; + app_mapper apps; node_mapper nodes; meta_service svc; partition_guardian guardian(&svc); @@ -810,9 +811,9 @@ void meta_partition_guardian_test::cure() info.app_type = "test"; info.max_replica_count = 3; info.partition_count = 1024; - std::shared_ptr the_app = app_state::create(info); + std::shared_ptr app = app_state::create(info); - app.emplace(the_app->app_id, the_app); + apps.emplace(app->app_id, app); for (const auto &hp : nodes_list) { get_node_state(nodes, hp, true)->set_alive(true); } @@ -823,21 +824,21 @@ void meta_partition_guardian_test::cure() pc_status status; all_partitions_healthy = true; - for (int i = 0; i != the_app->partition_count; ++i) { - dsn::gpid &pid = the_app->partitions[i].pid; - status = guardian.cure({&app, &nodes}, pid, action); + CHECK_EQ(app->partition_count, app->pcs.size()); + for (const auto &pc : app->pcs) { + status = guardian.cure({&apps, &nodes}, pc.pid, action); if (status != pc_status::healthy) { all_partitions_healthy = false; - proposal_action_check_and_apply(action, pid, app, nodes, nullptr); + proposal_action_check_and_apply(action, pc.pid, apps, nodes, nullptr); configuration_update_request fake_request; - fake_request.info = *the_app; - fake_request.config = the_app->partitions[i]; + fake_request.info = *app; + fake_request.config = pc; fake_request.type = action.type; SET_OBJ_IP_AND_HOST_PORT(fake_request, node, action, node); fake_request.host_node = action.node; - guardian.reconfig({&app, &nodes}, fake_request); + guardian.reconfig({&apps, &nodes}, fake_request); check_nodes_loads(nodes); } } @@ -849,7 +850,7 @@ void meta_partition_guardian_test::from_proposal_test() std::vector nodes_list; generate_node_list(nodes_list, 3, 3); - app_mapper app; + app_mapper apps; node_mapper nodes; meta_service svc; @@ -863,20 +864,20 @@ void meta_partition_guardian_test::from_proposal_test() info.app_type = "test"; info.max_replica_count = 3; info.partition_count = 1; - std::shared_ptr the_app = app_state::create(info); + std::shared_ptr app = app_state::create(info); - app.emplace(the_app->app_id, the_app); + apps.emplace(app->app_id, app); for (const auto &hp : nodes_list) { get_node_state(nodes, hp, true)->set_alive(true); } - meta_view mv{&app, &nodes}; + meta_view mv{&apps, &nodes}; dsn::gpid p(1, 0); configuration_proposal_action cpa; configuration_proposal_action cpa2; - dsn::partition_configuration &pc = *get_config(app, p); - config_context &cc = *get_config_context(app, p); + dsn::partition_configuration &pc = *get_config(apps, p); + config_context &cc = *get_config_context(apps, p); std::cerr << "Case 1: test no proposals in config_context" << std::endl; ASSERT_FALSE(guardian.from_proposals(mv, p, cpa)); diff --git a/src/meta/test/meta_service_test.cpp b/src/meta/test/meta_service_test.cpp index 802ca62bd9..dc727d8026 100644 --- a/src/meta/test/meta_service_test.cpp +++ b/src/meta/test/meta_service_test.cpp @@ -26,12 +26,12 @@ #include "meta/meta_service.h" #include "meta_admin_types.h" #include "meta_test_base.h" -#include "runtime/rpc/network.sim.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" +#include "rpc/network.sim.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/fail_point.h" diff --git a/src/meta/test/meta_service_test_app.h b/src/meta/test/meta_service_test_app.h index b755755919..1171315502 100644 --- a/src/meta/test/meta_service_test_app.h +++ b/src/meta/test/meta_service_test_app.h @@ -34,14 +34,16 @@ #include "utils/api_utilities.h" #include "utils/error_code.h" #include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "common/gpid.h" -#include "runtime/rpc/serialization.h" -#include "runtime/rpc/rpc_stream.h" +#include "rpc/serialization.h" +#include "rpc/rpc_stream.h" #include "runtime/serverlet.h" #include "runtime/service_app.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/task/async_calls.h" +#include "rpc/rpc_address.h" +#include "task/async_calls.h" +#include "rpc/rpc_address.h" +#include "task/async_calls.h" #include "meta_admin_types.h" #include "partition_split_types.h" #include "duplication_types.h" diff --git a/src/meta/test/meta_split_service_test.cpp b/src/meta/test/meta_split_service_test.cpp index f881500d44..c63c7aa4e7 100644 --- a/src/meta/test/meta_split_service_test.cpp +++ b/src/meta/test/meta_split_service_test.cpp @@ -59,8 +59,8 @@ #include "meta_test_base.h" #include "metadata_types.h" #include "partition_split_types.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" @@ -128,16 +128,16 @@ class meta_split_service_test : public meta_test_base error_code register_child(int32_t parent_index, ballot req_parent_ballot, bool wait_zk) { - partition_configuration parent_config; - parent_config.ballot = req_parent_ballot; - parent_config.last_committed_decree = 5; - parent_config.max_replica_count = 3; - parent_config.pid = gpid(app->app_id, parent_index); + partition_configuration parent_pc; + parent_pc.ballot = req_parent_ballot; + parent_pc.last_committed_decree = 5; + parent_pc.max_replica_count = 3; + parent_pc.pid = gpid(app->app_id, parent_index); - partition_configuration child_config; - child_config.ballot = PARENT_BALLOT + 1; - child_config.last_committed_decree = 5; - child_config.pid = gpid(app->app_id, parent_index + PARTITION_COUNT); + partition_configuration child_pc; + child_pc.ballot = PARENT_BALLOT + 1; + child_pc.last_committed_decree = 5; + child_pc.pid = gpid(app->app_id, parent_index + PARTITION_COUNT); // mock node state node_state node; @@ -147,8 +147,8 @@ class meta_split_service_test : public meta_test_base auto request = std::make_unique(); request->app.app_name = app->app_name; request->app.app_id = app->app_id; - request->parent_config = parent_config; - request->child_config = child_config; + request->parent_config = parent_pc; + request->child_config = child_pc; SET_IP_AND_HOST_PORT_BY_DNS(*request, primary, NODE); register_child_rpc rpc(std::move(request), RPC_CM_REGISTER_CHILD_REPLICA); @@ -207,17 +207,17 @@ class meta_split_service_test : public meta_test_base void mock_app_partition_split_context() { app->partition_count = NEW_PARTITION_COUNT; - app->partitions.resize(app->partition_count); + app->pcs.resize(app->partition_count); _ss->get_table_metric_entities().resize_partitions(app->app_id, app->partition_count); app->helpers->contexts.resize(app->partition_count); app->helpers->split_states.splitting_count = app->partition_count / 2; for (int i = 0; i < app->partition_count; ++i) { - app->helpers->contexts[i].config_owner = &app->partitions[i]; - app->partitions[i].pid = gpid(app->app_id, i); + app->helpers->contexts[i].pc = &app->pcs[i]; + app->pcs[i].pid = gpid(app->app_id, i); if (i >= app->partition_count / 2) { - app->partitions[i].ballot = invalid_ballot; + app->pcs[i].ballot = invalid_ballot; } else { - app->partitions[i].ballot = PARENT_BALLOT; + app->pcs[i].ballot = PARENT_BALLOT; app->helpers->contexts[i].stage = config_status::not_pending; app->helpers->split_states.status[i] = split_status::SPLITTING; } @@ -227,7 +227,7 @@ class meta_split_service_test : public meta_test_base void clear_app_partition_split_context() { app->partition_count = PARTITION_COUNT; - app->partitions.resize(app->partition_count); + app->pcs.resize(app->partition_count); _ss->get_table_metric_entities().resize_partitions(app->app_id, app->partition_count); app->helpers->contexts.resize(app->partition_count); app->helpers->split_states.splitting_count = 0; @@ -237,16 +237,16 @@ class meta_split_service_test : public meta_test_base void mock_only_one_partition_split(split_status::type split_status) { app->partition_count = NEW_PARTITION_COUNT; - app->partitions.resize(app->partition_count); + app->pcs.resize(app->partition_count); _ss->get_table_metric_entities().resize_partitions(app->app_id, app->partition_count); app->helpers->contexts.resize(app->partition_count); for (int i = 0; i < app->partition_count; ++i) { - app->helpers->contexts[i].config_owner = &app->partitions[i]; - app->partitions[i].pid = dsn::gpid(app->app_id, i); + app->helpers->contexts[i].pc = &app->pcs[i]; + app->pcs[i].pid = dsn::gpid(app->app_id, i); if (i >= app->partition_count / 2) { - app->partitions[i].ballot = invalid_ballot; + app->pcs[i].ballot = invalid_ballot; } else { - app->partitions[i].ballot = PARENT_BALLOT; + app->pcs[i].ballot = PARENT_BALLOT; app->helpers->contexts[i].stage = config_status::not_pending; } } @@ -256,7 +256,7 @@ class meta_split_service_test : public meta_test_base void mock_child_registered() { - app->partitions[CHILD_INDEX].ballot = PARENT_BALLOT; + app->pcs[CHILD_INDEX].ballot = PARENT_BALLOT; app->helpers->split_states.splitting_count--; app->helpers->split_states.status.erase(PARENT_INDEX); } @@ -358,11 +358,11 @@ class meta_split_service_test : public meta_test_base const int32_t app_id, const int32_t pidx) { - partition_configuration config; - config.max_replica_count = 3; - config.pid = gpid(app_id, pidx); - config.ballot = PARENT_BALLOT; - blob value = json::json_forwarder::encode(config); + partition_configuration pc; + pc.max_replica_count = 3; + pc.pid = gpid(app_id, pidx); + pc.ballot = PARENT_BALLOT; + blob value = json::json_forwarder::encode(pc); _ms->get_meta_storage()->create_node( app_root + "/" + boost::lexical_cast(app_id) + "/" + boost::lexical_cast(pidx), @@ -846,7 +846,7 @@ TEST_F(meta_split_service_failover_test, half_split_test) ASSERT_EQ(split_states.splitting_count, PARTITION_COUNT - 1); ASSERT_EQ(split_states.status.find(PARENT_INDEX), split_states.status.end()); ASSERT_EQ(app->partition_count, NEW_PARTITION_COUNT); - ASSERT_EQ(app->partitions.size(), NEW_PARTITION_COUNT); + ASSERT_EQ(app->pcs.size(), NEW_PARTITION_COUNT); } } // namespace replication diff --git a/src/meta/test/meta_state/meta_state_service.cpp b/src/meta/test/meta_state/meta_state_service.cpp index 90b44c1888..4bde7f5120 100644 --- a/src/meta/test/meta_state/meta_state_service.cpp +++ b/src/meta/test/meta_state/meta_state_service.cpp @@ -34,7 +34,7 @@ #include "meta/meta_state_service_simple.h" #include "meta/meta_state_service_zookeeper.h" #include "runtime/service_app.h" -#include "runtime/task/task_tracker.h" +#include "task/task_tracker.h" #include "test_util/test_util.h" #include "utils/binary_reader.h" #include "utils/binary_writer.h" diff --git a/src/meta/test/meta_state/run.sh b/src/meta/test/meta_state/run.sh index 62db6b8d26..cb8ad502d0 100755 --- a/src/meta/test/meta_state/run.sh +++ b/src/meta/test/meta_state/run.sh @@ -40,9 +40,9 @@ while read -r -a line; do echo "run dsn_meta_state_tests $test_case failed" echo "---- ls ----" ls -l - if find . -name log.1.txt; then - echo "---- tail -n 100 log.1.txt ----" - tail -n 100 `find . -name log.1.txt` + if [ `find . -name pegasus.log.* | wc -l` -ne 0 ]; then + echo "---- tail -n 100 pegasus.log.* ----" + tail -n 100 `find . -name pegasus.log.*` fi if [ -f core ]; then echo "---- gdb ./dsn_meta_state_tests core ----" diff --git a/src/meta/test/meta_state_service_utils_test.cpp b/src/meta/test/meta_state_service_utils_test.cpp index fa4cf3deea..0034d02360 100644 --- a/src/meta/test/meta_state_service_utils_test.cpp +++ b/src/meta/test/meta_state_service_utils_test.cpp @@ -33,7 +33,7 @@ #include "gtest/gtest.h" #include "meta/meta_state_service.h" #include "meta/meta_state_service_utils.h" -#include "runtime/task/task_tracker.h" +#include "task/task_tracker.h" #include "utils/binary_reader.h" #include "utils/binary_writer.h" #include "utils/blob.h" diff --git a/src/meta/test/meta_test_base.cpp b/src/meta/test/meta_test_base.cpp index 965939d566..db136d3913 100644 --- a/src/meta/test/meta_test_base.cpp +++ b/src/meta/test/meta_test_base.cpp @@ -38,9 +38,9 @@ #include "meta/server_state.h" #include "meta/test/misc/misc.h" #include "meta_service_test_app.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "task/task_tracker.h" #include "utils/error_code.h" #include "utils/factory_store.h" #include "utils/filesystem.h" @@ -182,14 +182,16 @@ std::vector meta_test_base::ensure_enough_alive_nodes(int min_node_co return nodes; } -void meta_test_base::create_app(const std::string &name, uint32_t partition_count) +void meta_test_base::create_app(const std::string &name, + int32_t partition_count, + int32_t replica_count) { configuration_create_app_request req; configuration_create_app_response resp; req.app_name = name; req.options.app_type = "simple_kv"; req.options.partition_count = partition_count; - req.options.replica_count = 3; + req.options.replica_count = replica_count; req.options.success_if_exist = false; req.options.is_stateful = true; req.options.envs["value_version"] = "1"; diff --git a/src/meta/test/meta_test_base.h b/src/meta/test/meta_test_base.h index 2f27e883a4..25f3eed42f 100644 --- a/src/meta/test/meta_test_base.h +++ b/src/meta/test/meta_test_base.h @@ -57,8 +57,13 @@ class meta_test_base : public testing::Test std::vector ensure_enough_alive_nodes(int min_node_count); - // create an app for test with specified name and specified partition count - void create_app(const std::string &name, uint32_t partition_count); + // Create an app for test with specified name, partition count and replica count. + void create_app(const std::string &name, int32_t partition_count, int32_t replica_count); + + void create_app(const std::string &name, int32_t partition_count) + { + create_app(name, partition_count, 3); + } void create_app(const std::string &name) { create_app(name, 8); } diff --git a/src/meta/test/misc/misc.cpp b/src/meta/test/misc/misc.cpp index 149f0735e1..a3bd371445 100644 --- a/src/meta/test/misc/misc.cpp +++ b/src/meta/test/misc/misc.cpp @@ -27,6 +27,7 @@ #include "misc.h" #include +#include // IWYU pragma: no_include #include #include @@ -47,9 +48,10 @@ #include "duplication_types.h" #include "meta_admin_types.h" #include "metadata_types.h" -#include "runtime/rpc/dns_resolver.h" // IWYU pragma: keep -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/dns_resolver.h" // IWYU pragma: keep +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "utils/filesystem.h" #include "utils/fmt_logging.h" #include "utils/rand.h" @@ -76,11 +78,11 @@ void verbose_apps(const app_mapper &input_apps) for (const auto &apps : input_apps) { const std::shared_ptr &app = apps.second; std::cout << apps.first << " " << app->partition_count << std::endl; - for (int i = 0; i < app->partition_count; ++i) { - std::cout << app->partitions[i].hp_secondaries.size() + 1 << " " - << app->partitions[i].hp_primary; - for (int j = 0; j < app->partitions[i].hp_secondaries.size(); ++j) { - std::cout << " " << app->partitions[i].hp_secondaries[j]; + CHECK_EQ(app->partition_count, app->pcs.size()); + for (const auto &pc : app->pcs) { + std::cout << pc.hp_secondaries.size() + 1 << " " << pc.hp_primary; + for (const auto &secondary : pc.hp_secondaries) { + std::cout << " " << secondary; } std::cout << std::endl; } @@ -99,7 +101,7 @@ void generate_node_mapper( for (auto &kv : input_apps) { const std::shared_ptr &app = kv.second; - for (const dsn::partition_configuration &pc : app->partitions) { + for (const auto &pc : app->pcs) { node_state *ns; if (pc.hp_primary) { ns = get_node_state(output_nodes, pc.hp_primary, true); @@ -117,7 +119,7 @@ void generate_node_mapper( void generate_app(/*out*/ std::shared_ptr &app, const std::vector &node_list) { - for (dsn::partition_configuration &pc : app->partitions) { + for (auto &pc : app->pcs) { pc.ballot = random32(1, 10000); std::vector indices(3, 0); indices[0] = random32(0, node_list.size() - 3); @@ -145,18 +147,18 @@ void generate_app_serving_replica_info(/*out*/ std::shared_ptrpartition_count; ++i) { - config_context &cc = app->helpers->contexts[i]; - dsn::partition_configuration &pc = app->partitions[i]; + auto &cc = app->helpers->contexts[i]; + auto &pc = app->pcs[i]; replica_info ri; snprintf(buffer, 256, "disk%u", dsn::rand::next_u32(1, total_disks)); ri.disk_tag = buffer; cc.collect_serving_replica(pc.hp_primary, ri); - for (const auto &hp : pc.hp_secondaries) { + for (const auto &secondary : pc.hp_secondaries) { snprintf(buffer, 256, "disk%u", dsn::rand::next_u32(1, total_disks)); ri.disk_tag = buffer; - cc.collect_serving_replica(hp, ri); + cc.collect_serving_replica(secondary, ri); } } } @@ -178,14 +180,14 @@ void generate_apps(/*out*/ dsn::replication::app_mapper &mapper, info.app_type = "test"; info.max_replica_count = 3; info.partition_count = random32(partitions_range.first, partitions_range.second); - std::shared_ptr the_app = app_state::create(info); - generate_app(the_app, node_list); + std::shared_ptr app = app_state::create(info); + generate_app(app, node_list); if (generate_serving_info) { - generate_app_serving_replica_info(the_app, disks_per_node); + generate_app_serving_replica_info(app, disks_per_node); } LOG_DEBUG("generated app, partitions({})", info.partition_count); - mapper.emplace(the_app->app_id, the_app); + mapper.emplace(app->app_id, app); } } @@ -195,15 +197,13 @@ void generate_node_fs_manager(const app_mapper &apps, int total_disks) { nfm.clear(); - const char *prefix = "/home/work/"; - char pid_dir[256]; + std::string prefix; + CHECK(dsn::utils::filesystem::get_current_directory(prefix), ""); std::vector data_dirs(total_disks); std::vector tags(total_disks); for (int i = 0; i < data_dirs.size(); ++i) { - snprintf(pid_dir, 256, "%sdisk%d", prefix, i + 1); - data_dirs[i] = pid_dir; - snprintf(pid_dir, 256, "disk%d", i + 1); - tags[i] = pid_dir; + data_dirs[i] = fmt::format("{}disk{}", prefix, i + 1); + tags[i] = fmt::format("disk{}", i + 1); } for (const auto &kv : nodes) { @@ -215,15 +215,13 @@ void generate_node_fs_manager(const app_mapper &apps, manager.initialize(data_dirs, tags); ns.for_each_partition([&](const dsn::gpid &pid) { const config_context &cc = *get_config_context(apps, pid); - snprintf(pid_dir, - 256, - "%s%s/%d.%d.test", - prefix, - cc.find_from_serving(ns.host_port())->disk_tag.c_str(), - pid.get_app_id(), - pid.get_partition_index()); - LOG_DEBUG("concat pid_dir({}) of node({})", pid_dir, ns.host_port()); - manager.add_replica(pid, pid_dir); + const auto dir = fmt::format("{}{}/{}.{}.test", + prefix, + cc.find_from_serving(ns.host_port())->disk_tag, + pid.get_app_id(), + pid.get_partition_index()); + LOG_DEBUG("concat {} of node({})", dir, ns.host_port()); + manager.add_replica(pid, dir); return true; }); } @@ -410,19 +408,17 @@ void proposal_action_check_and_apply(const configuration_proposal_action &act, void migration_check_and_apply(app_mapper &apps, node_mapper &nodes, - migration_list &ml, + const migration_list &ml, nodes_fs_manager *manager) { int i = 0; - for (auto kv = ml.begin(); kv != ml.end(); ++kv) { - std::shared_ptr &proposal = kv->second; + for (const auto &[_, proposal] : ml) { LOG_DEBUG("the {}th round of proposal, gpid({})", i++, proposal->gpid); - std::shared_ptr &the_app = apps.find(proposal->gpid.get_app_id())->second; + const auto &app = apps.find(proposal->gpid.get_app_id())->second; - CHECK_EQ(proposal->gpid.get_app_id(), the_app->app_id); - CHECK_LT(proposal->gpid.get_partition_index(), the_app->partition_count); - dsn::partition_configuration &pc = - the_app->partitions[proposal->gpid.get_partition_index()]; + CHECK_EQ(proposal->gpid.get_app_id(), app->app_id); + CHECK_LT(proposal->gpid.get_partition_index(), app->partition_count); + const auto &pc = app->pcs[proposal->gpid.get_partition_index()]; CHECK(pc.hp_primary, ""); CHECK_EQ(pc.hp_secondaries.size(), 2); @@ -462,7 +458,7 @@ void app_mapper_compare(const app_mapper &mapper1, const app_mapper &mapper2) if (app1->status == dsn::app_status::AS_AVAILABLE) { CHECK_EQ(app1->partition_count, app2->partition_count); for (unsigned int i = 0; i < app1->partition_count; ++i) { - CHECK(is_partition_config_equal(app1->partitions[i], app2->partitions[i]), ""); + CHECK(is_partition_config_equal(app1->pcs[i], app2->pcs[i]), ""); } } } diff --git a/src/meta/test/misc/misc.h b/src/meta/test/misc/misc.h index 45ab38ac1f..bb9a23a881 100644 --- a/src/meta/test/misc/misc.h +++ b/src/meta/test/misc/misc.h @@ -37,7 +37,7 @@ #include #include "meta/meta_data.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" namespace dsn { class gpid; @@ -111,7 +111,7 @@ void generate_apps(/*out*/ dsn::replication::app_mapper &apps, void migration_check_and_apply( /*in-out*/ dsn::replication::app_mapper &apps, /*in-out*/ dsn::replication::node_mapper &nodes, - /*in-out*/ dsn::replication::migration_list &ml, + /*in*/ const dsn::replication::migration_list &ml, /*in-out*/ nodes_fs_manager *manager); // when the test need to track the disk info, please input the fs_manager of all disks, diff --git a/src/meta/test/run.sh b/src/meta/test/run.sh index fe9a116961..a27f485717 100755 --- a/src/meta/test/run.sh +++ b/src/meta/test/run.sh @@ -54,9 +54,9 @@ if [ $? -ne 0 ]; then echo "run dsn.meta.test failed" echo "---- ls ----" ls -l - if find . -name log.1.txt; then - echo "---- tail -n 100 log.1.txt ----" - tail -n 100 `find . -name log.1.txt` + if [ `find . -name pegasus.log.* | wc -l` -ne 0 ]; then + echo "---- tail -n 100 pegasus.log.* ----" + tail -n 100 `find . -name pegasus.log.*` fi if [ -f core ]; then echo "---- gdb ./dsn.meta.test core ----" diff --git a/src/meta/test/server_state_restore_test.cpp b/src/meta/test/server_state_restore_test.cpp index 6c997b69d0..395aa4444d 100644 --- a/src/meta/test/server_state_restore_test.cpp +++ b/src/meta/test/server_state_restore_test.cpp @@ -33,8 +33,8 @@ #include "meta/meta_service.h" #include "meta/server_state.h" #include "meta_test_base.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/zlocks.h" diff --git a/src/meta/test/server_state_test.cpp b/src/meta/test/server_state_test.cpp index 7fdee4588d..b77890d314 100644 --- a/src/meta/test/server_state_test.cpp +++ b/src/meta/test/server_state_test.cpp @@ -42,8 +42,8 @@ #include "meta/server_state.h" #include "meta_admin_types.h" #include "meta_service_test_app.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "utils/error_code.h" #include "utils/flags.h" diff --git a/src/meta/test/state_sync_test.cpp b/src/meta/test/state_sync_test.cpp index 7c08181a80..fedb1ab384 100644 --- a/src/meta/test/state_sync_test.cpp +++ b/src/meta/test/state_sync_test.cpp @@ -46,15 +46,15 @@ #include "meta/test/misc/misc.h" #include "meta_admin_types.h" #include "meta_service_test_app.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/flags.h" #include "utils/strings.h" -#include "utils/utils.h" #include "utils/test_macros.h" +#include "utils/utils.h" DSN_DECLARE_string(cluster_root); DSN_DECLARE_string(meta_state_service_type); @@ -75,7 +75,7 @@ static void random_assign_partition_config(std::shared_ptr &app, }; int max_servers = (server_list.size() - 1) * 2 - 1; - for (dsn::partition_configuration &pc : app->partitions) { + for (auto &pc : app->pcs) { int start = 0; std::vector indices; for (int i = 0; i < max_replica_count && start <= max_servers; ++i) { @@ -169,7 +169,7 @@ void meta_service_test_app::state_sync_test() random_assign_partition_config(app, server_list, 3); if (app->status == dsn::app_status::AS_DROPPING) { for (int j = 0; j < app->partition_count; ++j) { - app->partitions[j].partition_flags = pc_flags::dropped; + app->pcs[j].partition_flags = pc_flags::dropped; } } } @@ -218,11 +218,12 @@ void meta_service_test_app::state_sync_test() dsn::error_code ec; dsn::dist::meta_state_service *storage = svc->get_remote_storage(); storage - ->delete_node(apps_root, - true, - LPC_META_CALLBACK, - [&ec](dsn::error_code error) { ec = error; }, - nullptr) + ->delete_node( + apps_root, + true, + LPC_META_CALLBACK, + [&ec](dsn::error_code error) { ec = error; }, + nullptr) ->wait(); ASSERT_TRUE(dsn::ERR_OK == ec || dsn::ERR_OBJECT_NOT_FOUND == ec); } @@ -280,7 +281,7 @@ void meta_service_test_app::state_sync_test() dsn::gpid gpid = {15, 0}; dsn::partition_configuration pc; ASSERT_TRUE(ss2->query_configuration_by_gpid(gpid, pc)); - ASSERT_EQ(ss1->_all_apps[15]->partitions[0], pc); + ASSERT_EQ(ss1->_all_apps[15]->pcs[0], pc); // 1.2 dropped app if (!drop_set.empty()) { gpid.set_app_id(drop_set[0]); @@ -300,7 +301,7 @@ void meta_service_test_app::state_sync_test() ASSERT_EQ(app_created->partition_count, resp.partition_count); ASSERT_EQ(resp.partitions.size(), 3); for (int i = 1; i <= 3; ++i) - ASSERT_EQ(resp.partitions[i - 1], app_created->partitions[i]); + ASSERT_EQ(resp.partitions[i - 1], app_created->pcs[i]); // 2.2 no exist app req.app_name = "make_no_sense"; @@ -341,11 +342,12 @@ void meta_service_test_app::state_sync_test() dsn::dist::meta_state_service *storage = svc->get_remote_storage(); storage - ->delete_node(ss2->get_partition_path(dsn::gpid{apps_count, 0}), - false, - LPC_META_CALLBACK, - [&ec](dsn::error_code error) { ec = error; }, - nullptr) + ->delete_node( + ss2->get_partition_path(dsn::gpid{apps_count, 0}), + false, + LPC_META_CALLBACK, + [&ec](dsn::error_code error) { ec = error; }, + nullptr) ->wait(); ASSERT_EQ(ec, dsn::ERR_OK); diff --git a/src/meta/test/update_configuration_test.cpp b/src/meta/test/update_configuration_test.cpp index d8a947f28e..c0ef9b0d63 100644 --- a/src/meta/test/update_configuration_test.cpp +++ b/src/meta/test/update_configuration_test.cpp @@ -54,13 +54,13 @@ #include "meta_admin_types.h" #include "meta_service_test_app.h" #include "metadata_types.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" +#include "task/async_calls.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/filesystem.h" @@ -161,8 +161,9 @@ class dummy_partition_guardian : public partition_guardian { action.type = config_type::CT_INVALID; const dsn::partition_configuration &pc = *get_config(*view.apps, gpid); - if (pc.hp_primary && pc.hp_secondaries.size() == 2) + if (pc.hp_primary && pc.hp_secondaries.size() == 2) { return pc_status::healthy; + } return pc_status::ill; } }; @@ -248,12 +249,12 @@ void meta_service_test_app::update_configuration_test() std::vector nodes; generate_node_list(nodes, 4, 4); - dsn::partition_configuration &pc0 = app->partitions[0]; + auto &pc0 = app->pcs[0]; SET_IP_AND_HOST_PORT_BY_DNS(pc0, primary, nodes[0]); SET_IPS_AND_HOST_PORTS_BY_DNS(pc0, secondaries, nodes[1], nodes[2]); pc0.ballot = 3; - dsn::partition_configuration &pc1 = app->partitions[1]; + auto &pc1 = app->pcs[1]; SET_IP_AND_HOST_PORT_BY_DNS(pc1, primary, nodes[1]); SET_IPS_AND_HOST_PORTS_BY_DNS(pc1, secondaries, nodes[0], nodes[2]); pc1.ballot = 3; @@ -326,7 +327,7 @@ void meta_service_test_app::adjust_dropped_size() generate_node_list(nodes, 10, 10); // first, the replica is healthy, and there are 2 dropped - dsn::partition_configuration &pc = app->partitions[0]; + auto &pc = app->pcs[0]; SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, nodes[0]); SET_IPS_AND_HOST_PORTS_BY_DNS(pc, secondaries, nodes[1], nodes[2]); pc.ballot = 10; @@ -394,8 +395,8 @@ static void clone_app_mapper(app_mapper &output, const app_mapper &input) const std::shared_ptr &old_app = iter.second; dsn::app_info info = *old_app; std::shared_ptr new_app = app_state::create(info); - for (unsigned int i = 0; i != old_app->partition_count; ++i) - new_app->partitions[i] = old_app->partitions[i]; + CHECK_EQ(old_app->partition_count, old_app->pcs.size()); + new_app->pcs = old_app->pcs; output.emplace(new_app->app_id, new_app); } } @@ -453,11 +454,11 @@ void meta_service_test_app::apply_balancer_test() ss->set_replica_migration_subscriber_for_test(migration_actions); while (true) { - dsn::task_ptr tsk = - dsn::tasking::enqueue(LPC_META_STATE_NORMAL, - nullptr, - [&result, ss]() { result = ss->check_all_partitions(); }, - server_state::sStateHash); + dsn::task_ptr tsk = dsn::tasking::enqueue( + LPC_META_STATE_NORMAL, + nullptr, + [&result, ss]() { result = ss->check_all_partitions(); }, + server_state::sStateHash); tsk->wait(); if (result) break; @@ -498,12 +499,12 @@ void meta_service_test_app::cannot_run_balancer_test() info.partition_count = 1; info.status = dsn::app_status::AS_AVAILABLE; - std::shared_ptr the_app = app_state::create(info); - svc->_state->_all_apps.emplace(info.app_id, the_app); - svc->_state->_exist_apps.emplace(info.app_name, the_app); + std::shared_ptr app = app_state::create(info); + svc->_state->_all_apps.emplace(info.app_id, app); + svc->_state->_exist_apps.emplace(info.app_name, app); svc->_state->_table_metric_entities.create_entity(info.app_id, info.partition_count); - dsn::partition_configuration &pc = the_app->partitions[0]; + auto &pc = app->pcs[0]; SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, nodes[0]); SET_IPS_AND_HOST_PORTS_BY_DNS(pc, secondaries, nodes[1], nodes[2]); @@ -535,11 +536,11 @@ void meta_service_test_app::cannot_run_balancer_test() // some apps are staging REGENERATE_NODE_MAPPER; - the_app->status = dsn::app_status::AS_DROPPING; + app->status = dsn::app_status::AS_DROPPING; ASSERT_FALSE(svc->_state->check_all_partitions()); // call function can run balancer - the_app->status = dsn::app_status::AS_AVAILABLE; + app->status = dsn::app_status::AS_AVAILABLE; ASSERT_TRUE(svc->_state->can_run_balancer()); // recover original FLAGS_min_live_node_count_for_unfreeze diff --git a/src/nfs/nfs_client_impl.cpp b/src/nfs/nfs_client_impl.cpp index e736b2749f..9ef555e87c 100644 --- a/src/nfs/nfs_client_impl.cpp +++ b/src/nfs/nfs_client_impl.cpp @@ -26,15 +26,17 @@ #include "nfs_client_impl.h" +#include // IWYU pragma: no_include #include -#include "absl/strings/string_view.h" +#include #include "fmt/core.h" #include "nfs/nfs_code_definition.h" #include "nfs/nfs_node.h" -#include "runtime/rpc/dns_resolver.h" // IWYU pragma: keep -#include "runtime/rpc/rpc_host_port.h" +#include "nlohmann/json.hpp" +#include "rpc/dns_resolver.h" // IWYU pragma: keep +#include "rpc/rpc_host_port.h" #include "utils/blob.h" #include "utils/command_manager.h" #include "utils/filesystem.h" @@ -149,12 +151,13 @@ void nfs_client_impl::begin_remote_copy(std::shared_ptr &rc req->nfs_task = nfs_task; req->is_finished = false; - async_nfs_get_file_size(req->file_size_req, - [=](error_code err, get_file_size_response &&resp) { - end_get_file_size(err, std::move(resp), req); - }, - std::chrono::milliseconds(FLAGS_rpc_timeout_ms), - req->file_size_req.source); + async_nfs_get_file_size( + req->file_size_req, + [=](error_code err, get_file_size_response &&resp) { + end_get_file_size(err, std::move(resp), req); + }, + std::chrono::milliseconds(FLAGS_rpc_timeout_ms), + req->file_size_req.source); } void nfs_client_impl::end_get_file_size(::dsn::error_code err, @@ -228,7 +231,8 @@ void nfs_client_impl::end_get_file_size(::dsn::error_code err, _copy_requests_low.push(std::move(copy_requests)); } - tasking::enqueue(LPC_NFS_COPY_FILE, nullptr, [this]() { continue_copy(); }, 0); + tasking::enqueue( + LPC_NFS_COPY_FILE, nullptr, [this]() { continue_copy(); }, 0); } void nfs_client_impl::continue_copy() @@ -303,20 +307,20 @@ void nfs_client_impl::continue_copy() copy_req.is_last = req->is_last; copy_req.__set_source_disk_tag(ureq->file_size_req.source_disk_tag); copy_req.__set_pid(ureq->file_size_req.pid); - req->remote_copy_task = - async_nfs_copy(copy_req, - [=](error_code err, copy_response &&resp) { - end_copy(err, std::move(resp), req); - // reset task to release memory quickly. - // should do this after end_copy() done. - if (req->is_ready_for_write) { - ::dsn::task_ptr tsk; - zauto_lock l(req->lock); - tsk = std::move(req->remote_copy_task); - } - }, - std::chrono::milliseconds(FLAGS_rpc_timeout_ms), - req->file_ctx->user_req->file_size_req.source); + req->remote_copy_task = async_nfs_copy( + copy_req, + [=](error_code err, copy_response &&resp) { + end_copy(err, std::move(resp), req); + // reset task to release memory quickly. + // should do this after end_copy() done. + if (req->is_ready_for_write) { + ::dsn::task_ptr tsk; + zauto_lock l(req->lock); + tsk = std::move(req->remote_copy_task); + } + }, + std::chrono::milliseconds(FLAGS_rpc_timeout_ms), + req->file_ctx->user_req->file_size_req.source); } else { --ureq->concurrent_copy_count; --_concurrent_copy_request_count; diff --git a/src/nfs/nfs_client_impl.h b/src/nfs/nfs_client_impl.h index 183ac38a97..c0721f4b59 100644 --- a/src/nfs/nfs_client_impl.h +++ b/src/nfs/nfs_client_impl.h @@ -41,10 +41,10 @@ #include "aio/file_io.h" #include "nfs_code_definition.h" #include "nfs_types.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_address.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_tracker.h" #include "utils/TokenBucket.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" @@ -55,6 +55,7 @@ namespace dsn { class command_deregister; class disk_file; + namespace utils { class token_buckets; } // namespace utils diff --git a/src/nfs/nfs_code_definition.h b/src/nfs/nfs_code_definition.h index a517d6c980..a848cf9028 100644 --- a/src/nfs/nfs_code_definition.h +++ b/src/nfs/nfs_code_definition.h @@ -40,5 +40,5 @@ DEFINE_TASK_CODE(LPC_NFS_FILE_CLOSE_TIMER, TASK_PRIORITY_COMMON, THREAD_POOL_DEF DEFINE_TASK_CODE_AIO(LPC_NFS_WRITE, TASK_PRIORITY_COMMON, THREAD_POOL_DEFAULT) DEFINE_TASK_CODE_AIO(LPC_NFS_COPY_FILE, TASK_PRIORITY_COMMON, THREAD_POOL_DEFAULT) -} -} +} // namespace service +} // namespace dsn diff --git a/src/nfs/nfs_node.cpp b/src/nfs/nfs_node.cpp index e4282dcbe1..ea0b8564cf 100644 --- a/src/nfs/nfs_node.cpp +++ b/src/nfs/nfs_node.cpp @@ -109,4 +109,4 @@ aio_task_ptr nfs_node::copy_remote_files(std::shared_ptr &r call(request, cb); return cb; } -} +} // namespace dsn diff --git a/src/nfs/nfs_node.h b/src/nfs/nfs_node.h index f22810cd84..cda645d3c9 100644 --- a/src/nfs/nfs_node.h +++ b/src/nfs/nfs_node.h @@ -32,19 +32,21 @@ #include "aio/aio_task.h" #include "common/gpid.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_task.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/error_code.h" namespace dsn { class task_tracker; + namespace service { class copy_request; class copy_response; class get_file_size_request; class get_file_size_response; } // namespace service + template class rpc_replier; @@ -113,4 +115,4 @@ class nfs_node protected: virtual void call(std::shared_ptr rci, aio_task *callback) = 0; }; -} +} // namespace dsn diff --git a/src/nfs/nfs_server_impl.cpp b/src/nfs/nfs_server_impl.cpp index df1418822d..865922f31d 100644 --- a/src/nfs/nfs_server_impl.cpp +++ b/src/nfs/nfs_server_impl.cpp @@ -30,18 +30,21 @@ #include #include #include -#include #include -#include "absl/strings/string_view.h" +#include +#include "fmt/core.h" // IWYU pragma: keep +#include "gutil/map_util.h" #include "nfs/nfs_code_definition.h" +#include "nlohmann/json.hpp" #include "runtime/api_layer1.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" #include "utils/TokenBucket.h" #include "utils/autoref_ptr.h" #include "utils/env.h" #include "utils/filesystem.h" #include "utils/flags.h" +#include "utils/ports.h" #include "utils/utils.h" METRIC_DEFINE_counter( @@ -94,24 +97,25 @@ void nfs_service_impl::on_copy(const ::dsn::service::copy_request &request, do { zauto_lock l(_handles_map_lock); - auto it = _handles_map.find(file_path); // find file handle cache first - if (it == _handles_map.end()) { + auto &fh = gutil::LookupOrInsert(&_handles_map, file_path, {}); + if (!fh) { dfile = file::open(file_path, file::FileOpenType::kReadOnly); - if (dfile == nullptr) { + if (dsn_unlikely(dfile == nullptr)) { LOG_ERROR("[nfs_service] open file {} failed", file_path); + gutil::EraseKeyReturnValuePtr(&_handles_map, file_path); ::dsn::service::copy_response resp; resp.error = ERR_OBJECT_NOT_FOUND; reply(resp); return; } - - auto fh = std::make_shared(); + fh = std::make_shared(); fh->file_handle = dfile; - it = _handles_map.insert(std::make_pair(file_path, std::move(fh))).first; + } else { + dfile = fh->file_handle; } - dfile = it->second->file_handle; - it->second->file_access_count++; - it->second->last_access_time = dsn_now_ms(); + DCHECK(fh, ""); + fh->file_access_count++; + fh->last_access_time = dsn_now_ms(); } while (false); CHECK_NOTNULL(dfile, ""); diff --git a/src/nfs/nfs_server_impl.h b/src/nfs/nfs_server_impl.h index f76126cee0..b713be83d8 100644 --- a/src/nfs/nfs_server_impl.h +++ b/src/nfs/nfs_server_impl.h @@ -36,8 +36,8 @@ #include "nfs_code_definition.h" #include "nfs_types.h" #include "runtime/serverlet.h" -#include "runtime/task/task.h" -#include "runtime/task/task_tracker.h" +#include "task/task.h" +#include "task/task_tracker.h" #include "utils/blob.h" #include "utils/command_manager.h" #include "utils/error_code.h" diff --git a/src/nfs/test/main.cpp b/src/nfs/test/main.cpp index c8ebd6d4f8..572283702f 100644 --- a/src/nfs/test/main.cpp +++ b/src/nfs/test/main.cpp @@ -37,10 +37,10 @@ #include "common/gpid.h" #include "gtest/gtest.h" #include "nfs/nfs_node.h" +#include "rpc/rpc_host_port.h" #include "runtime/app_model.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task_code.h" #include "runtime/tool_api.h" +#include "task/task_code.h" #include "test_util/test_util.h" #include "utils/autoref_ptr.h" #include "utils/env.h" @@ -109,22 +109,23 @@ TEST_P(nfs_test, basic) ASSERT_TRUE(dst_filenames.empty()); aio_result r; - auto t = nfs->copy_remote_files(dsn::host_port("localhost", 20101), - "default", - ".", - kSrcFilenames, - "default", - kDstDir, - fake_pid, - false, - false, - LPC_AIO_TEST_NFS, - nullptr, - [&r](dsn::error_code err, size_t sz) { - r.err = err; - r.sz = sz; - }, - 0); + auto t = nfs->copy_remote_files( + dsn::host_port("localhost", 20101), + "default", + ".", + kSrcFilenames, + "default", + kDstDir, + fake_pid, + false, + false, + LPC_AIO_TEST_NFS, + nullptr, + [&r](dsn::error_code err, size_t sz) { + r.err = err; + r.sz = sz; + }, + 0); ASSERT_NE(nullptr, t); ASSERT_TRUE(t->wait(20000)); ASSERT_EQ(r.err, t->error()); @@ -151,22 +152,23 @@ TEST_P(nfs_test, basic) // copy files to the destination directory, files will be overwritten. { aio_result r; - auto t = nfs->copy_remote_files(dsn::host_port("localhost", 20101), - "default", - ".", - kSrcFilenames, - "default", - kDstDir, - fake_pid, - true, - false, - LPC_AIO_TEST_NFS, - nullptr, - [&r](dsn::error_code err, size_t sz) { - r.err = err; - r.sz = sz; - }, - 0); + auto t = nfs->copy_remote_files( + dsn::host_port("localhost", 20101), + "default", + ".", + kSrcFilenames, + "default", + kDstDir, + fake_pid, + true, + false, + LPC_AIO_TEST_NFS, + nullptr, + [&r](dsn::error_code err, size_t sz) { + r.err = err; + r.sz = sz; + }, + 0); ASSERT_NE(nullptr, t); ASSERT_TRUE(t->wait(20000)); ASSERT_EQ(r.err, t->error()); @@ -203,21 +205,22 @@ TEST_P(nfs_test, basic) ASSERT_FALSE(utils::filesystem::directory_exists(kNewDstDir)); aio_result r; - auto t = nfs->copy_remote_directory(dsn::host_port("localhost", 20101), - "default", - kDstDir, - "default", - kNewDstDir, - fake_pid, - false, - false, - LPC_AIO_TEST_NFS, - nullptr, - [&r](dsn::error_code err, size_t sz) { - r.err = err; - r.sz = sz; - }, - 0); + auto t = nfs->copy_remote_directory( + dsn::host_port("localhost", 20101), + "default", + kDstDir, + "default", + kNewDstDir, + fake_pid, + false, + false, + LPC_AIO_TEST_NFS, + nullptr, + [&r](dsn::error_code err, size_t sz) { + r.err = err; + r.sz = sz; + }, + 0); ASSERT_NE(nullptr, t); ASSERT_TRUE(t->wait(20000)); ASSERT_EQ(r.err, t->error()); diff --git a/src/perf_counter/perf_counter.h b/src/perf_counter/perf_counter.h index 1425bc228c..3118b8aacf 100644 --- a/src/perf_counter/perf_counter.h +++ b/src/perf_counter/perf_counter.h @@ -35,7 +35,8 @@ #include "utils/autoref_ptr.h" #include "utils/fmt_utils.h" -typedef enum dsn_perf_counter_type_t { +typedef enum dsn_perf_counter_type_t +{ COUNTER_TYPE_NUMBER, COUNTER_TYPE_VOLATILE_NUMBER, // special kind of NUMBER which will be reset on get COUNTER_TYPE_RATE, @@ -45,7 +46,8 @@ typedef enum dsn_perf_counter_type_t { } dsn_perf_counter_type_t; USER_DEFINED_ENUM_FORMATTER(dsn_perf_counter_type_t) -typedef enum dsn_perf_counter_percentile_type_t { +typedef enum dsn_perf_counter_percentile_type_t +{ COUNTER_PERCENTILE_50, COUNTER_PERCENTILE_90, COUNTER_PERCENTILE_95, diff --git a/src/perf_counter/perf_counter_atomic.h b/src/perf_counter/perf_counter_atomic.h index bd7fbd152c..40ca5a4444 100644 --- a/src/perf_counter/perf_counter_atomic.h +++ b/src/perf_counter/perf_counter_atomic.h @@ -432,4 +432,4 @@ class perf_counter_number_percentile_atomic : public perf_counter }; #pragma pack(pop) -} // namespace +} // namespace dsn diff --git a/src/perf_counter/perf_counter_wrapper.h b/src/perf_counter/perf_counter_wrapper.h index 26d25e551d..6bd51a5bd5 100644 --- a/src/perf_counter/perf_counter_wrapper.h +++ b/src/perf_counter/perf_counter_wrapper.h @@ -108,4 +108,4 @@ class perf_counter_wrapper // use raw pointer to make the class object small, so it can be accessed quickly dsn::perf_counter *_counter; }; -} +} // namespace dsn diff --git a/src/perf_counter/perf_counters.cpp b/src/perf_counter/perf_counters.cpp index 24e26c699e..65a5de162d 100644 --- a/src/perf_counter/perf_counters.cpp +++ b/src/perf_counter/perf_counters.cpp @@ -37,7 +37,7 @@ #include "perf_counter/perf_counter_utils.h" #include "runtime/api_layer1.h" #include "runtime/service_engine.h" -#include "runtime/task/task.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/command_manager.h" #include "utils/fmt_logging.h" diff --git a/src/ranger/ranger_resource_policy_manager.cpp b/src/ranger/ranger_resource_policy_manager.cpp index 4de545da83..d0ac9362e0 100644 --- a/src/ranger/ranger_resource_policy_manager.cpp +++ b/src/ranger/ranger_resource_policy_manager.cpp @@ -46,9 +46,9 @@ #include "ranger/ranger_resource_policy_manager.h" #include "ranger_resource_policy_manager.h" #include "rapidjson/allocators.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/blob.h" #include "utils/filesystem.h" #include "utils/flags.h" @@ -207,12 +207,13 @@ void ranger_resource_policy_manager::start() CHECK_NOTNULL(_meta_svc, ""); _ranger_policy_meta_root = dsn::utils::filesystem::concat_path_unix_style( _meta_svc->cluster_root(), "ranger_policy_meta_root"); - tasking::enqueue_timer(LPC_USE_RANGER_ACCESS_CONTROL, - &_tracker, - [this]() { this->update_policies_from_ranger_service(); }, - std::chrono::seconds(FLAGS_update_ranger_policy_interval_sec), - 0, - std::chrono::milliseconds(1)); + tasking::enqueue_timer( + LPC_USE_RANGER_ACCESS_CONTROL, + &_tracker, + [this]() { this->update_policies_from_ranger_service(); }, + std::chrono::seconds(FLAGS_update_ranger_policy_interval_sec), + 0, + std::chrono::milliseconds(1)); } access_control_result ranger_resource_policy_manager::allowed( @@ -499,11 +500,12 @@ void ranger_resource_policy_manager::start_to_dump_and_sync_policies() } CHECK_EQ(err, dsn::ERR_TIMEOUT); LOG_ERROR("Create Ranger policy meta root timeout, retry later."); - dsn::tasking::enqueue(LPC_USE_RANGER_ACCESS_CONTROL, - &_tracker, - [this]() { start_to_dump_and_sync_policies(); }, - 0, - kLoadRangerPolicyRetryDelayMs); + dsn::tasking::enqueue( + LPC_USE_RANGER_ACCESS_CONTROL, + &_tracker, + [this]() { start_to_dump_and_sync_policies(); }, + 0, + kLoadRangerPolicyRetryDelayMs); }); } @@ -534,11 +536,12 @@ void ranger_resource_policy_manager::dump_policies_to_remote_storage() // The return error code is not 'ERR_TIMEOUT', use assert here. CHECK_EQ(e, dsn::ERR_TIMEOUT); LOG_ERROR("Dump Ranger policies to remote storage timeout, retry later."); - dsn::tasking::enqueue(LPC_USE_RANGER_ACCESS_CONTROL, - &_tracker, - [this]() { dump_policies_to_remote_storage(); }, - 0, - kLoadRangerPolicyRetryDelayMs); + dsn::tasking::enqueue( + LPC_USE_RANGER_ACCESS_CONTROL, + &_tracker, + [this]() { dump_policies_to_remote_storage(); }, + 0, + kLoadRangerPolicyRetryDelayMs); }); } diff --git a/src/ranger/ranger_resource_policy_manager.h b/src/ranger/ranger_resource_policy_manager.h index 974c0fe84d..876fbc2156 100644 --- a/src/ranger/ranger_resource_policy_manager.h +++ b/src/ranger/ranger_resource_policy_manager.h @@ -28,7 +28,7 @@ #include "ranger_resource_policy.h" #include "rapidjson/document.h" #include "ranger/access_type.h" -#include "runtime/task/task_tracker.h" +#include "task/task_tracker.h" #include "utils/enum_helper.h" #include "utils/error_code.h" #include "utils/synchronize.h" diff --git a/src/ranger/test/ranger_resource_policy_manager_test.cpp b/src/ranger/test/ranger_resource_policy_manager_test.cpp index a326dad48a..5df17cae0a 100644 --- a/src/ranger/test/ranger_resource_policy_manager_test.cpp +++ b/src/ranger/test/ranger_resource_policy_manager_test.cpp @@ -30,7 +30,7 @@ #include "ranger/access_type.h" #include "ranger/ranger_resource_policy.h" #include "ranger/ranger_resource_policy_manager.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/blob.h" #include "utils/flags.h" diff --git a/src/redis_protocol/proxy_lib/proxy_layer.cpp b/src/redis_protocol/proxy_lib/proxy_layer.cpp index f3f15e6d5a..10652237ea 100644 --- a/src/redis_protocol/proxy_lib/proxy_layer.cpp +++ b/src/redis_protocol/proxy_lib/proxy_layer.cpp @@ -21,10 +21,10 @@ #include #include "proxy_layer.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_spec.h" +#include "rpc/network.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_message.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/fmt_logging.h" @@ -62,7 +62,7 @@ proxy_stub::proxy_stub(const proxy_session::factory &f, void proxy_stub::on_rpc_request(dsn::message_ex *request) { - auto source = ::dsn::host_port::from_address(request->header->from_address); + const auto &source = request->header->from_address; std::shared_ptr session; { ::dsn::zauto_read_lock l(_lock); @@ -87,11 +87,10 @@ void proxy_stub::on_rpc_request(dsn::message_ex *request) void proxy_stub::on_recv_remove_session_request(dsn::message_ex *request) { - auto source = ::dsn::host_port::from_address(request->header->from_address); - remove_session(source); + remove_session(request->header->from_address); } -void proxy_stub::remove_session(dsn::host_port remote) +void proxy_stub::remove_session(dsn::rpc_address remote) { std::shared_ptr session; { @@ -114,9 +113,9 @@ proxy_session::proxy_session(proxy_stub *op, dsn::message_ex *first_msg) CHECK_NOTNULL(first_msg, "null msg when create session"); _backup_one_request->add_ref(); - _session_remote = ::dsn::host_port::from_address(_backup_one_request->header->from_address); + _session_remote = _backup_one_request->header->from_address; _session_remote_str = _session_remote.to_string(); - CHECK_EQ_MSG(_session_remote.type(), HOST_TYPE_IPV4, "invalid host_port type"); + CHECK_EQ_MSG(_session_remote.type(), HOST_TYPE_IPV4, "invalid rpc_address type"); } proxy_session::~proxy_session() diff --git a/src/redis_protocol/proxy_lib/proxy_layer.h b/src/redis_protocol/proxy_lib/proxy_layer.h index 99884074ca..82be0b9435 100644 --- a/src/redis_protocol/proxy_lib/proxy_layer.h +++ b/src/redis_protocol/proxy_lib/proxy_layer.h @@ -25,9 +25,10 @@ #include #include -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" #include "runtime/serverlet.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/threadpool_code.h" #include "utils/zlocks.h" @@ -79,8 +80,9 @@ class proxy_session : public std::enable_shared_from_this // when get message from raw parser, request & response of "dsn::message_ex*" are not in couple. // we need to backup one request to create a response struct. dsn::message_ex *_backup_one_request; - // the client for which this session served - dsn::host_port _session_remote; + // The client for which this session served for. + // The source IP address is possible to be reverse un-resolved, so use rpc_address directly. + dsn::rpc_address _session_remote; std::string _session_remote_str; }; @@ -107,14 +109,15 @@ class proxy_stub : public ::dsn::serverlet this->unregister_rpc_handler(RPC_CALL_RAW_MESSAGE); this->unregister_rpc_handler(RPC_CALL_RAW_SESSION_DISCONNECT); } - void remove_session(dsn::host_port remote_address); + void remove_session(dsn::rpc_address remote_address); private: void on_rpc_request(dsn::message_ex *request); void on_recv_remove_session_request(dsn::message_ex *); ::dsn::zrwlock_nr _lock; - std::unordered_map<::dsn::host_port, std::shared_ptr> _sessions; + // The source IP address is possible to be un-reverse resolved, so use rpc_address. + std::unordered_map<::dsn::rpc_address, std::shared_ptr> _sessions; proxy_session::factory _factory; ::dsn::host_port _uri_address; std::string _cluster; diff --git a/src/redis_protocol/proxy_lib/redis_parser.cpp b/src/redis_protocol/proxy_lib/redis_parser.cpp index 794d482e99..7e25620202 100644 --- a/src/redis_protocol/proxy_lib/redis_parser.cpp +++ b/src/redis_protocol/proxy_lib/redis_parser.cpp @@ -31,15 +31,15 @@ #include #include #include +#include -#include "absl/strings/string_view.h" #include "common/common.h" #include "common/replication_other_types.h" #include "pegasus/client.h" +#include "rpc/rpc_host_port.h" +#include "rpc/serialization.h" #include "rrdb/rrdb_types.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/serialization.h" #include "utils/api_utilities.h" #include "utils/binary_writer.h" #include "utils/error_code.h" @@ -207,7 +207,7 @@ void redis_parser::eat_all(char *dest, size_t length) bool redis_parser::end_array_size() { int32_t count = 0; - if (dsn_unlikely(!dsn::buf2int32(absl::string_view(_current_size), count))) { + if (dsn_unlikely(!dsn::buf2int32(std::string_view(_current_size), count))) { LOG_ERROR_PREFIX("invalid size string \"{}\"", _current_size); return false; } @@ -242,7 +242,7 @@ void redis_parser::append_current_bulk_string() bool redis_parser::end_bulk_string_size() { int32_t length = 0; - if (dsn_unlikely(!dsn::buf2int32(absl::string_view(_current_size), length))) { + if (dsn_unlikely(!dsn::buf2int32(std::string_view(_current_size), length))) { LOG_ERROR_PREFIX("invalid size string \"{}\"", _current_size); return false; } @@ -446,8 +446,9 @@ void redis_parser::set_internal(redis_parser::message_entry &entry) // with a reference to prevent the object from being destroyed std::shared_ptr ref_this = shared_from_this(); LOG_DEBUG_PREFIX("send SET command({})", entry.sequence_id); - auto on_set_reply = [ref_this, this, &entry]( - ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { + auto on_set_reply = [ref_this, this, &entry](::dsn::error_code ec, + dsn::message_ex *, + dsn::message_ex *response) { // when the "is_session_reset" flag is set, the socket may be broken. // so continue to reply the message is not necessary if (_is_session_reset.load(std::memory_order_acquire)) { @@ -551,8 +552,9 @@ void redis_parser::setex(message_entry &entry) } std::shared_ptr ref_this = shared_from_this(); - auto on_setex_reply = [ref_this, this, &entry]( - ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { + auto on_setex_reply = [ref_this, this, &entry](::dsn::error_code ec, + dsn::message_ex *, + dsn::message_ex *response) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_INFO_PREFIX("SETEX command seqid({}) got reply, but session has reset", entry.sequence_id); @@ -599,8 +601,9 @@ void redis_parser::get(message_entry &entry) } else { LOG_DEBUG_PREFIX("send GET command seqid({})", entry.sequence_id); std::shared_ptr ref_this = shared_from_this(); - auto on_get_reply = [ref_this, this, &entry]( - ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { + auto on_get_reply = [ref_this, this, &entry](::dsn::error_code ec, + dsn::message_ex *, + dsn::message_ex *response) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_INFO_PREFIX("GET command({}) got reply, but session has reset", entry.sequence_id); @@ -653,8 +656,9 @@ void redis_parser::del_internal(message_entry &entry) } else { LOG_DEBUG_PREFIX("send DEL command seqid({})", entry.sequence_id); std::shared_ptr ref_this = shared_from_this(); - auto on_del_reply = [ref_this, this, &entry]( - ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { + auto on_del_reply = [ref_this, this, &entry](::dsn::error_code ec, + dsn::message_ex *, + dsn::message_ex *response) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_INFO_PREFIX("DEL command seqid({}) got reply, but session has reset", entry.sequence_id); @@ -738,8 +742,9 @@ void redis_parser::ttl(message_entry &entry) } else { LOG_DEBUG_PREFIX("send PTTL/TTL command seqid({})", entry.sequence_id); std::shared_ptr ref_this = shared_from_this(); - auto on_ttl_reply = [ref_this, this, &entry, is_ttl]( - ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { + auto on_ttl_reply = [ref_this, this, &entry, is_ttl](::dsn::error_code ec, + dsn::message_ex *, + dsn::message_ex *response) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_INFO_PREFIX("TTL/PTTL command seqid({}) got reply, but session has reset", entry.sequence_id); @@ -832,7 +837,7 @@ void redis_parser::geo_radius(message_entry &entry) std::shared_ptr ref_this = shared_from_this(); auto search_callback = [ref_this, this, &entry, unit, WITHCOORD, WITHDIST, WITHHASH]( - int ec, std::list &&results) { + int ec, std::list &&results) { process_geo_radius_result( entry, unit, WITHCOORD, WITHDIST, WITHHASH, ec, std::move(results)); }; @@ -886,7 +891,7 @@ void redis_parser::geo_radius_by_member(message_entry &entry) std::shared_ptr ref_this = shared_from_this(); auto search_callback = [ref_this, this, &entry, unit, WITHCOORD, WITHDIST, WITHHASH]( - int ec, std::list &&results) { + int ec, std::list &&results) { process_geo_radius_result( entry, unit, WITHCOORD, WITHDIST, WITHHASH, ec, std::move(results)); }; @@ -945,7 +950,7 @@ void redis_parser::counter_internal(message_entry &entry) std::shared_ptr ref_this = shared_from_this(); auto on_incr_reply = [ref_this, this, command, &entry]( - ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { + ::dsn::error_code ec, dsn::message_ex *, dsn::message_ex *response) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_WARNING_PREFIX("command {} seqid({}) got reply, but session has reset", command, @@ -1143,7 +1148,7 @@ void redis_parser::geo_add(message_entry &entry) std::make_shared>(member_count); std::shared_ptr result(new redis_integer()); auto set_latlng_callback = [ref_this, this, &entry, result, set_count]( - int error_code, pegasus_client::internal_info &&info) { + int error_code, pegasus_client::internal_info &&info) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_INFO_PREFIX("GEOADD command seqid({}) got reply, but session has reset", entry.sequence_id); @@ -1243,8 +1248,10 @@ void redis_parser::geo_pos(message_entry &entry) std::make_shared>(member_count); std::shared_ptr result(new redis_array()); result->resize(member_count); - auto get_latlng_callback = [ref_this, this, &entry, result, get_count]( - int error_code, int index, double lat_degrees, double lng_degrees) { + auto get_latlng_callback = [ref_this, this, &entry, result, get_count](int error_code, + int index, + double lat_degrees, + double lng_degrees) { if (_is_session_reset.load(std::memory_order_acquire)) { LOG_INFO_PREFIX("GEOPOS command seqid({}) got reply, but session has reset", entry.sequence_id); diff --git a/src/redis_protocol/proxy_lib/redis_parser.h b/src/redis_protocol/proxy_lib/redis_parser.h index f66bce0002..fe0aff303c 100644 --- a/src/redis_protocol/proxy_lib/redis_parser.h +++ b/src/redis_protocol/proxy_lib/redis_parser.h @@ -33,8 +33,8 @@ #include "geo/lib/geo_client.h" #include "proxy_layer.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/rpc_stream.h" +#include "rpc/rpc_message.h" +#include "rpc/rpc_stream.h" #include "utils/blob.h" #include "utils/zlocks.h" @@ -44,7 +44,7 @@ class binary_writer; namespace apps { class rrdb_client; } -} +} // namespace dsn class proxy_test; @@ -265,5 +265,5 @@ class redis_parser : public proxy_session redis_parser(proxy_stub *op, dsn::message_ex *first_msg); ~redis_parser() override; }; -} -} // namespace +} // namespace proxy +} // namespace pegasus diff --git a/src/redis_protocol/proxy_ut/redis_proxy_test.cpp b/src/redis_protocol/proxy_ut/redis_proxy_test.cpp index 5d88285c10..30132e0b71 100644 --- a/src/redis_protocol/proxy_ut/redis_proxy_test.cpp +++ b/src/redis_protocol/proxy_ut/redis_proxy_test.cpp @@ -46,11 +46,11 @@ #include "proxy_layer.h" #include "redis_parser.h" #include "runtime/app_model.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/rpc_stream.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_message.h" +#include "rpc/rpc_stream.h" #include "runtime/service_app.h" -#include "runtime/task/task_spec.h" +#include "task/task_spec.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" diff --git a/src/remote_cmd/remote_command.cpp b/src/remote_cmd/remote_command.cpp index a40bb7b1e3..69f142cd1b 100644 --- a/src/remote_cmd/remote_command.cpp +++ b/src/remote_cmd/remote_command.cpp @@ -23,11 +23,11 @@ #include #include "command_types.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" #include "runtime/api_layer1.h" #include "runtime/api_task.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/command_manager.h" #include "utils/error_code.h" #include "utils/threadpool_code.h" @@ -52,7 +52,7 @@ task_ptr async_call_remote(rpc_address remote, request->cmd = cmd; request->arguments = arguments; remote_command_rpc rpc(std::move(request), RPC_CLI_CLI_CALL, timeout); - return rpc.call(remote, nullptr, [ cb = std::move(callback), rpc ](error_code ec) { + return rpc.call(remote, nullptr, [cb = std::move(callback), rpc](error_code ec) { cb(ec, rpc.response()); }); } diff --git a/src/remote_cmd/remote_command.h b/src/remote_cmd/remote_command.h index 41bddfe5e1..000dd07dc0 100644 --- a/src/remote_cmd/remote_command.h +++ b/src/remote_cmd/remote_command.h @@ -22,7 +22,7 @@ #include #include -#include "runtime/task/task.h" +#include "task/task.h" namespace dsn { class error_code; diff --git a/src/replica/backup/cold_backup_context.cpp b/src/replica/backup/cold_backup_context.cpp index a96dd94e8e..0d9256d064 100644 --- a/src/replica/backup/cold_backup_context.cpp +++ b/src/replica/backup/cold_backup_context.cpp @@ -26,7 +26,7 @@ #include "common/replication.codes.h" #include "replica/replica.h" #include "runtime/api_layer1.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/filesystem.h" @@ -226,24 +226,25 @@ void cold_backup_context::check_backup_on_remote() // before retry, should add_ref(), and must release_ref() after retry add_ref(); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - nullptr, - [this]() { - // before retry, should check whether the status is ready for - // check - if (!is_ready_for_check()) { - LOG_INFO("{}: backup status has changed to {}, ignore " - "checking backup on remote", - name, - cold_backup_status_to_string(status())); - ignore_check(); - } else { - check_backup_on_remote(); - } - release_ref(); - }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + nullptr, + [this]() { + // before retry, should check whether the status is ready for + // check + if (!is_ready_for_check()) { + LOG_INFO("{}: backup status has changed to {}, ignore " + "checking backup on remote", + name, + cold_backup_status_to_string(status())); + ignore_check(); + } else { + check_backup_on_remote(); + } + release_ref(); + }, + 0, + std::chrono::seconds(10)); } else { LOG_ERROR("{}: block service create file failed, file = {}, err = {}", name, @@ -290,22 +291,23 @@ void cold_backup_context::read_current_chkpt_file( file_handle->file_name()); add_ref(); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - nullptr, - [this, file_handle]() { - if (!is_ready_for_check()) { - LOG_INFO("{}: backup status has changed to {}, ignore " - "checking backup on remote", - name, - cold_backup_status_to_string(status())); - ignore_check(); - } else { - read_current_chkpt_file(file_handle); - } - release_ref(); - }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + nullptr, + [this, file_handle]() { + if (!is_ready_for_check()) { + LOG_INFO("{}: backup status has changed to {}, ignore " + "checking backup on remote", + name, + cold_backup_status_to_string(status())); + ignore_check(); + } else { + read_current_chkpt_file(file_handle); + } + release_ref(); + }, + 0, + std::chrono::seconds(10)); } else { LOG_ERROR("{}: read remote file failed, file = {}, err = {}", name, @@ -369,22 +371,23 @@ void cold_backup_context::remote_chkpt_dir_exist(const std::string &chkpt_dirnam chkpt_dirname); add_ref(); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - nullptr, - [this, chkpt_dirname]() { - if (!is_ready_for_check()) { - LOG_INFO("{}: backup status has changed to {}, ignore " - "checking backup on remote", - name, - cold_backup_status_to_string(status())); - ignore_check(); - } else { - remote_chkpt_dir_exist(chkpt_dirname); - } - release_ref(); - }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + nullptr, + [this, chkpt_dirname]() { + if (!is_ready_for_check()) { + LOG_INFO("{}: backup status has changed to {}, ignore " + "checking backup on remote", + name, + cold_backup_status_to_string(status())); + ignore_check(); + } else { + remote_chkpt_dir_exist(chkpt_dirname); + } + release_ref(); + }, + 0, + std::chrono::seconds(10)); } else { LOG_ERROR("{}: block service list remote dir failed, dirname = {}, err = {}", name, @@ -681,35 +684,36 @@ void cold_backup_context::upload_file(const std::string &local_filename) local_filename); add_ref(); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - nullptr, - [this, local_filename]() { - // TODO: status change from ColdBackupUploading to - // ColdBackupPaused, and upload file timeout, but when callback - // is executed it catches the status(ColdBackupPaused) - // now, if status back to ColdBackupUploading very soon, and - // call upload_checkpoint_to_remote() here, - // upload_checkpoint_to_remote() maybe acquire the _lock first, - // then stop give back file(upload timeout), the file is still - // in uploading this file will not be uploaded until you call - // upload_checkpoint_to_remote() after it's given back - if (!is_ready_for_upload()) { - std::string full_path_local_file = - ::dsn::utils::filesystem::path_combine(checkpoint_dir, - local_filename); - LOG_INFO("{}: backup status has changed to {}, stop " - "upload checkpoint file to remote, file = {}", - name, - cold_backup_status_to_string(status()), - full_path_local_file); - file_upload_uncomplete(local_filename); - } else { - upload_file(local_filename); - } - release_ref(); - }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + nullptr, + [this, local_filename]() { + // TODO: status change from ColdBackupUploading to + // ColdBackupPaused, and upload file timeout, but when callback + // is executed it catches the status(ColdBackupPaused) + // now, if status back to ColdBackupUploading very soon, and + // call upload_checkpoint_to_remote() here, + // upload_checkpoint_to_remote() maybe acquire the _lock first, + // then stop give back file(upload timeout), the file is still + // in uploading this file will not be uploaded until you call + // upload_checkpoint_to_remote() after it's given back + if (!is_ready_for_upload()) { + std::string full_path_local_file = + ::dsn::utils::filesystem::path_combine(checkpoint_dir, + local_filename); + LOG_INFO("{}: backup status has changed to {}, stop " + "upload checkpoint file to remote, file = {}", + name, + cold_backup_status_to_string(status()), + full_path_local_file); + file_upload_uncomplete(local_filename); + } else { + upload_file(local_filename); + } + release_ref(); + }, + 0, + std::chrono::seconds(10)); } else { LOG_ERROR("{}: block service create file failed, file = {}, err = {}", name, @@ -911,22 +915,23 @@ void cold_backup_context::write_current_chkpt_file(const std::string &value) current_chkpt_file); add_ref(); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - nullptr, - [this, value]() { - if (!is_ready_for_upload()) { - LOG_INFO("{}: backup status has changed to {}, stop write " - "current checkpoint file", - name, - cold_backup_status_to_string(status())); - } else { - write_current_chkpt_file(value); - } - - release_ref(); - }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + nullptr, + [this, value]() { + if (!is_ready_for_upload()) { + LOG_INFO("{}: backup status has changed to {}, stop write " + "current checkpoint file", + name, + cold_backup_status_to_string(status())); + } else { + write_current_chkpt_file(value); + } + + release_ref(); + }, + 0, + std::chrono::seconds(10)); } else { LOG_ERROR("{}: block service create file failed, file = {}, err = {}", name, @@ -963,22 +968,23 @@ void cold_backup_context::on_write(const dist::block_service::block_file_ptr &fi file_handle->file_name()); add_ref(); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - nullptr, - [this, file_handle, value, callback]() { - if (!is_ready_for_upload()) { - LOG_INFO("{}: backup status has changed to {}, stop write " - "remote file, file = {}", - name, - cold_backup_status_to_string(status()), - file_handle->file_name()); - } else { - on_write(file_handle, value, callback); - } - release_ref(); - }, - 0, - std::chrono::seconds(10)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + nullptr, + [this, file_handle, value, callback]() { + if (!is_ready_for_upload()) { + LOG_INFO("{}: backup status has changed to {}, stop write " + "remote file, file = {}", + name, + cold_backup_status_to_string(status()), + file_handle->file_name()); + } else { + on_write(file_handle, value, callback); + } + release_ref(); + }, + 0, + std::chrono::seconds(10)); } else { // here, must call the callback to release_ref callback(false); diff --git a/src/replica/backup/replica_backup_manager.cpp b/src/replica/backup/replica_backup_manager.cpp index 2fb70b7664..ef4c075609 100644 --- a/src/replica/backup/replica_backup_manager.cpp +++ b/src/replica/backup/replica_backup_manager.cpp @@ -17,7 +17,7 @@ #include "replica_backup_manager.h" -#include +#include #include #include #include @@ -36,7 +36,7 @@ #include "replica/replica_context.h" #include "replica/replication_app_base.h" #include "runtime/api_layer1.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" #include "utils/autoref_ptr.h" #include "utils/filesystem.h" #include "utils/flags.h" @@ -126,11 +126,12 @@ void replica_backup_manager::on_clear_cold_backup(const backup_clear_request &re "{}: delay clearing obsoleted cold backup context, cause backup_status == " "ColdBackupCheckpointing", backup_context->name); - tasking::enqueue(LPC_REPLICATION_COLD_BACKUP, - &_replica->_tracker, - [this, request]() { on_clear_cold_backup(request); }, - get_gpid().thread_hash(), - std::chrono::seconds(100)); + tasking::enqueue( + LPC_REPLICATION_COLD_BACKUP, + &_replica->_tracker, + [this, request]() { on_clear_cold_backup(request); }, + get_gpid().thread_hash(), + std::chrono::seconds(100)); return; } @@ -143,12 +144,12 @@ void replica_backup_manager::on_clear_cold_backup(const backup_clear_request &re void replica_backup_manager::start_collect_backup_info() { if (_collect_info_timer == nullptr) { - _collect_info_timer = - tasking::enqueue_timer(LPC_PER_REPLICA_COLLECT_INFO_TIMER, - &_replica->_tracker, - [this]() { collect_backup_info(); }, - std::chrono::milliseconds(FLAGS_gc_interval_ms), - get_gpid().thread_hash()); + _collect_info_timer = tasking::enqueue_timer( + LPC_PER_REPLICA_COLLECT_INFO_TIMER, + &_replica->_tracker, + [this]() { collect_backup_info(); }, + std::chrono::milliseconds(FLAGS_gc_interval_ms), + get_gpid().thread_hash()); } } @@ -192,11 +193,12 @@ void replica_backup_manager::background_clear_backup_checkpoint(const std::strin LOG_INFO_PREFIX("schedule to clear all checkpoint dirs of policy({}) after {} minutes", policy_name, FLAGS_cold_backup_checkpoint_reserve_minutes); - tasking::enqueue(LPC_BACKGROUND_COLD_BACKUP, - &_replica->_tracker, - [this, policy_name]() { clear_backup_checkpoint(policy_name); }, - get_gpid().thread_hash(), - std::chrono::minutes(FLAGS_cold_backup_checkpoint_reserve_minutes)); + tasking::enqueue( + LPC_BACKGROUND_COLD_BACKUP, + &_replica->_tracker, + [this, policy_name]() { clear_backup_checkpoint(policy_name); }, + get_gpid().thread_hash(), + std::chrono::minutes(FLAGS_cold_backup_checkpoint_reserve_minutes)); } // clear all checkpoint dirs of the policy @@ -233,9 +235,9 @@ void replica_backup_manager::send_clear_request_to_secondaries(const gpid &pid, request.__set_pid(pid); request.__set_policy_name(policy_name); - for (const auto &target_address : _replica->_primary_states.membership.secondaries) { + for (const auto &secondary : _replica->_primary_states.pc.secondaries) { rpc::call_one_way_typed( - target_address, RPC_CLEAR_COLD_BACKUP, request, get_gpid().thread_hash()); + secondary, RPC_CLEAR_COLD_BACKUP, request, get_gpid().thread_hash()); } } diff --git a/src/replica/backup/replica_backup_manager.h b/src/replica/backup/replica_backup_manager.h index 40d005537c..f3e7a655c0 100644 --- a/src/replica/backup/replica_backup_manager.h +++ b/src/replica/backup/replica_backup_manager.h @@ -20,7 +20,7 @@ #include #include "replica/replica_base.h" -#include "runtime/task/task.h" +#include "task/task.h" #include "utils/metrics.h" namespace dsn { diff --git a/src/replica/backup/replica_backup_server.cpp b/src/replica/backup/replica_backup_server.cpp index ef7ddc4fcf..4d639f2716 100644 --- a/src/replica/backup/replica_backup_server.cpp +++ b/src/replica/backup/replica_backup_server.cpp @@ -26,7 +26,7 @@ #include "replica/replica_stub.h" #include "replica_backup_manager.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/serialization.h" +#include "rpc/serialization.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/flags.h" diff --git a/src/replica/backup/test/run.sh b/src/replica/backup/test/run.sh index eb88404940..87d08b7b52 100755 --- a/src/replica/backup/test/run.sh +++ b/src/replica/backup/test/run.sh @@ -45,7 +45,7 @@ fi ./dsn_replica_backup_test if [ $? -ne 0 ]; then - tail -n 100 data/log/log.1.txt + tail -n 100 `find . -name pegasus.log.*` if [ -f core ]; then gdb ./dsn_replica_backup_test core -ex "bt" fi diff --git a/src/replica/bulk_load/replica_bulk_loader.cpp b/src/replica/bulk_load/replica_bulk_loader.cpp index a96b8337fb..8b876b4200 100644 --- a/src/replica/bulk_load/replica_bulk_loader.cpp +++ b/src/replica/bulk_load/replica_bulk_loader.cpp @@ -18,11 +18,11 @@ #include #include #include +#include #include #include #include -#include "absl/strings/string_view.h" #include "block_service/block_service_manager.h" #include "common/bulk_load_common.h" #include "common/gpid.h" @@ -36,11 +36,11 @@ #include "replica/replica_stub.h" #include "replica/replication_app_base.h" #include "replica_bulk_loader.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/async_calls.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" +#include "task/async_calls.h" #include "utils/autoref_ptr.h" #include "utils/chrono_literals.h" #include "utils/env.h" @@ -188,14 +188,15 @@ void replica_bulk_loader::broadcast_group_bulk_load(const bulk_load_request &met LOG_INFO_PREFIX("start to broadcast group bulk load"); - for (const auto &hp : _replica->_primary_states.membership.hp_secondaries) { - if (hp == _stub->primary_host_port()) + for (const auto &secondary : _replica->_primary_states.pc.hp_secondaries) { + if (secondary == _stub->primary_host_port()) { continue; + } auto request = std::make_unique(); request->app_name = _replica->_app_info.app_name; - const auto &addr = dsn::dns_resolver::instance().resolve_address(hp); - SET_IP_AND_HOST_PORT(*request, target, addr, hp); + const auto &addr = dsn::dns_resolver::instance().resolve_address(secondary); + SET_IP_AND_HOST_PORT(*request, target, addr, secondary); _replica->_primary_states.get_replica_config(partition_status::PS_SECONDARY, request->config); request->cluster_name = meta_req.cluster_name; @@ -203,14 +204,14 @@ void replica_bulk_loader::broadcast_group_bulk_load(const bulk_load_request &met request->meta_bulk_load_status = meta_req.meta_bulk_load_status; request->remote_root_path = meta_req.remote_root_path; - LOG_INFO_PREFIX("send group_bulk_load_request to {}({})", hp, addr); + LOG_INFO_PREFIX("send group_bulk_load_request to {}({})", secondary, addr); group_bulk_load_rpc rpc( std::move(request), RPC_GROUP_BULK_LOAD, 0_ms, 0, get_gpid().thread_hash()); auto callback_task = rpc.call(addr, tracker(), [this, rpc](error_code err) mutable { on_group_bulk_load_reply(err, rpc.request(), rpc.response()); }); - _replica->_primary_states.group_bulk_load_pending_replies[hp] = callback_task; + _replica->_primary_states.group_bulk_load_pending_replies[secondary] = callback_task; } } @@ -486,7 +487,7 @@ void replica_bulk_loader::download_files(const std::string &provider_name, const std::string &remote_dir, const std::string &local_dir) { - FAIL_POINT_INJECT_F("replica_bulk_loader_download_files", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_bulk_loader_download_files", [](std::string_view) {}); LOG_INFO_PREFIX("start to download files"); dist::block_service::block_filesystem *fs = @@ -740,8 +741,8 @@ void replica_bulk_loader::handle_bulk_load_finish(bulk_load_status::type new_sta } if (status() == partition_status::PS_PRIMARY) { - for (const auto &target_hp : _replica->_primary_states.membership.hp_secondaries) { - _replica->_primary_states.reset_node_bulk_load_states(target_hp); + for (const auto &secondary : _replica->_primary_states.pc.hp_secondaries) { + _replica->_primary_states.reset_node_bulk_load_states(secondary); } } @@ -929,29 +930,29 @@ void replica_bulk_loader::report_group_download_progress(/*out*/ bulk_load_respo } SET_VALUE_FROM_IP_AND_HOST_PORT(response, group_bulk_load_state, - _replica->_primary_states.membership.primary, - _replica->_primary_states.membership.hp_primary, + _replica->_primary_states.pc.primary, + _replica->_primary_states.pc.hp_primary, primary_state); LOG_INFO_PREFIX("primary = {}, download progress = {}%, status = {}", - FMT_HOST_PORT_AND_IP(_replica->_primary_states.membership, primary), + FMT_HOST_PORT_AND_IP(_replica->_primary_states.pc, primary), primary_state.download_progress, primary_state.download_status); int32_t total_progress = primary_state.download_progress; - for (const auto &target_hp : _replica->_primary_states.membership.hp_secondaries) { + for (const auto &secondary : _replica->_primary_states.pc.hp_secondaries) { const auto &secondary_state = - _replica->_primary_states.secondary_bulk_load_states[target_hp]; + _replica->_primary_states.secondary_bulk_load_states[secondary]; int32_t s_progress = secondary_state.__isset.download_progress ? secondary_state.download_progress : 0; error_code s_status = secondary_state.__isset.download_status ? secondary_state.download_status : ERR_OK; LOG_INFO_PREFIX( - "secondary = {}, download progress = {}%, status={}", target_hp, s_progress, s_status); - SET_VALUE_FROM_HOST_PORT(response, group_bulk_load_state, target_hp, secondary_state); + "secondary = {}, download progress = {}%, status={}", secondary, s_progress, s_status); + SET_VALUE_FROM_HOST_PORT(response, group_bulk_load_state, secondary, secondary_state); total_progress += s_progress; } - total_progress /= _replica->_primary_states.membership.max_replica_count; + total_progress /= _replica->_primary_states.pc.max_replica_count; LOG_INFO_PREFIX("total download progress = {}%", total_progress); response.__set_total_download_progress(total_progress); } @@ -971,26 +972,26 @@ void replica_bulk_loader::report_group_ingestion_status(/*out*/ bulk_load_respon primary_state.__set_ingest_status(_replica->_app->get_ingestion_status()); SET_VALUE_FROM_IP_AND_HOST_PORT(response, group_bulk_load_state, - _replica->_primary_states.membership.primary, - _replica->_primary_states.membership.hp_primary, + _replica->_primary_states.pc.primary, + _replica->_primary_states.pc.hp_primary, primary_state); LOG_INFO_PREFIX("primary = {}, ingestion status = {}", - FMT_HOST_PORT_AND_IP(_replica->_primary_states.membership, primary), + FMT_HOST_PORT_AND_IP(_replica->_primary_states.pc, primary), enum_to_string(primary_state.ingest_status)); bool is_group_ingestion_finish = (primary_state.ingest_status == ingestion_status::IS_SUCCEED) && - (_replica->_primary_states.membership.hp_secondaries.size() + 1 == - _replica->_primary_states.membership.max_replica_count); - for (const auto &target_hp : _replica->_primary_states.membership.hp_secondaries) { + (_replica->_primary_states.pc.hp_secondaries.size() + 1 == + _replica->_primary_states.pc.max_replica_count); + for (const auto &secondary : _replica->_primary_states.pc.hp_secondaries) { const auto &secondary_state = - _replica->_primary_states.secondary_bulk_load_states[target_hp]; + _replica->_primary_states.secondary_bulk_load_states[secondary]; ingestion_status::type ingest_status = secondary_state.__isset.ingest_status ? secondary_state.ingest_status : ingestion_status::IS_INVALID; LOG_INFO_PREFIX( - "secondary = {}, ingestion status={}", target_hp, enum_to_string(ingest_status)); - SET_VALUE_FROM_HOST_PORT(response, group_bulk_load_state, target_hp, secondary_state); + "secondary = {}, ingestion status={}", secondary, enum_to_string(ingest_status)); + SET_VALUE_FROM_HOST_PORT(response, group_bulk_load_state, secondary, secondary_state); is_group_ingestion_finish &= (ingest_status == ingestion_status::IS_SUCCEED); } response.__set_is_group_ingestion_finished(is_group_ingestion_finish); @@ -1018,24 +1019,24 @@ void replica_bulk_loader::report_group_cleaned_up(bulk_load_response &response) primary_state.__set_is_cleaned_up(is_cleaned_up()); SET_VALUE_FROM_IP_AND_HOST_PORT(response, group_bulk_load_state, - _replica->_primary_states.membership.primary, - _replica->_primary_states.membership.hp_primary, + _replica->_primary_states.pc.primary, + _replica->_primary_states.pc.hp_primary, primary_state); LOG_INFO_PREFIX("primary = {}, bulk load states cleaned_up = {}", - FMT_HOST_PORT_AND_IP(_replica->_primary_states.membership, primary), + FMT_HOST_PORT_AND_IP(_replica->_primary_states.pc, primary), primary_state.is_cleaned_up); - bool group_flag = (primary_state.is_cleaned_up) && - (_replica->_primary_states.membership.hp_secondaries.size() + 1 == - _replica->_primary_states.membership.max_replica_count); - for (const auto &target_hp : _replica->_primary_states.membership.hp_secondaries) { + bool group_flag = + (primary_state.is_cleaned_up) && (_replica->_primary_states.pc.hp_secondaries.size() + 1 == + _replica->_primary_states.pc.max_replica_count); + for (const auto &secondary : _replica->_primary_states.pc.hp_secondaries) { const auto &secondary_state = - _replica->_primary_states.secondary_bulk_load_states[target_hp]; - bool is_cleaned_up = - secondary_state.__isset.is_cleaned_up ? secondary_state.is_cleaned_up : false; + _replica->_primary_states.secondary_bulk_load_states[secondary]; + bool is_cleaned_up = secondary_state.__isset.is_cleaned_up ? secondary_state.is_cleaned_up + : false; LOG_INFO_PREFIX( - "secondary = {}, bulk load states cleaned_up = {}", target_hp, is_cleaned_up); - SET_VALUE_FROM_HOST_PORT(response, group_bulk_load_state, target_hp, secondary_state); + "secondary = {}, bulk load states cleaned_up = {}", secondary, is_cleaned_up); + SET_VALUE_FROM_HOST_PORT(response, group_bulk_load_state, secondary, secondary_state); group_flag &= is_cleaned_up; } LOG_INFO_PREFIX("group bulk load states cleaned_up = {}", group_flag); @@ -1057,22 +1058,22 @@ void replica_bulk_loader::report_group_is_paused(bulk_load_response &response) primary_state.__set_is_paused(_status == bulk_load_status::BLS_PAUSED); SET_VALUE_FROM_IP_AND_HOST_PORT(response, group_bulk_load_state, - _replica->_primary_states.membership.primary, - _replica->_primary_states.membership.hp_primary, + _replica->_primary_states.pc.primary, + _replica->_primary_states.pc.hp_primary, primary_state); LOG_INFO_PREFIX("primary = {}, bulk_load is_paused = {}", - FMT_HOST_PORT_AND_IP(_replica->_primary_states.membership, primary), + FMT_HOST_PORT_AND_IP(_replica->_primary_states.pc, primary), primary_state.is_paused); - bool group_is_paused = primary_state.is_paused && - (_replica->_primary_states.membership.hp_secondaries.size() + 1 == - _replica->_primary_states.membership.max_replica_count); - for (const auto &target_hp : _replica->_primary_states.membership.hp_secondaries) { + bool group_is_paused = + primary_state.is_paused && (_replica->_primary_states.pc.hp_secondaries.size() + 1 == + _replica->_primary_states.pc.max_replica_count); + for (const auto &secondary : _replica->_primary_states.pc.hp_secondaries) { partition_bulk_load_state secondary_state = - _replica->_primary_states.secondary_bulk_load_states[target_hp]; + _replica->_primary_states.secondary_bulk_load_states[secondary]; bool is_paused = secondary_state.__isset.is_paused ? secondary_state.is_paused : false; - LOG_INFO_PREFIX("secondary = {}, bulk_load is_paused = {}", target_hp, is_paused); - SET_VALUE_FROM_HOST_PORT(response, group_bulk_load_state, target_hp, secondary_state); + LOG_INFO_PREFIX("secondary = {}, bulk_load is_paused = {}", secondary, is_paused); + SET_VALUE_FROM_HOST_PORT(response, group_bulk_load_state, secondary, secondary_state); group_is_paused &= is_paused; } LOG_INFO_PREFIX("group bulk load is_paused = {}", group_is_paused); diff --git a/src/replica/bulk_load/replica_bulk_loader.h b/src/replica/bulk_load/replica_bulk_loader.h index 92f76810cd..e01285e541 100644 --- a/src/replica/bulk_load/replica_bulk_loader.h +++ b/src/replica/bulk_load/replica_bulk_loader.h @@ -29,7 +29,7 @@ #include "replica/replica.h" #include "replica/replica_base.h" #include "runtime/api_layer1.h" -#include "runtime/task/task.h" +#include "task/task.h" #include "utils/error_code.h" #include "utils/metrics.h" #include "utils/zlocks.h" diff --git a/src/replica/bulk_load/test/replica_bulk_loader_test.cpp b/src/replica/bulk_load/test/replica_bulk_loader_test.cpp index 6deeda9e4e..b2e7fde0b7 100644 --- a/src/replica/bulk_load/test/replica_bulk_loader_test.cpp +++ b/src/replica/bulk_load/test/replica_bulk_loader_test.cpp @@ -27,9 +27,9 @@ #include "gtest/gtest.h" #include "replica/test/mock_utils.h" #include "replica/test/replica_test_base.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "task/task_tracker.h" #include "test_util/test_util.h" #include "utils/fail_point.h" #include "utils/filesystem.h" @@ -236,13 +236,13 @@ class replica_bulk_loader_test : public replica_test_base void mock_primary_states() { mock_replica_config(partition_status::PS_PRIMARY); - partition_configuration config; - config.max_replica_count = 3; - config.pid = PID; - config.ballot = BALLOT; - SET_IP_AND_HOST_PORT_BY_DNS(config, primary, PRIMARY_HP); - SET_IPS_AND_HOST_PORTS_BY_DNS(config, secondaries, SECONDARY_HP, SECONDARY_HP2); - _replica->set_primary_partition_configuration(config); + partition_configuration pc; + pc.max_replica_count = 3; + pc.pid = PID; + pc.ballot = BALLOT; + SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, PRIMARY_HP); + SET_IPS_AND_HOST_PORTS_BY_DNS(pc, secondaries, SECONDARY_HP, SECONDARY_HP2); + _replica->set_primary_partition_configuration(pc); } void create_local_metadata_file() @@ -773,7 +773,7 @@ TEST_P(replica_bulk_loader_test, report_group_ingestion_status_test) // report_group_context_clean_flag unit tests TEST_P(replica_bulk_loader_test, report_group_cleanup_flag_in_unhealthy_state) { - // _primary_states.membership.secondaries is empty + // _primary_states.pc.secondaries is empty mock_replica_config(partition_status::PS_PRIMARY); ASSERT_FALSE(test_report_group_cleaned_up()); } diff --git a/src/replica/bulk_load/test/run.sh b/src/replica/bulk_load/test/run.sh index 7689036b02..fff86cd0e0 100755 --- a/src/replica/bulk_load/test/run.sh +++ b/src/replica/bulk_load/test/run.sh @@ -45,7 +45,7 @@ fi ./dsn_replica_bulk_load_test if [ $? -ne 0 ]; then - tail -n 100 data/log/log.1.txt + tail -n 100 `find . -name pegasus.log.*` if [ -f core ]; then gdb ./dsn_replica_bulk_load_test core -ex "bt" fi diff --git a/src/replica/disk_cleaner.cpp b/src/replica/disk_cleaner.cpp index 805da23914..8a191d67fc 100644 --- a/src/replica/disk_cleaner.cpp +++ b/src/replica/disk_cleaner.cpp @@ -36,7 +36,7 @@ #include "utils/fmt_logging.h" #include "utils/macros.h" #include "utils/string_conv.h" -#include "absl/strings/string_view.h" +#include DSN_DEFINE_uint64(replication, gc_disk_error_replica_interval_seconds, @@ -148,7 +148,7 @@ bool parse_timestamp_us(const std::string &name, size_t suffix_size, uint64_t &t } const auto ok = - dsn::buf2uint64(absl::string_view(name.data() + begin_idx, length), timestamp_us); + dsn::buf2uint64(std::string_view(name.data() + begin_idx, length), timestamp_us); return ok ? timestamp_us > MIN_TIMESTAMP_US : false; } diff --git a/src/replica/duplication/duplication_pipeline.cpp b/src/replica/duplication/duplication_pipeline.cpp index 54abd83cfb..739b1cc8f6 100644 --- a/src/replica/duplication/duplication_pipeline.cpp +++ b/src/replica/duplication/duplication_pipeline.cpp @@ -17,21 +17,30 @@ #include "duplication_pipeline.h" -#include -#include +#include +#include +#include #include #include +#include #include #include "load_from_private_log.h" #include "replica/duplication/replica_duplicator.h" #include "replica/mutation_log.h" #include "replica/replica.h" -#include "runtime/rpc/rpc_holder.h" #include "utils/autoref_ptr.h" #include "utils/errors.h" +#include "utils/flags.h" #include "utils/fmt_logging.h" +DSN_DEFINE_uint64( + replication, + dup_no_mutation_load_delay_ms, + 100, + "The duration of the delay until the next execution if there is no mutation to be loaded."); +DSN_TAG_VARIABLE(dup_no_mutation_load_delay_ms, FT_MUTABLE); + METRIC_DEFINE_counter(replica, dup_shipped_bytes, dsn::metric_unit::kBytes, @@ -46,7 +55,7 @@ namespace replication { // // /*static*/ std::function( - replica_base *, absl::string_view /*remote cluster*/, absl::string_view /*app*/)> + replica_base *, std::string_view /*remote cluster*/, std::string_view /*app*/)> mutation_duplicator::creator; // // @@ -57,9 +66,13 @@ void load_mutation::run() { decree last_decree = _duplicator->progress().last_decree; _start_decree = last_decree + 1; - if (_replica->private_log()->max_commit_on_disk() < _start_decree) { - // wait 100ms for next try if no mutation was added. - repeat(100_ms); + + // Load the mutations from plog that have been committed recently, if any. + const auto max_plog_committed_decree = + std::min(_replica->private_log()->max_decree_on_disk(), _replica->last_applied_decree()); + if (_start_decree > max_plog_committed_decree) { + // Wait for a while if no mutation was added. + repeat(std::chrono::milliseconds(FLAGS_dup_no_mutation_load_delay_ms)); return; } diff --git a/src/replica/duplication/duplication_sync_timer.cpp b/src/replica/duplication/duplication_sync_timer.cpp index 3d0df01aa4..90d9e1fe45 100644 --- a/src/replica/duplication/duplication_sync_timer.cpp +++ b/src/replica/duplication/duplication_sync_timer.cpp @@ -28,10 +28,10 @@ #include "replica/replica.h" #include "replica/replica_stub.h" #include "replica_duplicator_manager.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_code.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "task/async_calls.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/chrono_literals.h" #include "utils/error_code.h" @@ -176,12 +176,13 @@ void duplication_sync_timer::start() { LOG_INFO("run duplication sync periodically in {}s", FLAGS_duplication_sync_period_second); - _timer_task = tasking::enqueue_timer(LPC_DUPLICATION_SYNC_TIMER, - &_stub->_tracker, - [this]() { run(); }, - FLAGS_duplication_sync_period_second * 1_s, - 0, - FLAGS_duplication_sync_period_second * 1_s); + _timer_task = tasking::enqueue_timer( + LPC_DUPLICATION_SYNC_TIMER, + &_stub->_tracker, + [this]() { run(); }, + FLAGS_duplication_sync_period_second * 1_s, + 0, + FLAGS_duplication_sync_period_second * 1_s); } std::multimap diff --git a/src/replica/duplication/duplication_sync_timer.h b/src/replica/duplication/duplication_sync_timer.h index 1a57a7631b..089282d5f5 100644 --- a/src/replica/duplication/duplication_sync_timer.h +++ b/src/replica/duplication/duplication_sync_timer.h @@ -24,7 +24,7 @@ #include "common/gpid.h" #include "common/replication_other_types.h" #include "duplication_types.h" -#include "runtime/task/task.h" +#include "task/task.h" #include "utils/zlocks.h" namespace dsn { diff --git a/src/replica/duplication/load_from_private_log.cpp b/src/replica/duplication/load_from_private_log.cpp index 6b0af82251..b0e501409f 100644 --- a/src/replica/duplication/load_from_private_log.cpp +++ b/src/replica/duplication/load_from_private_log.cpp @@ -20,7 +20,7 @@ #include #include -#include "absl/strings/string_view.h" +#include #include "common/duplication_common.h" #include "duplication_types.h" #include "load_from_private_log.h" @@ -119,7 +119,7 @@ void load_from_private_log::run() repeat(1_s); FAIL_POINT_INJECT_NOT_RETURN_F( - "duplication_sync_complete", [&](absl::string_view s) -> void { + "duplication_sync_complete", [&](std::string_view s) -> void { if (_duplicator->progress().confirmed_decree == invalid_decree) { // set_confirmed_decree(9), the value must be equal (decree_start of // `test_start_duplication` in `load_from_private_log_test.cpp`) -1 diff --git a/src/replica/duplication/mutation_batch.cpp b/src/replica/duplication/mutation_batch.cpp index e6d91d5f29..e6529ea536 100644 --- a/src/replica/duplication/mutation_batch.cpp +++ b/src/replica/duplication/mutation_batch.cpp @@ -17,22 +17,24 @@ #include #include +#include #include #include #include -#include "absl/strings/string_view.h" #include "common/replication.codes.h" #include "consensus_types.h" #include "metadata_types.h" #include "mutation_batch.h" +#include "replica/replica.h" #include "replica_duplicator.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" +#include "utils/ports.h" METRIC_DEFINE_gauge_int64(replica, dup_recent_lost_mutations, @@ -55,8 +57,10 @@ mutation_buffer::mutation_buffer(replica_base *r, void mutation_buffer::commit(decree d, commit_type ct) { - if (d <= last_committed_decree()) + if (d <= last_committed_decree()) { + // Ignore the decrees that have been committed. return; + } CHECK_EQ_PREFIX(ct, COMMIT_TO_DECREE_HARD); @@ -85,8 +89,8 @@ void mutation_buffer::commit(decree d, commit_type ct) min_decree(), max_decree()); METRIC_VAR_SET(dup_recent_lost_mutations, min_decree() - last_committed_decree()); - // if next_commit_mutation loss, let last_commit_decree catch up with min_decree, and - // the next loop will commit from min_decree + // If next_commit_mutation loss, let last_commit_decree catch up with min_decree, and + // the next loop will commit from min_decree. _last_committed_decree = min_decree() - 1; return; } @@ -101,13 +105,13 @@ void mutation_buffer::commit(decree d, commit_type ct) error_s mutation_batch::add(mutation_ptr mu) { if (mu->get_decree() <= _mutation_buffer->last_committed_decree()) { - // ignore + // Ignore the mutations that have been committed. return error_s::ok(); } auto old = _mutation_buffer->get_mutation_by_decree(mu->get_decree()); if (old != nullptr && old->data.header.ballot >= mu->data.header.ballot) { - // ignore + // The mutation with duplicate decree would be ignored. return error_s::ok(); } @@ -123,6 +127,16 @@ error_s mutation_batch::add(mutation_ptr mu) _start_decree); } + if (mu->get_decree() <= _replica->last_applied_decree()) { + // Once this mutation has been applied into rocksdb memtable, commit it for duplication; + // otherwise, this mutation would be delayed at least several minutes to be duplicated to + // the remote cluster. It would not be duplicated until some new mutations (such as empty + // writes) enter, since the last decree that is committed for this replica is NOT + // mu->data.header.decree but rather mu->data.header.last_committed_decree. See also + // `mutation_header` in src/common/consensus.thrift. + _mutation_buffer->commit(mu->get_decree(), COMMIT_TO_DECREE_HARD); + } + return error_s::ok(); } @@ -140,7 +154,7 @@ mutation_tuple_set mutation_batch::move_all_mutations() return std::move(_loaded_mutations); } -mutation_batch::mutation_batch(replica_duplicator *r) : replica_base(r) +mutation_batch::mutation_batch(replica_duplicator *r) : replica_base(r), _replica(r->_replica) { // Prepend a special tag identifying this is a mutation_batch, // so `dxxx_replica` logging in prepare_list will print along with its real caller. @@ -149,25 +163,30 @@ mutation_batch::mutation_batch(replica_duplicator *r) : replica_base(r) r->get_gpid(), std::string("mutation_batch@") + r->replica_name(), r->app_name()); _mutation_buffer = std::make_unique( &base, 0, PREPARE_LIST_NUM_ENTRIES, [this](mutation_ptr &mu) { - // committer + // The committer for the prepare list, used for + // duplicating to add the committed mutations to the + // loading list, which would be shipped to the remote + // cluster later. add_mutation_if_valid(mu, _start_decree); }); - // start duplication from confirmed_decree + // Start duplication from the confirmed decree that has been persisted in the meta server. _mutation_buffer->reset(r->progress().confirmed_decree); } void mutation_batch::add_mutation_if_valid(mutation_ptr &mu, decree start_decree) { if (mu->get_decree() < start_decree) { - // ignore + // Ignore the mutations before start_decree. return; } + for (mutation_update &update : mu->data.updates) { - // ignore WRITE_EMPTY if (update.code == RPC_REPLICATION_WRITE_EMPTY) { + // Ignore empty writes. continue; } + // Ignore non-idempotent writes. // Normally a duplicating replica will reply non-idempotent writes with // ERR_OPERATION_DISABLED, but there could still be a mutation written @@ -176,11 +195,21 @@ void mutation_batch::add_mutation_if_valid(mutation_ptr &mu, decree start_decree if (!task_spec::get(update.code)->rpc_request_is_write_idempotent) { continue; } + blob bb; - if (update.data.buffer() != nullptr) { + if (update.data.buffer()) { + // ATTENTION: instead of copy, move could optimize the performance. However, this + // would nullify the elements of mu->data.updates. bb = std::move(update.data); } else { - bb = blob::create_from_bytes(update.data.data(), update.data.length()); + // TODO(wangdan): if update.data.buffer() is nullptr, the blob object must have + // been used as `string_view`. + // + // Once `string_view` function is removed from blob, consider dropping following + // statements. + if (dsn_likely(update.data.data() != nullptr && !update.data.empty())) { + bb = blob::create_from_bytes(update.data.data(), update.data.length()); + } } _total_bytes += bb.length(); diff --git a/src/replica/duplication/mutation_batch.h b/src/replica/duplication/mutation_batch.h index 97795cea26..0cca5169e9 100644 --- a/src/replica/duplication/mutation_batch.h +++ b/src/replica/duplication/mutation_batch.h @@ -31,7 +31,7 @@ namespace dsn { namespace replication { - +class replica; class replica_duplicator; class mutation_buffer : public prepare_list @@ -57,15 +57,19 @@ class mutation_batch : replica_base explicit mutation_batch(replica_duplicator *r); + // Add mutations to prepare list. Only those who have been committed would be + // duplicated to the remote cluster. error_s add(mutation_ptr mu); + // Add the committed mutation to the loading list, which would be shipped to + // the remote cluster later. void add_mutation_if_valid(mutation_ptr &, decree start_decree); mutation_tuple_set move_all_mutations(); decree last_decree() const; - // mutations with decree < d will be ignored. + // Mutations with decree < d will be ignored. void set_start_decree(decree d); void reset_mutation_buffer(decree d); @@ -78,6 +82,8 @@ class mutation_batch : replica_base friend class replica_duplicator_test; friend class mutation_batch_test; + replica *_replica; + std::unique_ptr _mutation_buffer; mutation_tuple_set _loaded_mutations; decree _start_decree{invalid_decree}; diff --git a/src/replica/duplication/mutation_duplicator.h b/src/replica/duplication/mutation_duplicator.h index b1c29e00fa..cb6b3c65e2 100644 --- a/src/replica/duplication/mutation_duplicator.h +++ b/src/replica/duplication/mutation_duplicator.h @@ -68,7 +68,7 @@ class mutation_duplicator : public replica_base // Singleton creator of mutation_duplicator. static std::function( - replica_base *, absl::string_view /*remote cluster*/, absl::string_view /*app name*/)> + replica_base *, std::string_view /*remote cluster*/, std::string_view /*app name*/)> creator; explicit mutation_duplicator(replica_base *r) : replica_base(r) {} @@ -84,7 +84,7 @@ class mutation_duplicator : public replica_base }; inline std::unique_ptr new_mutation_duplicator( - replica_base *r, absl::string_view remote_cluster_address, absl::string_view app) + replica_base *r, std::string_view remote_cluster_address, std::string_view app) { return mutation_duplicator::creator(r, remote_cluster_address, app); } diff --git a/src/replica/duplication/replica_duplicator.cpp b/src/replica/duplication/replica_duplicator.cpp index 8102096518..d45626e52f 100644 --- a/src/replica/duplication/replica_duplicator.cpp +++ b/src/replica/duplication/replica_duplicator.cpp @@ -17,7 +17,6 @@ #include "replica_duplicator.h" -#include #include #include #include @@ -25,6 +24,7 @@ #include #include #include +#include #include #include "common/duplication_common.h" @@ -35,7 +35,6 @@ #include "load_from_private_log.h" #include "replica/mutation_log.h" #include "replica/replica.h" -#include "runtime/task/async_calls.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" @@ -60,21 +59,66 @@ replica_duplicator::replica_duplicator(const duplication_entry &ent, replica *r) _stub(r->get_replica_stub()), METRIC_VAR_INIT_replica(dup_confirmed_mutations) { + // Ensure that the checkpoint decree is at least 1. Otherwise, the checkpoint could not be + // created in time for empty replica; in consequence, the remote cluster would inevitably + // fail to pull the checkpoint files. + // + // The max decree in rocksdb memtable (the last applied decree) is considered as the min + // decree that should be covered by the checkpoint, which means currently all of the data + // in current rocksdb should be included into the created checkpoint. + // + // `_min_checkpoint_decree` is not persisted into zk. Once replica server was restarted, + // it would be reset to the decree that is applied most recently. + const auto last_applied_decree = _replica->last_applied_decree(); + _min_checkpoint_decree = std::max(last_applied_decree, static_cast(1)); + LOG_INFO_PREFIX("initialize checkpoint decree: min_checkpoint_decree={}, " + "last_committed_decree={}, last_applied_decree={}, " + "last_flushed_decree={}, last_durable_decree={}, " + "plog_max_decree_on_disk={}, plog_max_commit_on_disk={}", + _min_checkpoint_decree, + _replica->last_committed_decree(), + last_applied_decree, + _replica->last_flushed_decree(), + _replica->last_durable_decree(), + _replica->private_log()->max_decree_on_disk(), + _replica->private_log()->max_commit_on_disk()); + _status = ent.status; - auto it = ent.progress.find(get_gpid().get_partition_index()); + const auto it = ent.progress.find(get_gpid().get_partition_index()); + CHECK_PREFIX_MSG(it != ent.progress.end(), + "partition({}) not found in duplication progress: " + "app_name={}, dup_id={}, remote_cluster_name={}, remote_app_name={}", + get_gpid(), + r->get_app_info()->app_name, + id(), + _remote_cluster_name, + _remote_app_name); + + // Initial progress would be `invalid_decree` which was synced from meta server + // immediately after the duplication was created. + // See `init_progress()` in `meta_duplication_service::new_dup_from_init()`. + // + // _progress.last_decree would be used to update the state in meta server. + // See `replica_duplicator_manager::get_duplication_confirms_to_update()`. if (it->second == invalid_decree) { - // keep current max committed_decree as start point. - // todo(jiashuo1) _start_point_decree hasn't be ready to persist zk, so if master restart, - // the value will be reset 0 - _start_point_decree = _progress.last_decree = _replica->private_log()->max_commit_on_disk(); + _progress.last_decree = _min_checkpoint_decree; } else { _progress.last_decree = _progress.confirmed_decree = it->second; } - LOG_INFO_PREFIX("initialize replica_duplicator[{}] [dupid:{}, meta_confirmed_decree:{}]", - duplication_status_to_string(_status), + + LOG_INFO_PREFIX("initialize replica_duplicator: app_name={}, dup_id={}, " + "remote_cluster_name={}, remote_app_name={}, status={}, " + "replica_confirmed_decree={}, meta_persisted_decree={}/{}", + r->get_app_info()->app_name, id(), - it->second); + _remote_cluster_name, + _remote_app_name, + duplication_status_to_string(_status), + _progress.last_decree, + it->second, + _progress.confirmed_decree); + thread_pool(LPC_REPLICATION_LOW).task_tracker(tracker()).thread_hash(get_gpid().thread_hash()); if (_status == duplication_status::DS_PREPARE) { @@ -86,22 +130,25 @@ replica_duplicator::replica_duplicator(const duplication_entry &ent, replica *r) void replica_duplicator::prepare_dup() { - LOG_INFO_PREFIX("start prepare checkpoint to catch up with latest durable decree: " - "start_point_decree({}) < last_durable_decree({}) = {}", - _start_point_decree, + LOG_INFO_PREFIX("start to trigger checkpoint: min_checkpoint_decree={}, " + "last_committed_decree={}, last_applied_decree={}, " + "last_flushed_decree={}, last_durable_decree={}, " + "plog_max_decree_on_disk={}, plog_max_commit_on_disk={}", + _min_checkpoint_decree, + _replica->last_committed_decree(), + _replica->last_applied_decree(), + _replica->last_flushed_decree(), _replica->last_durable_decree(), - _start_point_decree < _replica->last_durable_decree()); + _replica->private_log()->max_decree_on_disk(), + _replica->private_log()->max_commit_on_disk()); - tasking::enqueue( - LPC_REPLICATION_COMMON, - &_tracker, - [this]() { _replica->trigger_manual_emergency_checkpoint(_start_point_decree); }, - get_gpid().thread_hash()); + _replica->async_trigger_manual_emergency_checkpoint(_min_checkpoint_decree, 0); } void replica_duplicator::start_dup_log() { - LOG_INFO_PREFIX("starting duplication {} [last_decree: {}, confirmed_decree: {}]", + LOG_INFO_PREFIX("starting duplication: {}, replica_confirmed_decree={}, " + "meta_persisted_decree={}", to_string(), _progress.last_decree, _progress.confirmed_decree); @@ -162,19 +209,19 @@ void replica_duplicator::update_status_if_needed(duplication_status::type next_s return; } - // DS_PREPARE means replica is checkpointing, it may need trigger multi time to catch - // _start_point_decree of the plog + // DS_PREPARE means this replica is making checkpoint, which might need to be triggered + // multiple times to catch up with _min_checkpoint_decree. if (_status == next_status && next_status != duplication_status::DS_PREPARE) { return; } - LOG_INFO_PREFIX( - "update duplication status: {}=>{}[start_point={}, last_commit={}, last_durable={}]", - duplication_status_to_string(_status), - duplication_status_to_string(next_status), - _start_point_decree, - _replica->last_committed_decree(), - _replica->last_durable_decree()); + LOG_INFO_PREFIX("update duplication status: {}=>{} [min_checkpoint_decree={}, " + "last_committed_decree={}, last_durable_decree={}]", + duplication_status_to_string(_status), + duplication_status_to_string(next_status), + _min_checkpoint_decree, + _replica->last_committed_decree(), + _replica->last_durable_decree()); _status = next_status; if (_status == duplication_status::DS_PREPARE) { @@ -220,7 +267,7 @@ error_s replica_duplicator::update_progress(const duplication_progress &p) decree last_confirmed_decree = _progress.confirmed_decree; _progress.confirmed_decree = std::max(_progress.confirmed_decree, p.confirmed_decree); _progress.last_decree = std::max(_progress.last_decree, p.last_decree); - _progress.checkpoint_has_prepared = _start_point_decree <= _replica->last_durable_decree(); + _progress.checkpoint_has_prepared = _min_checkpoint_decree <= _replica->last_durable_decree(); if (_progress.confirmed_decree > _progress.last_decree) { return FMT_ERR(ERR_INVALID_STATE, @@ -239,17 +286,16 @@ error_s replica_duplicator::update_progress(const duplication_progress &p) void replica_duplicator::verify_start_decree(decree start_decree) { - decree confirmed_decree = progress().confirmed_decree; - decree last_decree = progress().last_decree; - decree max_gced_decree = get_max_gced_decree(); - CHECK_LT_MSG(max_gced_decree, - start_decree, - "the logs haven't yet duplicated were accidentally truncated " - "[max_gced_decree: {}, start_decree: {}, confirmed_decree: {}, last_decree: {}]", - max_gced_decree, - start_decree, - confirmed_decree, - last_decree); + const auto max_gced_decree = get_max_gced_decree(); + CHECK_LT_PREFIX_MSG( + max_gced_decree, + start_decree, + "the logs haven't yet duplicated were accidentally truncated [max_gced_decree: {}, " + "start_decree: {}, replica_confirmed_decree: {}, meta_persisted_decree: {}]", + max_gced_decree, + start_decree, + progress().last_decree, + progress().confirmed_decree); } decree replica_duplicator::get_max_gced_decree() const diff --git a/src/replica/duplication/replica_duplicator.h b/src/replica/duplication/replica_duplicator.h index ebf4473b99..165226eb46 100644 --- a/src/replica/duplication/replica_duplicator.h +++ b/src/replica/duplication/replica_duplicator.h @@ -23,11 +23,12 @@ #include #include "common//duplication_common.h" +#include "common/json_helper.h" #include "common/replication_other_types.h" #include "duplication_types.h" #include "replica/replica_base.h" #include "runtime/pipeline.h" -#include "runtime/task/task_tracker.h" +#include "task/task_tracker.h" #include "utils/errors.h" #include "utils/metrics.h" #include "utils/zlocks.h" @@ -38,12 +39,13 @@ namespace replication { class duplication_progress { public: - // check if checkpoint has catch up with `_start_point_decree` + // Check if checkpoint has covered `_min_checkpoint_decree`. bool checkpoint_has_prepared{false}; - // the maximum decree that's been persisted in meta server + + // The max decree that has been persisted in the meta server. decree confirmed_decree{invalid_decree}; - // the maximum decree that's been duplicated to remote. + // The max decree that has been duplicated to the remote cluster. decree last_decree{invalid_decree}; duplication_progress &set_last_decree(decree d) @@ -143,6 +145,25 @@ class replica_duplicator : public replica_base, public pipeline::base void set_duplication_plog_checking(bool checking); + // Encode current progress of this duplication into json. + template + void encode_progress(TWriter &writer) const + { + writer.StartObject(); + + JSON_ENCODE_OBJ(writer, dupid, _id); + JSON_ENCODE_OBJ(writer, remote_cluster_name, _remote_cluster_name); + JSON_ENCODE_OBJ(writer, remote_app_name, _remote_app_name); + + { + zauto_read_lock l(_lock); + JSON_ENCODE_OBJ(writer, replica_confirmed_decree, _progress.last_decree); + JSON_ENCODE_OBJ(writer, meta_persisted_decree, _progress.confirmed_decree); + } + + writer.EndObject(); + } + private: friend class duplication_test_base; friend class replica_duplicator_test; @@ -150,6 +171,7 @@ class replica_duplicator : public replica_base, public pipeline::base friend class load_from_private_log_test; friend class ship_mutation_test; + friend class mutation_batch; friend class load_mutation; friend class ship_mutation; @@ -163,7 +185,10 @@ class replica_duplicator : public replica_base, public pipeline::base replica_stub *_stub; dsn::task_tracker _tracker; - decree _start_point_decree = invalid_decree; + // The min decree that should be covered by the checkpoint which is triggered by the + // newly added duplication. + decree _min_checkpoint_decree{invalid_decree}; + duplication_status::type _status{duplication_status::DS_INIT}; std::atomic _fail_mode{duplication_fail_mode::FAIL_SLOW}; diff --git a/src/replica/duplication/replica_duplicator_manager.cpp b/src/replica/duplication/replica_duplicator_manager.cpp index d60bf57b20..2e1e61cc4d 100644 --- a/src/replica/duplication/replica_duplicator_manager.cpp +++ b/src/replica/duplication/replica_duplicator_manager.cpp @@ -15,14 +15,18 @@ // specific language governing permissions and limitations // under the License. -#include +#include #include #include #include #include "common//duplication_common.h" #include "common/gpid.h" +#include "common/replication_enums.h" +#include "metadata_types.h" #include "replica/duplication/replica_duplicator.h" +#include "replica/duplication/replica_duplicator_manager.h" +#include "replica/replica.h" #include "replica_duplicator_manager.h" #include "utils/autoref_ptr.h" #include "utils/errors.h" @@ -41,29 +45,56 @@ replica_duplicator_manager::replica_duplicator_manager(replica *r) { } +void replica_duplicator_manager::update_duplication_map( + const std::map &new_dup_map) +{ + if (new_dup_map.empty() || _replica->status() != partition_status::PS_PRIMARY) { + remove_all_duplications(); + return; + } + + remove_non_existed_duplications(new_dup_map); + + for (const auto &kv2 : new_dup_map) { + sync_duplication(kv2.second); + } +} + std::vector replica_duplicator_manager::get_duplication_confirms_to_update() const { zauto_lock l(_lock); std::vector updates; - for (const auto &kv : _duplications) { - replica_duplicator *duplicator = kv.second.get(); - duplication_progress p = duplicator->progress(); - if (p.last_decree != p.confirmed_decree || - (kv.second->status() == duplication_status::DS_PREPARE && p.checkpoint_has_prepared)) { - if (p.last_decree < p.confirmed_decree) { - LOG_ERROR_PREFIX("invalid decree state: p.last_decree({}) < p.confirmed_decree({})", - p.last_decree, - p.confirmed_decree); - continue; - } - duplication_confirm_entry entry; - entry.dupid = duplicator->id(); - entry.confirmed_decree = p.last_decree; - entry.__set_checkpoint_prepared(p.checkpoint_has_prepared); - updates.emplace_back(entry); + for (const auto &[_, dup] : _duplications) { + // There are two conditions when we should send confirmed decrees to meta server to update + // the progress: + // + // 1. the acknowledged decree from remote cluster has changed, making it different from + // the one that is persisted in zk by meta server; otherwise, + // + // 2. the duplication has been in the stage of synchronizing checkpoint to the remote + // cluster, and the synchronized checkpoint has been ready. + const auto &progress = dup->progress(); + if (progress.last_decree == progress.confirmed_decree && + (dup->status() != duplication_status::DS_PREPARE || + !progress.checkpoint_has_prepared)) { + continue; } + + if (progress.last_decree < progress.confirmed_decree) { + LOG_ERROR_PREFIX( + "invalid decree state: progress.last_decree({}) < progress.confirmed_decree({})", + progress.last_decree, + progress.confirmed_decree); + continue; + } + + duplication_confirm_entry entry; + entry.dupid = dup->id(); + entry.confirmed_decree = progress.last_decree; + entry.__set_checkpoint_prepared(progress.checkpoint_has_prepared); + updates.emplace_back(entry); } return updates; } @@ -191,5 +222,17 @@ replica_duplicator_manager::get_dup_states() const return ret; } +void replica_duplicator_manager::remove_all_duplications() +{ + // fast path + if (_duplications.empty()) { + return; + } + + LOG_WARNING_PREFIX("remove all duplication, replica status = {}", + enum_to_string(_replica->status())); + _duplications.clear(); +} + } // namespace replication } // namespace dsn diff --git a/src/replica/duplication/replica_duplicator_manager.h b/src/replica/duplication/replica_duplicator_manager.h index 51bcbd1e1d..f8f95d3822 100644 --- a/src/replica/duplication/replica_duplicator_manager.h +++ b/src/replica/duplication/replica_duplicator_manager.h @@ -24,19 +24,16 @@ #include #include "common//duplication_common.h" -#include "common/replication_enums.h" #include "common/replication_other_types.h" #include "duplication_types.h" -#include "metadata_types.h" -#include "replica/replica.h" #include "replica/replica_base.h" #include "replica_duplicator.h" -#include "utils/fmt_logging.h" #include "utils/metrics.h" #include "utils/zlocks.h" namespace dsn { namespace replication { +class replica; /// replica_duplicator_manager manages the set of duplications on this replica. /// \see duplication_sync_timer @@ -51,19 +48,7 @@ class replica_duplicator_manager : public replica_base // - replica is not primary on replica-server perspective (status != PRIMARY) // - replica is not primary on meta-server perspective (progress.find(partition_id) == end()) // - the app is not assigned with duplication (dup_map.empty()) - void update_duplication_map(const std::map &new_dup_map) - { - if (new_dup_map.empty() || _replica->status() != partition_status::PS_PRIMARY) { - remove_all_duplications(); - return; - } - - remove_non_existed_duplications(new_dup_map); - - for (const auto &kv2 : new_dup_map) { - sync_duplication(kv2.second); - } - } + void update_duplication_map(const std::map &new_dup_map); /// collect updated duplication confirm points from this replica. std::vector get_duplication_confirms_to_update() const; @@ -93,21 +78,30 @@ class replica_duplicator_manager : public replica_base }; std::vector get_dup_states() const; + // Encode current progress of all duplication into json. + template + void encode_progress(TWriter &writer) const + { + zauto_lock l(_lock); + + if (_duplications.empty()) { + return; + } + + writer.Key("duplications"); + writer.StartArray(); + for (const auto &[_, dup] : _duplications) { + dup->encode_progress(writer); + } + writer.EndArray(); + } + private: void sync_duplication(const duplication_entry &ent); void remove_non_existed_duplications(const std::map &); - void remove_all_duplications() - { - // fast path - if (_duplications.empty()) - return; - - LOG_WARNING_PREFIX("remove all duplication, replica status = {}", - enum_to_string(_replica->status())); - _duplications.clear(); - } + void remove_all_duplications(); private: friend class duplication_sync_timer_test; diff --git a/src/replica/duplication/replica_follower.cpp b/src/replica/duplication/replica_follower.cpp index 6678017c7c..057358f59d 100644 --- a/src/replica/duplication/replica_follower.cpp +++ b/src/replica/duplication/replica_follower.cpp @@ -1,21 +1,21 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ #include "replica_follower.h" @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "common/duplication_common.h" @@ -32,17 +33,16 @@ #include "nfs/nfs_node.h" #include "replica/replica.h" #include "replica/replica_stub.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" +#include "rpc/dns_resolver.h" +#include "rpc/group_host_port.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" +#include "task/async_calls.h" #include "utils/fail_point.h" #include "utils/filesystem.h" #include "utils/fmt_logging.h" #include "utils/ports.h" -#include "absl/strings/string_view.h" #include "utils/strings.h" namespace dsn { @@ -132,13 +132,13 @@ void replica_follower::async_duplicate_checkpoint_from_master_replica() msg, &_tracker, [&](error_code err, query_cfg_response &&resp) mutable { - FAIL_POINT_INJECT_F("duplicate_checkpoint_ok", [&](absl::string_view s) -> void { + FAIL_POINT_INJECT_F("duplicate_checkpoint_ok", [&](std::string_view s) -> void { _tracker.set_tasks_success(); return; }); FAIL_POINT_INJECT_F("duplicate_checkpoint_failed", - [&](absl::string_view s) -> void { return; }); + [&](std::string_view s) -> void { return; }); if (update_master_replica_config(err, std::move(resp)) == ERR_OK) { copy_master_replica_checkpoint(); } @@ -184,12 +184,12 @@ error_code replica_follower::update_master_replica_config(error_code err, query_ } // since the request just specify one partition, the result size is single - _master_replica_config = resp.partitions[0]; + _pc = resp.partitions[0]; LOG_INFO_PREFIX( "query master[{}] config successfully and update local config: remote={}, gpid={}", master_replica_name(), - FMT_HOST_PORT_AND_IP(_master_replica_config, primary), - _master_replica_config.pid); + FMT_HOST_PORT_AND_IP(_pc, primary), + _pc.pid); return ERR_OK; } @@ -199,16 +199,13 @@ void replica_follower::copy_master_replica_checkpoint() LOG_INFO_PREFIX("query master[{}] replica checkpoint info and start use nfs copy the data", master_replica_name()); learn_request request; - request.pid = _master_replica_config.pid; - dsn::message_ex *msg = dsn::message_ex::create_request( - RPC_QUERY_LAST_CHECKPOINT_INFO, 0, _master_replica_config.pid.thread_hash()); + request.pid = _pc.pid; + dsn::message_ex *msg = + dsn::message_ex::create_request(RPC_QUERY_LAST_CHECKPOINT_INFO, 0, _pc.pid.thread_hash()); dsn::marshall(msg, request); - rpc::call(_master_replica_config.primary, - msg, - &_tracker, - [&](error_code err, learn_response &&resp) mutable { - nfs_copy_checkpoint(err, std::move(resp)); - }); + rpc::call(_pc.primary, msg, &_tracker, [&](error_code err, learn_response &&resp) mutable { + nfs_copy_checkpoint(err, std::move(resp)); + }); } // ThreadPool: THREAD_POOL_DEFAULT @@ -263,7 +260,7 @@ void replica_follower::nfs_copy_remote_files(const host_port &remote_node, &_tracker, [&, remote_dir](error_code err, size_t size) mutable { FAIL_POINT_INJECT_NOT_RETURN_F("nfs_copy_ok", - [&](absl::string_view s) -> void { err = ERR_OK; }); + [&](std::string_view s) -> void { err = ERR_OK; }); if (dsn_unlikely(err != ERR_OK)) { LOG_ERROR_PREFIX("nfs copy master[{}] checkpoint failed: checkpoint = {}, err = {}", diff --git a/src/replica/duplication/replica_follower.h b/src/replica/duplication/replica_follower.h index d6711c4b0c..c502790fb1 100644 --- a/src/replica/duplication/replica_follower.h +++ b/src/replica/duplication/replica_follower.h @@ -1,21 +1,21 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ #pragma once @@ -26,8 +26,8 @@ #include "common/gpid.h" #include "dsn.layer2_types.h" #include "replica/replica_base.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_host_port.h" +#include "task/task_tracker.h" #include "utils/error_code.h" #include "utils/zlocks.h" @@ -60,7 +60,7 @@ class replica_follower : replica_base std::string _master_cluster_name; std::string _master_app_name; std::vector _master_meta_list; - partition_configuration _master_replica_config; + partition_configuration _pc; bool need_duplicate{false}; @@ -78,11 +78,8 @@ class replica_follower : replica_base std::string master_replica_name() { std::string app_info = fmt::format("{}.{}", _master_cluster_name, _master_app_name); - if (_master_replica_config.hp_primary) { - return fmt::format("{}({}|{})", - app_info, - FMT_HOST_PORT_AND_IP(_master_replica_config, primary), - _master_replica_config.pid); + if (_pc.hp_primary) { + return fmt::format("{}({}|{})", app_info, FMT_HOST_PORT_AND_IP(_pc, primary), _pc.pid); } return app_info; } diff --git a/src/replica/duplication/test/dup_replica_http_service_test.cpp b/src/replica/duplication/test/dup_replica_http_service_test.cpp index 43a0a35153..5fbffbd49b 100644 --- a/src/replica/duplication/test/dup_replica_http_service_test.cpp +++ b/src/replica/duplication/test/dup_replica_http_service_test.cpp @@ -42,7 +42,8 @@ INSTANTIATE_TEST_SUITE_P(, dup_replica_http_service_test, ::testing::Values(fals TEST_P(dup_replica_http_service_test, query_duplication_handler) { - auto pri = stub->add_primary_replica(1, 1); + auto *pri = stub->add_primary_replica(1, 1); + pri->init_private_log(pri->dir()); // primary confirmed_decree duplication_entry ent; diff --git a/src/replica/duplication/test/duplication_sync_timer_test.cpp b/src/replica/duplication/test/duplication_sync_timer_test.cpp index 0f7855ed72..efa2581d44 100644 --- a/src/replica/duplication/test/duplication_sync_timer_test.cpp +++ b/src/replica/duplication/test/duplication_sync_timer_test.cpp @@ -31,9 +31,9 @@ #include "replica/duplication/replica_duplicator.h" #include "replica/duplication/replica_duplicator_manager.h" #include "replica/test/mock_utils.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" #include "utils/error_code.h" namespace dsn { @@ -52,7 +52,8 @@ class duplication_sync_timer_test : public duplication_test_base static const std::string kTestRemoteAppName = "temp"; // replica: {app_id:2, partition_id:1, duplications:{}} - stub->add_primary_replica(2, 1); + auto *rep = stub->add_primary_replica(2, 1); + rep->init_private_log(rep->dir()); ASSERT_NE(stub->find_replica(2, 1), nullptr); // appid:2 -> dupid:1 @@ -82,15 +83,16 @@ class duplication_sync_timer_test : public duplication_test_base { int total_app_num = 4; for (int appid = 1; appid <= total_app_num; appid++) { - auto r = stub->add_non_primary_replica(appid, 1); + auto *rep = stub->add_non_primary_replica(appid, 1); + rep->init_private_log(rep->dir()); // trigger duplication sync on partition 1 duplication_entry ent; ent.dupid = 1; - ent.progress[r->get_gpid().get_partition_index()] = 1000; + ent.progress[rep->get_gpid().get_partition_index()] = 1000; ent.status = duplication_status::DS_PAUSE; - auto dup = std::make_unique(ent, r); - add_dup(r, std::move(dup)); + auto dup = std::make_unique(ent, rep); + add_dup(rep, std::move(dup)); } RPC_MOCKING(duplication_sync_rpc) @@ -164,7 +166,8 @@ class duplication_sync_timer_test : public duplication_test_base std::map> dup_map; for (int32_t appid = 1; appid <= 10; appid++) { for (int partition_id = 0; partition_id < 3; partition_id++) { - stub->add_primary_replica(appid, partition_id); + auto *rep = stub->add_primary_replica(appid, partition_id); + rep->init_private_log(rep->dir()); } } @@ -254,19 +257,20 @@ class duplication_sync_timer_test : public duplication_test_base void test_update_confirmed_points() { for (int32_t appid = 1; appid <= 10; appid++) { - stub->add_primary_replica(appid, 1); + auto *rep = stub->add_primary_replica(appid, 1); + rep->init_private_log(rep->dir()); } for (int appid = 1; appid <= 3; appid++) { - auto r = stub->find_replica(appid, 1); + auto *rep = stub->find_replica(appid, 1); duplication_entry ent; ent.dupid = 1; ent.status = duplication_status::DS_PAUSE; - ent.progress[r->get_gpid().get_partition_index()] = 0; - auto dup = std::make_unique(ent, r); + ent.progress[rep->get_gpid().get_partition_index()] = 0; + auto dup = std::make_unique(ent, rep); dup->update_progress(dup->progress().set_last_decree(3).set_confirmed_decree(1)); - add_dup(r, std::move(dup)); + add_dup(rep, std::move(dup)); } duplication_entry ent; @@ -280,8 +284,8 @@ class duplication_sync_timer_test : public duplication_test_base dup_sync->on_duplication_sync_reply(ERR_OK, resp); for (int appid = 1; appid <= 3; appid++) { - auto r = stub->find_replica(appid, 1); - auto dup = find_dup(r, 1); + auto *rep = stub->find_replica(appid, 1); + auto *dup = find_dup(rep, 1); ASSERT_EQ(3, dup->progress().confirmed_decree); } @@ -294,7 +298,8 @@ class duplication_sync_timer_test : public duplication_test_base // 10 primaries int appid = 1; for (int partition_id = 0; partition_id < 10; partition_id++) { - stub->add_primary_replica(appid, partition_id); + auto *r = stub->add_primary_replica(appid, partition_id); + r->init_private_log(r->dir()); } duplication_entry ent; @@ -353,7 +358,8 @@ class duplication_sync_timer_test : public duplication_test_base // there must be some internal problems. void test_receive_illegal_duplication_status() { - stub->add_primary_replica(1, 0); + auto *rep = stub->add_primary_replica(1, 0); + rep->init_private_log(rep->dir()); duplication_entry ent; ent.dupid = 2; diff --git a/src/replica/duplication/test/duplication_test_base.h b/src/replica/duplication/test/duplication_test_base.h index eb914f38e0..49101341ed 100644 --- a/src/replica/duplication/test/duplication_test_base.h +++ b/src/replica/duplication/test/duplication_test_base.h @@ -34,7 +34,7 @@ class duplication_test_base : public replica_test_base public: duplication_test_base() { - mutation_duplicator::creator = [](replica_base *r, absl::string_view, absl::string_view) { + mutation_duplicator::creator = [](replica_base *r, std::string_view, std::string_view) { return std::make_unique(r); }; stub->_duplication_sync_timer = std::make_unique(stub.get()); @@ -54,17 +54,16 @@ class duplication_test_base : public replica_test_base return dup_entities[dupid].get(); } - std::unique_ptr create_test_duplicator(decree confirmed = invalid_decree, - decree start = invalid_decree) + std::unique_ptr + create_test_duplicator(decree confirmed_decree = invalid_decree) { duplication_entry dup_ent; dup_ent.dupid = 1; dup_ent.remote = "remote_address"; dup_ent.status = duplication_status::DS_PAUSE; - dup_ent.progress[_replica->get_gpid().get_partition_index()] = confirmed; + dup_ent.progress[_replica->get_gpid().get_partition_index()] = confirmed_decree; auto duplicator = std::make_unique(dup_ent, _replica.get()); - duplicator->_start_point_decree = start; return duplicator; } @@ -76,13 +75,19 @@ class duplication_test_base : public replica_test_base return log_file_map; } - mutation_ptr create_test_mutation(int64_t decree, const std::string &data) override + mutation_ptr + create_test_mutation(int64_t decree, int64_t last_committed_decree, const char *data) override { - auto mut = replica_test_base::create_test_mutation(decree, data); + auto mut = replica_test_base::create_test_mutation(decree, last_committed_decree, data); mut->data.updates[0].code = RPC_DUPLICATION_IDEMPOTENT_WRITE; // must be idempotent write return mut; } + mutation_ptr create_test_mutation(int64_t decree, const char *data) override + { + return duplication_test_base::create_test_mutation(decree, decree - 1, data); + } + void wait_all(const std::unique_ptr &dup) { dup->tracker()->wait_outstanding_tasks(); diff --git a/src/replica/duplication/test/load_from_private_log_test.cpp b/src/replica/duplication/test/load_from_private_log_test.cpp index 0d7141745d..4fba2fdf53 100644 --- a/src/replica/duplication/test/load_from_private_log_test.cpp +++ b/src/replica/duplication/test/load_from_private_log_test.cpp @@ -35,10 +35,10 @@ #include "replica/mutation.h" #include "replica/mutation_log.h" #include "replica/test/mock_utils.h" +#include "rpc/rpc_holder.h" #include "runtime/pipeline.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/chrono_literals.h" #include "utils/env.h" @@ -92,8 +92,7 @@ class load_from_private_log_test : public duplication_test_base // each round mlog will replay the former logs, and create new file mutation_log_ptr mlog = create_private_log(); for (int i = 1; i <= 10; i++) { - std::string msg = "hello!"; - mutation_ptr mu = create_test_mutation(10 * f + i, msg); + auto mu = create_test_mutation(10 * f + i, "hello!"); mlog->append(mu, LPC_AIO_IMMEDIATE_CALLBACK, nullptr, nullptr, 0); } mlog->tracker()->wait_outstanding_tasks(); @@ -149,9 +148,8 @@ class load_from_private_log_test : public duplication_test_base auto reserved_plog_force_flush = FLAGS_plog_force_flush; FLAGS_plog_force_flush = true; for (int i = decree_start; i <= num_entries + decree_start; i++) { - std::string msg = "hello!"; // decree - last_commit_decree = 1 by default - mutation_ptr mu = create_test_mutation(i, msg); + auto mu = create_test_mutation(i, "hello!"); // mock the last_commit_decree of first mu equal with `last_commit_decree_start` if (i == decree_start) { mu->data.header.last_committed_decree = last_commit_decree_start; @@ -160,7 +158,7 @@ class load_from_private_log_test : public duplication_test_base } // commit the last entry - mutation_ptr mu = create_test_mutation(decree_start + num_entries + 1, "hello!"); + auto mu = create_test_mutation(decree_start + num_entries + 1, "hello!"); mlog->append(mu, LPC_AIO_IMMEDIATE_CALLBACK, nullptr, nullptr, 0); FLAGS_plog_force_flush = reserved_plog_force_flush; @@ -362,13 +360,12 @@ TEST_P(load_from_private_log_test, ignore_useless) int num_entries = 100; for (int i = 1; i <= num_entries; i++) { - std::string msg = "hello!"; - mutation_ptr mu = create_test_mutation(i, msg); + auto mu = create_test_mutation(i, "hello!"); mlog->append(mu, LPC_AIO_IMMEDIATE_CALLBACK, nullptr, nullptr, 0); } // commit the last entry - mutation_ptr mu = create_test_mutation(1 + num_entries, "hello!"); + auto mu = create_test_mutation(1 + num_entries, "hello!"); mlog->append(mu, LPC_AIO_IMMEDIATE_CALLBACK, nullptr, nullptr, 0); mlog->close(); diff --git a/src/replica/duplication/test/mutation_batch_test.cpp b/src/replica/duplication/test/mutation_batch_test.cpp index 541531c5e1..a4eac81812 100644 --- a/src/replica/duplication/test/mutation_batch_test.cpp +++ b/src/replica/duplication/test/mutation_batch_test.cpp @@ -15,9 +15,11 @@ // specific language governing permissions and limitations // under the License. +#include #include -#include +#include #include +#include #include #include #include @@ -29,86 +31,184 @@ #include "gtest/gtest.h" #include "replica/duplication/mutation_batch.h" #include "replica/duplication/mutation_duplicator.h" +#include "replica/duplication/replica_duplicator.h" #include "replica/mutation.h" #include "replica/prepare_list.h" -#include "runtime/task/task_code.h" +#include "replica/test/mock_utils.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" +#include "utils/blob.h" -namespace dsn { -namespace replication { +namespace dsn::replication { class mutation_batch_test : public duplication_test_base { -public: - void - reset_buffer(const mutation_batch &batcher, const decree last_commit, decree start, decree end) +protected: + mutation_batch_test() { - batcher._mutation_buffer->reset(last_commit); - batcher._mutation_buffer->_start_decree = start; - batcher._mutation_buffer->_end_decree = end; + _replica->init_private_log(_replica->dir()); + _duplicator = create_test_duplicator(0); + _batcher = std::make_unique(_duplicator.get()); } - void commit_buffer(const mutation_batch &batcher, const decree current_decree) + void reset_buffer(const decree last_commit, const decree start, const decree end) const { - batcher._mutation_buffer->commit(current_decree, COMMIT_TO_DECREE_HARD); + _batcher->_mutation_buffer->reset(last_commit); + _batcher->_mutation_buffer->_start_decree = start; + _batcher->_mutation_buffer->_end_decree = end; } + + void commit_buffer(const decree current_decree) const + { + _batcher->_mutation_buffer->commit(current_decree, COMMIT_TO_DECREE_HARD); + } + + void check_mutation_contents(const std::vector &expected_mutations) const + { + const auto all_mutations = _batcher->move_all_mutations(); + + std::vector actual_mutations; + std::transform(all_mutations.begin(), + all_mutations.end(), + std::back_inserter(actual_mutations), + [](const mutation_tuple &tuple) { return std::get<2>(tuple).to_string(); }); + + ASSERT_EQ(expected_mutations, actual_mutations); + } + + std::unique_ptr _duplicator; + std::unique_ptr _batcher; }; INSTANTIATE_TEST_SUITE_P(, mutation_batch_test, ::testing::Values(false, true)); -TEST_P(mutation_batch_test, add_mutation_if_valid) +TEST_P(mutation_batch_test, prepare_mutation) { - auto duplicator = create_test_duplicator(0); - mutation_batch batcher(duplicator.get()); - - mutation_tuple_set result; - - std::string s = "hello"; - mutation_ptr mu1 = create_test_mutation(1, s); - batcher.add_mutation_if_valid(mu1, 0); - result = batcher.move_all_mutations(); - mutation_tuple mt1 = *result.begin(); - - s = "world"; - mutation_ptr mu2 = create_test_mutation(2, s); - batcher.add_mutation_if_valid(mu2, 0); - result = batcher.move_all_mutations(); - mutation_tuple mt2 = *result.begin(); - - ASSERT_EQ(std::get<2>(mt1).to_string(), "hello"); - ASSERT_EQ(std::get<2>(mt2).to_string(), "world"); - - // decree 1 should be ignored - mutation_ptr mu3 = create_test_mutation(1, s); - batcher.add_mutation_if_valid(mu2, 2); - batcher.add_mutation_if_valid(mu3, 1); - result = batcher.move_all_mutations(); - ASSERT_EQ(result.size(), 2); + auto mu1 = create_test_mutation(1, 0, "first mutation"); + set_last_applied_decree(1); + ASSERT_TRUE(_batcher->add(mu1)); + ASSERT_EQ(1, _batcher->last_decree()); + + auto mu2 = create_test_mutation(2, 1, "abcde"); + set_last_applied_decree(2); + ASSERT_TRUE(_batcher->add(mu2)); + ASSERT_EQ(2, _batcher->last_decree()); + + auto mu3 = create_test_mutation(3, 2, "hello world"); + ASSERT_TRUE(_batcher->add(mu3)); + + // The last decree has not been updated. + ASSERT_EQ(2, _batcher->last_decree()); + + auto mu4 = create_test_mutation(4, 2, "foo bar"); + ASSERT_TRUE(_batcher->add(mu4)); + ASSERT_EQ(2, _batcher->last_decree()); + + // The committed mutation would be ignored. + auto mu2_another = create_test_mutation(2, 1, "another second mutation"); + ASSERT_TRUE(_batcher->add(mu2_another)); + ASSERT_EQ(2, _batcher->last_decree()); + + // The mutation with duplicate decree would be ignored. + auto mu3_another = create_test_mutation(3, 2, "123 xyz"); + ASSERT_TRUE(_batcher->add(mu3_another)); + ASSERT_EQ(2, _batcher->last_decree()); + + auto mu5 = create_test_mutation(5, 2, "5th mutation"); + set_last_applied_decree(5); + ASSERT_TRUE(_batcher->add(mu5)); + ASSERT_EQ(5, _batcher->last_decree()); + + check_mutation_contents({"first mutation", "abcde", "hello world", "foo bar", "5th mutation"}); } -TEST_P(mutation_batch_test, ignore_non_idempotent_write) +TEST_P(mutation_batch_test, add_null_mutation) { - auto duplicator = create_test_duplicator(0); - mutation_batch batcher(duplicator.get()); + auto mu = create_test_mutation(1, nullptr); + _batcher->add_mutation_if_valid(mu, 0); + + check_mutation_contents({""}); +} + +TEST_P(mutation_batch_test, add_empty_mutation) +{ + auto mu = create_test_mutation(1, ""); + _batcher->add_mutation_if_valid(mu, 0); + + check_mutation_contents({""}); +} + +// TODO(wangdan): once `string_view` function is removed from blob, drop this test. +TEST_P(mutation_batch_test, add_string_view_mutation) +{ + auto mu = create_test_mutation(1, nullptr); + const std::string data("hello"); + mu->data.updates.back().data = blob(data.data(), 0, data.size()); + _batcher->add_mutation_if_valid(mu, 0); + + check_mutation_contents({"hello"}); +} - std::string s = "hello"; - mutation_ptr mu = create_test_mutation(1, s); +TEST_P(mutation_batch_test, add_a_valid_mutation) +{ + auto mu = create_test_mutation(1, "hello"); + _batcher->add_mutation_if_valid(mu, 0); + + check_mutation_contents({"hello"}); +} + +TEST_P(mutation_batch_test, add_multiple_valid_mutations) +{ + // The mutation could not be reused, since in mutation_batch::add_mutation_if_valid + // the elements of mutation::data::updates would be moved and nullified. + auto mu1 = create_test_mutation(1, "hello"); + _batcher->add_mutation_if_valid(mu1, 0); + + auto mu2 = create_test_mutation(2, "world"); + _batcher->add_mutation_if_valid(mu2, 2); + + auto mu3 = create_test_mutation(3, "hi"); + _batcher->add_mutation_if_valid(mu3, 2); + + check_mutation_contents({"hello", "world", "hi"}); +} + +TEST_P(mutation_batch_test, add_invalid_mutation) +{ + auto mu2 = create_test_mutation(2, "world"); + _batcher->add_mutation_if_valid(mu2, 2); + + // mu1 would be ignored, since its decree is less than the start decree. + auto mu1 = create_test_mutation(1, "hello"); + _batcher->add_mutation_if_valid(mu1, 2); + + auto mu3 = create_test_mutation(3, "hi"); + _batcher->add_mutation_if_valid(mu3, 2); + + auto mu4 = create_test_mutation(1, "ok"); + _batcher->add_mutation_if_valid(mu4, 1); + + // "ok" would be the first, since its timestamp (i.e. decree in create_test_mutation) + // is the smallest. + check_mutation_contents({"ok", "world", "hi"}); +} + +TEST_P(mutation_batch_test, ignore_non_idempotent_write) +{ + auto mu = create_test_mutation(1, "hello"); mu->data.updates[0].code = RPC_DUPLICATION_NON_IDEMPOTENT_WRITE; - batcher.add_mutation_if_valid(mu, 0); - mutation_tuple_set result = batcher.move_all_mutations(); - ASSERT_EQ(result.size(), 0); + _batcher->add_mutation_if_valid(mu, 0); + check_mutation_contents({}); } TEST_P(mutation_batch_test, mutation_buffer_commit) { - auto duplicator = create_test_duplicator(0); - mutation_batch batcher(duplicator.get()); - // mock mutation_buffer[last=10, start=15, end=20], last + 1(next commit decree) is out of - // [start~end] - reset_buffer(batcher, 10, 15, 20); - commit_buffer(batcher, 15); - ASSERT_EQ(batcher.last_decree(), 14); + // Mock mutation_buffer[last=10, start=15, end=20], last + 1(next commit decree) is out of + // [start~end], then last would become min_decree() - 1, see mutation_buffer::commit() for + // details. + reset_buffer(10, 15, 20); + commit_buffer(15); + ASSERT_EQ(14, _batcher->last_decree()); } -} // namespace replication -} // namespace dsn +} // namespace dsn::replication diff --git a/src/replica/duplication/test/replica_duplicator_manager_test.cpp b/src/replica/duplication/test/replica_duplicator_manager_test.cpp index 8123cd623d..1f69997576 100644 --- a/src/replica/duplication/test/replica_duplicator_manager_test.cpp +++ b/src/replica/duplication/test/replica_duplicator_manager_test.cpp @@ -44,14 +44,15 @@ class replica_duplicator_manager_test : public duplication_test_base void test_remove_non_existed_duplications() { - auto r = stub->add_primary_replica(2, 1); - auto &d = r->get_replica_duplicator_manager(); + auto *rep = stub->add_primary_replica(2, 1); + rep->init_private_log(rep->dir()); + auto &d = rep->get_replica_duplicator_manager(); duplication_entry ent; ent.dupid = 1; ent.status = duplication_status::DS_PAUSE; ent.remote = "dsn://slave-cluster"; - ent.progress[r->get_gpid().get_partition_index()] = 0; + ent.progress[rep->get_gpid().get_partition_index()] = 0; d.sync_duplication(ent); ASSERT_EQ(d._duplications.size(), 1); @@ -66,20 +67,21 @@ class replica_duplicator_manager_test : public duplication_test_base void test_set_confirmed_decree_non_primary() { - auto r = stub->add_primary_replica(2, 1); - auto &d = r->get_replica_duplicator_manager(); + auto *rep = stub->add_primary_replica(2, 1); + rep->init_private_log(rep->dir()); + auto &d = rep->get_replica_duplicator_manager(); duplication_entry ent; ent.dupid = 1; ent.status = duplication_status::DS_PAUSE; ent.remote = "dsn://slave-cluster"; - ent.progress[r->get_gpid().get_partition_index()] = 100; + ent.progress[rep->get_gpid().get_partition_index()] = 100; d.sync_duplication(ent); ASSERT_EQ(d._duplications.size(), 1); ASSERT_EQ(d._primary_confirmed_decree, invalid_decree); // replica failover - r->as_secondary(); + rep->as_secondary(); d.update_confirmed_decree_if_secondary(99); ASSERT_EQ(d._duplications.size(), 0); @@ -103,7 +105,8 @@ class replica_duplicator_manager_test : public duplication_test_base void test_get_duplication_confirms() { - auto r = stub->add_primary_replica(2, 1); + auto *rep = stub->add_primary_replica(2, 1); + rep->init_private_log(rep->dir()); int total_dup_num = 10; int update_dup_num = 4; // the number of dups that will be updated @@ -112,25 +115,25 @@ class replica_duplicator_manager_test : public duplication_test_base duplication_entry ent; ent.dupid = id; ent.status = duplication_status::DS_PAUSE; - ent.progress[r->get_gpid().get_partition_index()] = 0; + ent.progress[rep->get_gpid().get_partition_index()] = 0; - auto dup = std::make_unique(ent, r); + auto dup = std::make_unique(ent, rep); dup->update_progress(dup->progress().set_last_decree(2).set_confirmed_decree(1)); - add_dup(r, std::move(dup)); + add_dup(rep, std::move(dup)); } for (dupid_t id = update_dup_num + 1; id <= total_dup_num; id++) { duplication_entry ent; ent.dupid = id; ent.status = duplication_status::DS_PAUSE; - ent.progress[r->get_gpid().get_partition_index()] = 0; + ent.progress[rep->get_gpid().get_partition_index()] = 0; - auto dup = std::make_unique(ent, r); + auto dup = std::make_unique(ent, rep); dup->update_progress(dup->progress().set_last_decree(1).set_confirmed_decree(1)); - add_dup(r, std::move(dup)); + add_dup(rep, std::move(dup)); } - auto result = r->get_replica_duplicator_manager().get_duplication_confirms_to_update(); + auto result = rep->get_replica_duplicator_manager().get_duplication_confirms_to_update(); ASSERT_EQ(result.size(), update_dup_num); } @@ -142,24 +145,25 @@ class replica_duplicator_manager_test : public duplication_test_base int64_t min_confirmed_decree; }; - auto r = stub->add_non_primary_replica(2, 1); - auto assert_test = [r, this](test_case tt) { + auto *rep = stub->add_non_primary_replica(2, 1); + rep->init_private_log(rep->dir()); + auto assert_test = [rep, this](test_case tt) { for (int id = 1; id <= tt.confirmed_decree.size(); id++) { duplication_entry ent; ent.dupid = id; ent.status = duplication_status::DS_PAUSE; - ent.progress[r->get_gpid().get_partition_index()] = 0; + ent.progress[rep->get_gpid().get_partition_index()] = 0; - auto dup = std::make_unique(ent, r); + auto dup = std::make_unique(ent, rep); dup->update_progress(dup->progress() .set_last_decree(tt.confirmed_decree[id - 1]) .set_confirmed_decree(tt.confirmed_decree[id - 1])); - add_dup(r, std::move(dup)); + add_dup(rep, std::move(dup)); } - ASSERT_EQ(r->get_replica_duplicator_manager().min_confirmed_decree(), + ASSERT_EQ(rep->get_replica_duplicator_manager().min_confirmed_decree(), tt.min_confirmed_decree); - r->get_replica_duplicator_manager()._duplications.clear(); + rep->get_replica_duplicator_manager()._duplications.clear(); }; { @@ -169,7 +173,7 @@ class replica_duplicator_manager_test : public duplication_test_base } { // primary - r->as_primary(); + rep->as_primary(); test_case tt{{1, 2, 3}, 1}; assert_test(tt); @@ -203,17 +207,19 @@ TEST_P(replica_duplicator_manager_test, min_confirmed_decree) { test_min_confirm TEST_P(replica_duplicator_manager_test, update_checkpoint_prepared) { - auto r = stub->add_primary_replica(2, 1); + auto *rep = stub->add_primary_replica(2, 1); + rep->init_private_log(rep->dir()); + duplication_entry ent; ent.dupid = 1; ent.status = duplication_status::DS_PAUSE; - ent.progress[r->get_gpid().get_partition_index()] = 0; + ent.progress[rep->get_gpid().get_partition_index()] = 0; - auto dup = std::make_unique(ent, r); - r->update_last_durable_decree(100); + auto dup = std::make_unique(ent, rep); + rep->update_last_durable_decree(100); dup->update_progress(dup->progress().set_last_decree(2).set_confirmed_decree(1)); - add_dup(r, std::move(dup)); - auto updates = r->get_replica_duplicator_manager().get_duplication_confirms_to_update(); + add_dup(rep, std::move(dup)); + auto updates = rep->get_replica_duplicator_manager().get_duplication_confirms_to_update(); for (const auto &update : updates) { ASSERT_TRUE(update.checkpoint_prepared); } diff --git a/src/replica/duplication/test/replica_duplicator_test.cpp b/src/replica/duplication/test/replica_duplicator_test.cpp index 817e3090f8..fc07be00ab 100644 --- a/src/replica/duplication/test/replica_duplicator_test.cpp +++ b/src/replica/duplication/test/replica_duplicator_test.cpp @@ -33,7 +33,7 @@ #include "replica/mutation_log.h" #include "replica/test/mock_utils.h" #include "runtime/pipeline.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/errors.h" @@ -64,17 +64,18 @@ class replica_duplicator_test : public duplication_test_base decree last_durable_decree() const { return _replica->last_durable_decree(); } - decree log_dup_start_decree(const std::unique_ptr &dup) const + decree min_checkpoint_decree(const std::unique_ptr &dup) const { - return dup->_start_point_decree; + return dup->_min_checkpoint_decree; } - void test_new_duplicator(const std::string &remote_app_name, bool specify_remote_app_name) + void test_new_duplicator(const std::string &remote_app_name, + bool specify_remote_app_name, + int64_t confirmed_decree) { const dupid_t dupid = 1; const std::string remote = "remote_address"; const duplication_status::type status = duplication_status::DS_PAUSE; - const int64_t confirmed_decree = 100; duplication_entry dup_ent; dup_ent.dupid = dupid; @@ -90,8 +91,13 @@ class replica_duplicator_test : public duplication_test_base ASSERT_EQ(remote, duplicator->remote_cluster_name()); ASSERT_EQ(remote_app_name, duplicator->remote_app_name()); ASSERT_EQ(status, duplicator->_status); + ASSERT_EQ(1, duplicator->_min_checkpoint_decree); ASSERT_EQ(confirmed_decree, duplicator->progress().confirmed_decree); - ASSERT_EQ(confirmed_decree, duplicator->progress().last_decree); + if (confirmed_decree == invalid_decree) { + ASSERT_EQ(1, duplicator->progress().last_decree); + } else { + ASSERT_EQ(confirmed_decree, duplicator->progress().last_decree); + } auto &expected_env = *duplicator; ASSERT_EQ(duplicator->tracker(), expected_env.__conf.tracker); @@ -144,12 +150,25 @@ INSTANTIATE_TEST_SUITE_P(, replica_duplicator_test, ::testing::Values(false, tru TEST_P(replica_duplicator_test, new_duplicator_without_remote_app_name) { - test_new_duplicator("temp", false); + test_new_duplicator("temp", false, 100); } TEST_P(replica_duplicator_test, new_duplicator_with_remote_app_name) { - test_new_duplicator("another_test_app", true); + test_new_duplicator("another_test_app", true, 100); +} + +// Initial confirmed decree immediately after the duplication was created is `invalid_decree` +// which was synced from meta server. +TEST_P(replica_duplicator_test, new_duplicator_with_initial_confirmed_decree) +{ + test_new_duplicator("test_initial_confirmed_decree", true, invalid_decree); +} + +// The duplication progressed and confirmed decree became valid. +TEST_P(replica_duplicator_test, new_duplicator_with_non_initial_confirmed_decree) +{ + test_new_duplicator("test_non_initial_confirmed_decree", true, 1); } TEST_P(replica_duplicator_test, pause_start_duplication) { test_pause_start_duplication(); } @@ -157,39 +176,51 @@ TEST_P(replica_duplicator_test, pause_start_duplication) { test_pause_start_dupl TEST_P(replica_duplicator_test, duplication_progress) { auto duplicator = create_test_duplicator(); - ASSERT_EQ(duplicator->progress().last_decree, 0); // start duplication from empty plog - ASSERT_EQ(duplicator->progress().confirmed_decree, invalid_decree); + // Start duplication from empty replica. + ASSERT_EQ(1, min_checkpoint_decree(duplicator)); + ASSERT_EQ(1, duplicator->progress().last_decree); + ASSERT_EQ(invalid_decree, duplicator->progress().confirmed_decree); + + // Update the max decree that has been duplicated to the remote cluster. duplicator->update_progress(duplicator->progress().set_last_decree(10)); - ASSERT_EQ(duplicator->progress().last_decree, 10); - ASSERT_EQ(duplicator->progress().confirmed_decree, invalid_decree); + ASSERT_EQ(10, duplicator->progress().last_decree); + ASSERT_EQ(invalid_decree, duplicator->progress().confirmed_decree); + // Update the max decree that has been persisted in the meta server. duplicator->update_progress(duplicator->progress().set_confirmed_decree(10)); - ASSERT_EQ(duplicator->progress().confirmed_decree, 10); - ASSERT_EQ(duplicator->progress().last_decree, 10); + ASSERT_EQ(10, duplicator->progress().last_decree); + ASSERT_EQ(10, duplicator->progress().confirmed_decree); - ASSERT_EQ(duplicator->update_progress(duplicator->progress().set_confirmed_decree(1)), - error_s::make(ERR_INVALID_STATE, "never decrease confirmed_decree: new(1) old(10)")); + ASSERT_EQ(error_s::make(ERR_INVALID_STATE, "never decrease confirmed_decree: new(1) old(10)"), + duplicator->update_progress(duplicator->progress().set_confirmed_decree(1))); - ASSERT_EQ(duplicator->update_progress(duplicator->progress().set_confirmed_decree(12)), - error_s::make(ERR_INVALID_STATE, - "last_decree(10) should always larger than confirmed_decree(12)")); + ASSERT_EQ(error_s::make(ERR_INVALID_STATE, + "last_decree(10) should always larger than confirmed_decree(12)"), + duplicator->update_progress(duplicator->progress().set_confirmed_decree(12))); - auto duplicator_for_checkpoint = create_test_duplicator(invalid_decree, 100); + // Test that the checkpoint has not been created. + replica()->update_last_applied_decree(100); + auto duplicator_for_checkpoint = create_test_duplicator(); ASSERT_FALSE(duplicator_for_checkpoint->progress().checkpoint_has_prepared); - replica()->update_last_durable_decree(101); + // Test that the checkpoint has been created. + replica()->update_last_durable_decree(100); duplicator_for_checkpoint->update_progress(duplicator->progress()); ASSERT_TRUE(duplicator_for_checkpoint->progress().checkpoint_has_prepared); } -TEST_P(replica_duplicator_test, prapre_dup) +TEST_P(replica_duplicator_test, prepare_dup) { - auto duplicator = create_test_duplicator(invalid_decree, 100); + replica()->update_last_applied_decree(100); replica()->update_expect_last_durable_decree(100); + + auto duplicator = create_test_duplicator(); duplicator->prepare_dup(); wait_all(duplicator); - ASSERT_EQ(last_durable_decree(), log_dup_start_decree(duplicator)); + + ASSERT_EQ(100, min_checkpoint_decree(duplicator)); + ASSERT_EQ(100, last_durable_decree()); } } // namespace replication diff --git a/src/replica/duplication/test/replica_follower_test.cpp b/src/replica/duplication/test/replica_follower_test.cpp index 15728386ef..45cf7e9dfa 100644 --- a/src/replica/duplication/test/replica_follower_test.cpp +++ b/src/replica/duplication/test/replica_follower_test.cpp @@ -32,9 +32,9 @@ #include "nfs/nfs_node.h" #include "replica/duplication/replica_follower.h" #include "replica/test/mock_utils.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/fail_point.h" @@ -99,7 +99,7 @@ class replica_follower_test : public duplication_test_base const partition_configuration &master_replica_config(replica_follower *follower) const { - return follower->_master_replica_config; + return follower->_pc; } error_code nfs_copy_checkpoint(replica_follower *follower, error_code err, learn_response resp) @@ -225,42 +225,42 @@ TEST_P(replica_follower_test, test_update_master_replica_config) ASSERT_FALSE(master_replica_config(follower).hp_primary); resp.partition_count = _app_info.partition_count; - partition_configuration p; - resp.partitions.emplace_back(p); - resp.partitions.emplace_back(p); + partition_configuration pc; + resp.partitions.emplace_back(pc); + resp.partitions.emplace_back(pc); ASSERT_EQ(update_master_replica_config(follower, resp), ERR_INVALID_DATA); ASSERT_FALSE(master_replica_config(follower).primary); ASSERT_FALSE(master_replica_config(follower).hp_primary); resp.partitions.clear(); - p.pid = gpid(2, 100); - resp.partitions.emplace_back(p); + pc.pid = gpid(2, 100); + resp.partitions.emplace_back(pc); ASSERT_EQ(update_master_replica_config(follower, resp), ERR_INCONSISTENT_STATE); ASSERT_FALSE(master_replica_config(follower).primary); ASSERT_FALSE(master_replica_config(follower).hp_primary); resp.partitions.clear(); - RESET_IP_AND_HOST_PORT(p, primary); - p.pid = gpid(2, 1); - resp.partitions.emplace_back(p); + RESET_IP_AND_HOST_PORT(pc, primary); + pc.pid = gpid(2, 1); + resp.partitions.emplace_back(pc); ASSERT_EQ(update_master_replica_config(follower, resp), ERR_INVALID_STATE); ASSERT_FALSE(master_replica_config(follower).primary); ASSERT_FALSE(master_replica_config(follower).hp_primary); resp.partitions.clear(); - p.pid = gpid(2, 1); + pc.pid = gpid(2, 1); const host_port primary("localhost", 34801); const host_port secondary1("localhost", 34802); const host_port secondary2("localhost", 34803); - SET_IP_AND_HOST_PORT_BY_DNS(p, primary, primary); - SET_IPS_AND_HOST_PORTS_BY_DNS(p, secondaries, secondary1, secondary2); - resp.partitions.emplace_back(p); + SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, primary); + SET_IPS_AND_HOST_PORTS_BY_DNS(pc, secondaries, secondary1, secondary2); + resp.partitions.emplace_back(pc); ASSERT_EQ(update_master_replica_config(follower, resp), ERR_OK); - ASSERT_EQ(master_replica_config(follower).primary, p.primary); - ASSERT_EQ(master_replica_config(follower).hp_primary, p.hp_primary); - ASSERT_EQ(master_replica_config(follower).pid, p.pid); + ASSERT_EQ(master_replica_config(follower).primary, pc.primary); + ASSERT_EQ(master_replica_config(follower).hp_primary, pc.hp_primary); + ASSERT_EQ(master_replica_config(follower).pid, pc.pid); } TEST_P(replica_follower_test, test_nfs_copy_checkpoint) diff --git a/src/replica/duplication/test/run.sh b/src/replica/duplication/test/run.sh index 90dee780db..bab5b57caa 100755 --- a/src/replica/duplication/test/run.sh +++ b/src/replica/duplication/test/run.sh @@ -45,7 +45,7 @@ fi ./dsn_replica_dup_test if [ $? -ne 0 ]; then - tail -n 100 data/log/log.1.txt + tail -n 100 `find . -name pegasus.log.*` if [ -f core ]; then gdb ./dsn_replica_dup_test core -ex "bt" fi diff --git a/src/replica/log_file.h b/src/replica/log_file.h index fcaff2187a..74700310e0 100644 --- a/src/replica/log_file.h +++ b/src/replica/log_file.h @@ -37,7 +37,7 @@ #include "common/gpid.h" #include "common/replication_other_types.h" #include "runtime/api_task.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/zlocks.h" diff --git a/src/replica/log_file_stream.h b/src/replica/log_file_stream.h index 3b901aed73..2b2567229b 100644 --- a/src/replica/log_file_stream.h +++ b/src/replica/log_file_stream.h @@ -66,7 +66,7 @@ class log_file::file_streamer fill_buffers(); } - // TODO(wutao1): use absl::string_view instead of using blob. + // TODO(wutao1): use std::string_view instead of using blob. // WARNING: the resulted blob is not guaranteed to be reference counted. // possible error_code: // ERR_OK result would always size as expected diff --git a/src/replica/mutation.cpp b/src/replica/mutation.cpp index f6b3c7e896..e1c73e8aee 100644 --- a/src/replica/mutation.cpp +++ b/src/replica/mutation.cpp @@ -36,7 +36,7 @@ #include "common/replication.codes.h" #include "replica.h" #include "runtime/api_task.h" -#include "runtime/task/task_spec.h" +#include "task/task_spec.h" #include "utils/binary_reader.h" #include "utils/binary_writer.h" #include "utils/blob.h" @@ -463,5 +463,5 @@ void mutation_queue::clear(std::vector &queued_mutations) // is handled by prepare_list // _current_op_count = 0; } -} -} // namespace end +} // namespace replication +} // namespace dsn diff --git a/src/replica/mutation.h b/src/replica/mutation.h index d5b7f238ad..757ce450e6 100644 --- a/src/replica/mutation.h +++ b/src/replica/mutation.h @@ -35,10 +35,10 @@ #include "common/replication_common.h" #include "common/replication_other_types.h" #include "consensus_types.h" +#include "rpc/rpc_message.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/fmt_logging.h" #include "utils/link.h" @@ -48,6 +48,7 @@ class binary_reader; class binary_writer; class blob; class gpid; + namespace utils { class latency_tracer; } // namespace utils @@ -250,5 +251,5 @@ class mutation_queue mutation_ptr _pending_mutation; slist _hdr; }; -} -} // namespace +} // namespace replication +} // namespace dsn diff --git a/src/replica/mutation_cache.cpp b/src/replica/mutation_cache.cpp index f1e8f25d67..603ab28fcb 100644 --- a/src/replica/mutation_cache.cpp +++ b/src/replica/mutation_cache.cpp @@ -147,5 +147,5 @@ mutation_ptr mutation_cache::get_mutation_by_decree(decree decree) else return _array[(_start_idx + (decree - _start_decree) + _max_count) % _max_count]; } -} -} // namespace end +} // namespace replication +} // namespace dsn diff --git a/src/replica/mutation_cache.h b/src/replica/mutation_cache.h index ac01127360..8e2c0d6b1f 100644 --- a/src/replica/mutation_cache.h +++ b/src/replica/mutation_cache.h @@ -71,5 +71,5 @@ class mutation_cache decree _start_decree; std::atomic _end_decree; }; -} -} // namespace +} // namespace replication +} // namespace dsn diff --git a/src/replica/mutation_log.cpp b/src/replica/mutation_log.cpp index 84a1e5be37..df3ca2e6b2 100644 --- a/src/replica/mutation_log.cpp +++ b/src/replica/mutation_log.cpp @@ -167,8 +167,8 @@ void mutation_log_private::get_in_memory_mutations(decree start_decree, for (auto &mu : issued_write->mutations()) { // if start_ballot is invalid or equal to mu.ballot, check decree // otherwise check ballot - ballot current_ballot = - (start_ballot == invalid_ballot) ? invalid_ballot : mu->get_ballot(); + ballot current_ballot = (start_ballot == invalid_ballot) ? invalid_ballot + : mu->get_ballot(); if ((mu->get_decree() >= start_decree && start_ballot == current_ballot) || current_ballot > start_ballot) { mutation_list.push_back(mutation::copy_no_reply(mu)); @@ -179,8 +179,8 @@ void mutation_log_private::get_in_memory_mutations(decree start_decree, for (auto &mu : pending_mutations) { // if start_ballot is invalid or equal to mu.ballot, check decree // otherwise check ballot - ballot current_ballot = - (start_ballot == invalid_ballot) ? invalid_ballot : mu->get_ballot(); + ballot current_ballot = (start_ballot == invalid_ballot) ? invalid_ballot + : mu->get_ballot(); if ((mu->get_decree() >= start_decree && start_ballot == current_ballot) || current_ballot > start_ballot) { mutation_list.push_back(mutation::copy_no_reply(mu)); @@ -243,6 +243,7 @@ void mutation_log_private::write_pending_mutations(bool release_lock_required) // move or reset pending variables std::shared_ptr pending = std::move(_pending_write); _issued_write = pending; + decree max_decree = _pending_write_max_decree; decree max_commit = _pending_write_max_commit; _pending_write_max_commit = 0; _pending_write_max_decree = 0; @@ -250,11 +251,12 @@ void mutation_log_private::write_pending_mutations(bool release_lock_required) // Free plog from lock during committing log block, in the meantime // new mutations can still be appended. _plock.unlock(); - commit_pending_mutations(pr.first, pending, max_commit); + commit_pending_mutations(pr.first, pending, max_decree, max_commit); } void mutation_log_private::commit_pending_mutations(log_file_ptr &lf, std::shared_ptr &pending, + decree max_decree, decree max_commit) { if (dsn_unlikely(FLAGS_enable_latency_tracer)) { @@ -263,64 +265,66 @@ void mutation_log_private::commit_pending_mutations(log_file_ptr &lf, } } - lf->commit_log_blocks(*pending, - LPC_WRITE_REPLICATION_LOG_PRIVATE, - &_tracker, - [this, lf, pending, max_commit](error_code err, size_t sz) mutable { - CHECK(_is_writing.load(std::memory_order_relaxed), ""); - - for (auto &block : pending->all_blocks()) { - auto hdr = (log_block_header *)block.front().data(); - CHECK_EQ(hdr->magic, 0xdeadbeef); - } - - if (dsn_unlikely(FLAGS_enable_latency_tracer)) { - for (const auto &mu : pending->mutations()) { - ADD_CUSTOM_POINT(mu->_tracer, "commit_pending_completed"); - } - } - - // notify the callbacks - // ATTENTION: callback may be called before this code block executed - // done. - for (auto &c : pending->callbacks()) { - c->enqueue(err, sz); - } - - if (err != ERR_OK) { - LOG_ERROR("write private log failed, err = {}", err); - _is_writing.store(false, std::memory_order_relaxed); - if (_io_error_callback) { - _io_error_callback(err); - } - return; - } - CHECK_EQ(sz, pending->size()); - - // flush to ensure that there is no gap between private log and - // in-memory buffer - // so that we can get all mutations in learning process. - // - // FIXME : the file could have been closed - if (FLAGS_plog_force_flush) { - lf->flush(); - } - - // update _private_max_commit_on_disk after written into log file done - update_max_commit_on_disk(max_commit); - - _is_writing.store(false, std::memory_order_relaxed); - - // start to write if possible - _plock.lock(); - - if (!_is_writing.load(std::memory_order_acquire) && _pending_write) { - write_pending_mutations(true); - } else { - _plock.unlock(); - } - }, - get_gpid().thread_hash()); + lf->commit_log_blocks( + *pending, + LPC_WRITE_REPLICATION_LOG_PRIVATE, + &_tracker, + [this, lf, pending, max_decree, max_commit](error_code err, size_t sz) mutable { + CHECK(_is_writing.load(std::memory_order_relaxed), ""); + + for (auto &block : pending->all_blocks()) { + auto hdr = (log_block_header *)block.front().data(); + CHECK_EQ(hdr->magic, 0xdeadbeef); + } + + if (dsn_unlikely(FLAGS_enable_latency_tracer)) { + for (const auto &mu : pending->mutations()) { + ADD_CUSTOM_POINT(mu->_tracer, "commit_pending_completed"); + } + } + + // notify the callbacks + // ATTENTION: callback may be called before this code block executed + // done. + for (auto &c : pending->callbacks()) { + c->enqueue(err, sz); + } + + if (err != ERR_OK) { + LOG_ERROR("write private log failed, err = {}", err); + _is_writing.store(false, std::memory_order_relaxed); + if (_io_error_callback) { + _io_error_callback(err); + } + return; + } + CHECK_EQ(sz, pending->size()); + + // flush to ensure that there is no gap between private log and + // in-memory buffer + // so that we can get all mutations in learning process. + // + // FIXME : the file could have been closed + if (FLAGS_plog_force_flush) { + lf->flush(); + } + + // Update both _plog_max_decree_on_disk and _plog_max_commit_on_disk + // after written into log file done. + update_max_decree_on_disk(max_decree, max_commit); + + _is_writing.store(false, std::memory_order_relaxed); + + // start to write if possible + _plock.lock(); + + if (!_is_writing.load(std::memory_order_acquire) && _pending_write) { + write_pending_mutations(true); + } else { + _plock.unlock(); + } + }, + get_gpid().thread_hash()); } /////////////////////////////////////////////////////////////// @@ -355,7 +359,8 @@ void mutation_log::init_states() // replica states _private_log_info = {0, 0}; - _private_max_commit_on_disk = 0; + _plog_max_decree_on_disk = 0; + _plog_max_commit_on_disk = 0; } error_code mutation_log::open(replay_callback read_callback, @@ -522,6 +527,7 @@ error_code mutation_log::open(replay_callback read_callback, if (ret) { this->update_max_decree_no_lock(mu->data.header.pid, mu->data.header.decree); if (this->_is_private) { + this->update_max_decree_on_disk_no_lock(mu->data.header.decree); this->update_max_commit_on_disk_no_lock(mu->data.header.last_committed_decree); } } @@ -531,8 +537,8 @@ error_code mutation_log::open(replay_callback read_callback, end_offset); if (ERR_OK == err) { - _global_start_offset = - _log_files.size() > 0 ? _log_files.begin()->second->start_offset() : 0; + _global_start_offset = _log_files.size() > 0 ? _log_files.begin()->second->start_offset() + : 0; _global_end_offset = end_offset; _last_file_index = _log_files.size() > 0 ? _log_files.rbegin()->first : 0; _is_opened = true; @@ -617,22 +623,22 @@ error_code mutation_log::create_new_log_file() blk->add(temp_writer.get_buffer()); _global_end_offset += blk->size(); - logf->commit_log_block(*blk, - _current_log_file->start_offset(), - LPC_WRITE_REPLICATION_LOG_COMMON, - &_tracker, - [this, blk, logf](::dsn::error_code err, size_t sz) { - delete blk; - if (ERR_OK != err) { - LOG_ERROR( - "write mutation log file header failed, file = {}, err = {}", - logf->path(), - err); - CHECK(_io_error_callback, ""); - _io_error_callback(err); - } - }, - 0); + logf->commit_log_block( + *blk, + _current_log_file->start_offset(), + LPC_WRITE_REPLICATION_LOG_COMMON, + &_tracker, + [this, blk, logf](::dsn::error_code err, size_t sz) { + delete blk; + if (ERR_OK != err) { + LOG_ERROR("write mutation log file header failed, file = {}, err = {}", + logf->path(), + err); + CHECK(_io_error_callback, ""); + _io_error_callback(err); + } + }, + 0); CHECK_EQ_MSG(_global_end_offset, _current_log_file->start_offset() + sizeof(log_block_header) + header_len, @@ -702,11 +708,18 @@ decree mutation_log::max_decree(gpid gpid) const return _private_log_info.max_decree; } +decree mutation_log::max_decree_on_disk() const +{ + zauto_lock l(_lock); + CHECK(_is_private, "this method is only valid for private logs"); + return _plog_max_decree_on_disk; +} + decree mutation_log::max_commit_on_disk() const { zauto_lock l(_lock); CHECK(_is_private, "this method is only valid for private logs"); - return _private_max_commit_on_disk; + return _plog_max_commit_on_disk; } decree mutation_log::max_gced_decree(gpid gpid) const @@ -862,17 +875,26 @@ void mutation_log::update_max_decree_no_lock(gpid gpid, decree d) } } -void mutation_log::update_max_commit_on_disk(decree d) +void mutation_log::update_max_decree_on_disk(decree max_decree, decree max_commit) { zauto_lock l(_lock); - update_max_commit_on_disk_no_lock(d); + update_max_decree_on_disk_no_lock(max_decree); + update_max_commit_on_disk_no_lock(max_commit); +} + +void mutation_log::update_max_decree_on_disk_no_lock(decree d) +{ + CHECK(_is_private, "this method is only valid for private logs"); + if (d > _plog_max_decree_on_disk) { + _plog_max_decree_on_disk = d; + } } void mutation_log::update_max_commit_on_disk_no_lock(decree d) { CHECK(_is_private, "this method is only valid for private logs"); - if (d > _private_max_commit_on_disk) { - _private_max_commit_on_disk = d; + if (d > _plog_max_commit_on_disk) { + _plog_max_commit_on_disk = d; } } diff --git a/src/replica/mutation_log.h b/src/replica/mutation_log.h index c4ce671ecb..7689004aae 100644 --- a/src/replica/mutation_log.h +++ b/src/replica/mutation_log.h @@ -43,9 +43,9 @@ #include "mutation.h" #include "replica/replica_base.h" #include "runtime/api_task.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/errors.h" @@ -76,9 +76,10 @@ class mutation_log : public ref_counter typedef std::function io_failure_callback; public: - // append a log mutation - // return value: nullptr for error - // thread safe + // Append a log mutation. + // Return value: nullptr for error. + // + // Thread safe. virtual ::dsn::task_ptr append(mutation_ptr &mu, dsn::task_code callback_code, dsn::task_tracker *tracker, @@ -86,34 +87,37 @@ class mutation_log : public ref_counter int hash = 0, int64_t *pending_size = nullptr) = 0; - // get learn state in memory, including pending and writing mutations - // return true if some data is filled into writer - // return false if no data is filled into writer - // thread safe + // Get learn state in memory, including pending and writing mutations: + // - return true if some data is filled into writer + // - return false if no data is filled into writer + // + // Thread safe virtual bool get_learn_state_in_memory(decree start_decree, binary_writer &writer) const { return false; } - // only for private log - // get in-memory mutations, including pending and writing mutations + // Only for private log. + // get in-memory mutations, including pending and writing mutations. virtual void get_in_memory_mutations(decree start_decree, ballot current_ballot, /*out*/ std::vector &mutations_list) const { } - // flush the pending buffer until all data is on disk - // thread safe + // Flush the pending buffer until all data is on disk. + // + // Thread safe. virtual void flush() = 0; - // flush the pending buffer at most once - // thread safe + // Flush the pending buffer at most once. + // + // Thread safe. virtual void flush_once() = 0; public: // - // ctors + // Ctors // when is_private = true, should specify "private_gpid" // mutation_log(const std::string &dir, int32_t max_log_file_mb, gpid gpid, replica *r = nullptr); @@ -121,22 +125,24 @@ class mutation_log : public ref_counter virtual ~mutation_log() = default; // - // initialization + // Initialization // - // open and replay - // returns ERR_OK if succeed - // not thread safe, but only be called when init + // Open and replay. + // return ERR_OK if succeed. + // Not thread safe, but only be called when init. error_code open(replay_callback read_callback, io_failure_callback write_error_callback); error_code open(replay_callback read_callback, io_failure_callback write_error_callback, const std::map &replay_condition); - // close the log - // thread safe + + // Close the log. + // + // Thread safe. void close(); // - // replay + // Replay. // static error_code replay(std::vector &log_files, replay_callback callback, @@ -173,55 +179,61 @@ class mutation_log : public ref_counter error_code reset_from(const std::string &dir, replay_callback, io_failure_callback); // - // maintain max_decree & valid_start_offset + // Maintain max_decree & valid_start_offset // - // when open a exist replica, need to set valid_start_offset on open - // thread safe + // valid_start_offset is needed to be set while opening an existing replica. + // + // Thread safe. void set_valid_start_offset_on_open(gpid gpid, int64_t valid_start_offset); - // when create a new replica, need to reset current max decree - // returns current global end offset, needs to be remebered by caller for gc usage - // thread safe + // Current max decree is needed to be reset, while creating a new replica. + // Return current global end offset, should be remebered by caller for gc usage. + // + // Thread safe. int64_t on_partition_reset(gpid gpid, decree max_decree); - // update current max decree - // thread safe + // Update current max decree. + // + // Thread safe. void update_max_decree(gpid gpid, decree d); - // update current max commit of private log - // thread safe - void update_max_commit_on_disk(decree d); + // Update current max decree and committed decree that have ever been written onto disk + // for plog. + // + // Thread safe. + void update_max_decree_on_disk(decree max_decree, decree max_commit); // - // garbage collection logs that are already covered by + // Garbage collection logs that are already covered by // durable state on disk, return deleted log segment count // - // garbage collection for private log, returns removed file count. - // can remove log files if satisfy all the conditions: + // Garbage collection for private log, returns removed file count. + // + // Log files could be removed once all the following conditions are satisfied: // - the file is not the current log file // - the file is not covered by reserve_max_size or reserve_max_time // - file.max_decree <= "durable_decree" || file.end_offset <= "valid_start_offset" - // that means, should reserve files if satisfy one of the conditions: + // which means, files should be reserved if one of the conditions is satisfied: // - the file is the current log file // - the file is covered by both reserve_max_size and reserve_max_time // - file.max_decree > "durable_decree" && file.end_offset > "valid_start_offset" - // thread safe + // + // Thread safe. int garbage_collection(gpid gpid, decree durable_decree, int64_t valid_start_offset, int64_t reserve_max_size, int64_t reserve_max_time); - // - // when this is a private log, log files are learned by remote replicas - // return true if private log surely covers the learning range - // + // When this is a private log, log files are learned by remote replicas + // return true if private log surely covers the learning range. bool get_learn_state(gpid gpid, decree start, /*out*/ learn_state &state) const; - // only valid for private log. - // get parent mutations in memory and private log files during partition split. + // Only valid for private log. + // + // Get parent mutations in memory and private log files during partition split. // `total_file_size` is used for the metrics of partition split. void get_parent_mutations_and_logs(gpid pid, decree start_decree, @@ -231,23 +243,28 @@ class mutation_log : public ref_counter /*out*/ uint64_t &total_file_size) const; // - // other inquiry routines + // Other inquiry routines // - // log dir - // thread safe (because nerver changed) + // Get log dir. + // + // Thread safe (because nerver changed). const std::string &dir() const { return _dir; } - // replica - replica *owner_replica() const { return _owner_replica; } - - // get current max decree for gpid - // returns 0 if not found - // thread safe + // Get current max decree for gpid. + // Return 0 if not found. + // + // Thread safe. decree max_decree(gpid gpid) const; - // get current max commit on disk of private log. - // thread safe + // Get current max decree on disk for plog. + // + // Thread safe. + decree max_decree_on_disk() const; + + // Get current max committed decree on disk for plog. + // + // Thread safe. decree max_commit_on_disk() const; // Decree of the maximum garbage-collected mutation. @@ -260,7 +277,7 @@ class mutation_log : public ref_counter // than the others, the max_gced_decree = 9. // Returns `invalid_decree` when plog directory is empty. // - // thread-safe & private log only + // Thread safe & private log only. decree max_gced_decree(gpid gpid) const; decree max_gced_decree_no_lock(gpid gpid) const; @@ -269,11 +286,14 @@ class mutation_log : public ref_counter // thread-safe log_file_map_by_index get_log_file_map() const; - // check the consistence of valid_start_offset - // thread safe + // Check the consistence of valid_start_offset + // + // Thread safe. void check_valid_start_offset(gpid gpid, int64_t valid_start_offset) const; - // get total size. + // Get the total size. + // + // Thread safe. int64_t total_size() const; void hint_switch_file() { _switch_file_hint = true; } @@ -282,20 +302,22 @@ class mutation_log : public ref_counter task_tracker *tracker() { return &_tracker; } protected: - // thread-safe // 'size' is data size to write; the '_global_end_offset' will be updated by 'size'. // can switch file only when create_new_log_if_needed = true; // return pair: the first is target file to write; the second is the global offset to start - // write + // write. + // + // Thread safe. std::pair mark_new_offset(size_t size, bool create_new_log_if_needed); - // thread-safe + + // Thread safe. int64_t get_global_offset() const { zauto_lock l(_lock); return _global_end_offset; } - // init memory states + // Init memory states. virtual void init_states(); private: @@ -310,10 +332,13 @@ class mutation_log : public ref_counter replay_callback callback, /*out*/ int64_t &end_offset); - // update max decree without lock + // Update max decree without lock. void update_max_decree_no_lock(gpid gpid, decree d); - // update max commit on disk without lock + // Update max decree on disk without lock. + void update_max_decree_on_disk_no_lock(decree d); + + // Update max committed decree on disk without lock. void update_max_commit_on_disk_no_lock(decree d); // create new log file and set it as the current log file @@ -323,7 +348,7 @@ class mutation_log : public ref_counter // - _lock.locked() error_code create_new_log_file(); - // get total size ithout lock. + // Get total size without lock. int64_t total_size_no_lock() const; protected: @@ -367,11 +392,16 @@ class mutation_log : public ref_counter // replica log info for private log replica_log_info _private_log_info; - decree - _private_max_commit_on_disk; // the max last_committed_decree of written mutations up to now - // used for limiting garbage collection of shared log, because - // the ending of private log should be covered by shared log + + // The max decree of the mutations that have ever been written onto the disk for plog. + decree _plog_max_decree_on_disk; + + // The max decree of the committed mutations that have ever been written onto the disk + // for plog. Since it is set with mutation.data.header.last_committed_decree, it must + // be less than _plog_max_decree_on_disk. + decree _plog_max_commit_on_disk; }; + typedef dsn::ref_ptr mutation_log_ptr; class mutation_log_private : public mutation_log, private replica_base @@ -418,6 +448,7 @@ class mutation_log_private : public mutation_log, private replica_base void commit_pending_mutations(log_file_ptr &lf, std::shared_ptr &pending, + decree max_decree, decree max_commit); void init_states() override; diff --git a/src/replica/mutation_log_replay.cpp b/src/replica/mutation_log_replay.cpp index 126d19cdfc..0e7956c8d7 100644 --- a/src/replica/mutation_log_replay.cpp +++ b/src/replica/mutation_log_replay.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -38,10 +39,9 @@ #include "utils/errors.h" #include "utils/fail_point.h" #include "utils/fmt_logging.h" -#include "absl/strings/string_view.h" +#include "utils/ports.h" -namespace dsn { -namespace replication { +namespace dsn::replication { /*static*/ error_code mutation_log::replay(log_file_ptr log, replay_callback callback, @@ -72,40 +72,55 @@ namespace replication { return err.code(); } -/*static*/ error_s mutation_log::replay_block(log_file_ptr &log, - replay_callback &callback, - size_t start_offset, - int64_t &end_offset) -{ - FAIL_POINT_INJECT_F("mutation_log_replay_block", [](absl::string_view) -> error_s { - return error_s::make(ERR_INCOMPLETE_DATA, "mutation_log_replay_block"); - }); +namespace { - blob bb; - std::unique_ptr reader; - - log->reset_stream(start_offset); // start reading from given offset - int64_t global_start_offset = start_offset + log->start_offset(); - end_offset = global_start_offset; // reset end_offset to the start. - - // reads the entire block into memory - error_code err = log->read_next_log_block(bb); - if (err != ERR_OK) { - return error_s::make(err, "failed to read log block"); +dsn::error_s read_block(dsn::replication::log_file_ptr &log, + size_t start_offset, + int64_t &end_offset, + std::unique_ptr &reader) +{ + log->reset_stream(start_offset); // Start reading from given offset. + int64_t global_start_offset = static_cast(start_offset) + log->start_offset(); + end_offset = global_start_offset; // Reset end_offset to the start. + + { + // Read the entire block into memory. + blob bb; + const auto err = log->read_next_log_block(bb); + if (dsn_unlikely(err != dsn::ERR_OK)) { + return FMT_ERR(err, "failed to read log block"); + } + reader = std::make_unique(std::move(bb)); } - reader = std::make_unique(bb); - end_offset += sizeof(log_block_header); + end_offset += sizeof(dsn::replication::log_block_header); // The first block is log_file_header. if (global_start_offset == log->start_offset()) { end_offset += log->read_file_header(*reader); if (!log->is_right_header()) { - return error_s::make(ERR_INVALID_DATA, "failed to read log file header"); + return FMT_ERR(dsn::ERR_INVALID_DATA, "failed to read log file header"); } - // continue to parsing the data block + // Continue to parsing the data block. } + return dsn::error_s::ok(); +} + +} // anonymous namespace + +/*static*/ error_s mutation_log::replay_block(log_file_ptr &log, + replay_callback &callback, + size_t start_offset, + int64_t &end_offset) +{ + FAIL_POINT_INJECT_F("mutation_log_replay_block", [](std::string_view) -> error_s { + return error_s::make(ERR_INCOMPLETE_DATA, "mutation_log_replay_block"); + }); + + std::unique_ptr reader; + RETURN_NOT_OK(read_block(log, start_offset, end_offset, reader)); + while (!reader->is_eof()) { auto old_size = reader->get_remaining_size(); mutation_ptr mu = mutation::read_from(*reader, nullptr); @@ -218,5 +233,4 @@ namespace replication { return err; } -} // namespace replication -} // namespace dsn +} // namespace dsn::replication diff --git a/src/replica/mutation_log_utils.cpp b/src/replica/mutation_log_utils.cpp index 9fed18ab8b..e69fd13909 100644 --- a/src/replica/mutation_log_utils.cpp +++ b/src/replica/mutation_log_utils.cpp @@ -24,7 +24,7 @@ * THE SOFTWARE. */ -#include +#include #include #include @@ -37,9 +37,9 @@ namespace dsn { namespace replication { namespace log_utils { -/*extern*/ error_s open_read(absl::string_view path, /*out*/ log_file_ptr &file) +/*extern*/ error_s open_read(std::string_view path, /*out*/ log_file_ptr &file) { - FAIL_POINT_INJECT_F("open_read", [](absl::string_view) -> error_s { + FAIL_POINT_INJECT_F("open_read", [](std::string_view) -> error_s { return error_s::make(ERR_FILE_OPERATION_FAILED, "open_read"); }); @@ -53,7 +53,7 @@ namespace log_utils { /*extern*/ error_s list_all_files(const std::string &dir, /*out*/ std::vector &files) { - FAIL_POINT_INJECT_F("list_all_files", [](absl::string_view) -> error_s { + FAIL_POINT_INJECT_F("list_all_files", [](std::string_view) -> error_s { return error_s::make(ERR_FILE_OPERATION_FAILED, "list_all_files"); }); diff --git a/src/replica/mutation_log_utils.h b/src/replica/mutation_log_utils.h index fafa4fbf56..56a5405439 100644 --- a/src/replica/mutation_log_utils.h +++ b/src/replica/mutation_log_utils.h @@ -34,13 +34,13 @@ #include "replica/mutation_log.h" #include "utils/autoref_ptr.h" #include "utils/errors.h" -#include "absl/strings/string_view.h" +#include namespace dsn { namespace replication { namespace log_utils { -extern error_s open_read(absl::string_view path, /*out*/ log_file_ptr &file); +extern error_s open_read(std::string_view path, /*out*/ log_file_ptr &file); extern error_s list_all_files(const std::string &dir, /*out*/ std::vector &files); diff --git a/src/replica/prepare_list.cpp b/src/replica/prepare_list.cpp index e6ba29260d..d2fdff2334 100644 --- a/src/replica/prepare_list.cpp +++ b/src/replica/prepare_list.cpp @@ -107,22 +107,22 @@ error_code prepare_list::prepare(mutation_ptr &mu, CHECK_EQ_PREFIX_MSG(mutation_cache::put(mu), ERR_OK, "mutation_cache::put failed"); return ERR_OK; - //// delayed commit - only when capacity is an issue - // case partition_status::PS_POTENTIAL_SECONDARY: - // while (true) - // { - // error_code err = mutation_cache::put(mu); - // if (err == ERR_CAPACITY_EXCEEDED) - // { - // CHECK_GE(mu->data.header.last_committed_decree, min_decree()); - // commit (min_decree(), true); - // pop_min(); - // } - // else - // break; - // } - // CHECK_EQ(err, ERR_OK); - // return ERR_OK; + //// delayed commit - only when capacity is an issue + // case partition_status::PS_POTENTIAL_SECONDARY: + // while (true) + // { + // error_code err = mutation_cache::put(mu); + // if (err == ERR_CAPACITY_EXCEEDED) + // { + // CHECK_GE(mu->data.header.last_committed_decree, min_decree()); + // commit (min_decree(), true); + // pop_min(); + // } + // else + // break; + // } + // CHECK_EQ(err, ERR_OK); + // return ERR_OK; case partition_status::PS_INACTIVE: // only possible during init if (mu->data.header.last_committed_decree > max_decree()) { diff --git a/src/replica/replica.cpp b/src/replica/replica.cpp index 5bb1f17b83..4175bb8168 100644 --- a/src/replica/replica.cpp +++ b/src/replica/replica.cpp @@ -26,7 +26,7 @@ #include "replica.h" -#include +#include #include #include #include @@ -41,17 +41,17 @@ #include "common/replication_common.h" #include "common/replication_enums.h" #include "consensus_types.h" -#include "duplication/replica_duplicator_manager.h" #include "duplication/replica_follower.h" #include "mutation.h" #include "mutation_log.h" +#include "replica/duplication/replica_duplicator_manager.h" #include "replica/prepare_list.h" #include "replica/replica_context.h" #include "replica/replication_app_base.h" #include "replica_admin_types.h" #include "replica_disk_migrator.h" #include "replica_stub.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_message.h" #include "security/access_controller.h" #include "split/replica_split_manager.h" #include "utils/filesystem.h" @@ -369,7 +369,7 @@ void replica::init_state() _config.pid.set_app_id(0); _config.pid.set_partition_index(0); _config.status = partition_status::PS_INACTIVE; - _primary_states.membership.ballot = 0; + _primary_states.pc.ballot = 0; _create_time_ms = dsn_now_ms(); _last_config_change_time_ms = _create_time_ms; update_last_checkpoint_generate_time(); @@ -578,6 +578,10 @@ mutation_ptr replica::new_mutation(decree decree) return mu; } +decree replica::last_applied_decree() const { return _app->last_committed_decree(); } + +decree replica::last_flushed_decree() const { return _app->last_flushed_decree(); } + decree replica::last_durable_decree() const { return _app->last_durable_decree(); } decree replica::last_prepared_decree() const diff --git a/src/replica/replica.h b/src/replica/replica.h index 3b90641cdd..4ee215c3dc 100644 --- a/src/replica/replica.h +++ b/src/replica/replica.h @@ -30,13 +30,16 @@ #include #include #include +#include #include #include #include #include +#include "common/json_helper.h" #include "common/replication_other_types.h" #include "dsn.layer2_types.h" +#include "duplication/replica_duplicator_manager.h" // IWYU pragma: keep #include "meta_admin_types.h" #include "metadata_types.h" #include "mutation.h" @@ -46,11 +49,11 @@ #include "replica/backup/cold_backup_context.h" #include "replica/replica_base.h" #include "replica_context.h" +#include "rpc/rpc_message.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_message.h" #include "runtime/serverlet.h" -#include "runtime/task/task.h" -#include "runtime/task/task_tracker.h" +#include "task/task.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/metrics.h" @@ -70,6 +73,7 @@ class gpid; class host_port; namespace dist { + namespace block_service { class block_filesystem; } // namespace block_service @@ -82,6 +86,7 @@ namespace replication { class backup_request; class backup_response; + class configuration_restore_request; class detect_hotkey_request; class detect_hotkey_response; @@ -96,14 +101,12 @@ class replica; class replica_backup_manager; class replica_bulk_loader; class replica_disk_migrator; -class replica_duplicator_manager; class replica_follower; class replica_split_manager; class replica_stub; class replication_app_base; class replication_options; struct dir_node; - typedef dsn::ref_ptr cold_backup_context_ptr; namespace test { @@ -186,7 +189,7 @@ class replica : public serverlet, public ref_counter, public replica_ba // void on_config_proposal(configuration_update_request &proposal); void on_config_sync(const app_info &info, - const partition_configuration &config, + const partition_configuration &pc, split_status::type meta_split_status); void on_cold_backup(const backup_request &request, /*out*/ backup_response &response); @@ -223,8 +226,38 @@ class replica : public serverlet, public ref_counter, public replica_ba const app_info *get_app_info() const { return &_app_info; } decree max_prepared_decree() const { return _prepare_list->max_decree(); } decree last_committed_decree() const { return _prepare_list->last_committed_decree(); } + + // The last decree that has been applied into rocksdb memtable. + decree last_applied_decree() const; + + // The last decree that has been flushed into rocksdb sst. + decree last_flushed_decree() const; + decree last_prepared_decree() const; decree last_durable_decree() const; + + // Encode current progress of decrees into json, including both local writes and duplications + // of this replica. + template + void encode_progress(TWriter &writer) const + { + writer.StartObject(); + + JSON_ENCODE_OBJ(writer, max_prepared_decree, max_prepared_decree()); + JSON_ENCODE_OBJ(writer, max_plog_decree, _private_log->max_decree(get_gpid())); + JSON_ENCODE_OBJ(writer, max_plog_decree_on_disk, _private_log->max_decree_on_disk()); + JSON_ENCODE_OBJ(writer, max_plog_commit_on_disk, _private_log->max_commit_on_disk()); + JSON_ENCODE_OBJ(writer, last_committed_decree, last_committed_decree()); + JSON_ENCODE_OBJ(writer, last_applied_decree, last_applied_decree()); + JSON_ENCODE_OBJ(writer, last_flushed_decree, last_flushed_decree()); + JSON_ENCODE_OBJ(writer, last_durable_decree, last_durable_decree()); + JSON_ENCODE_OBJ(writer, max_gc_decree, _private_log->max_gced_decree(get_gpid())); + + _duplication_mgr->encode_progress(writer); + + writer.EndObject(); + } + const std::string &dir() const { return _dir; } uint64_t create_time_milliseconds() const { return _create_time_ms; } const char *name() const { return replica_name(); } @@ -237,7 +270,24 @@ class replica : public serverlet, public ref_counter, public replica_ba // // Duplication // - error_code trigger_manual_emergency_checkpoint(decree old_decree); + + using trigger_checkpoint_callback = std::function; + + // Choose a fixed thread from pool to trigger an emergency checkpoint asynchronously. + // A new checkpoint would still be created even if the replica is empty (hasn't received + // any write operation). + // + // Parameters: + // - `min_checkpoint_decree`: the min decree that should be covered by the triggered + // checkpoint. Should be a number greater than 0 which means a new checkpoint must be + // created. + // - `delay_ms`: the delayed time in milliseconds that the triggering task is put into + // the thread pool. + // - `callback`: the callback processor handling the error code of triggering checkpoint. + void async_trigger_manual_emergency_checkpoint(decree min_checkpoint_decree, + uint32_t delay_ms, + trigger_checkpoint_callback callback = {}); + void on_query_last_checkpoint(learn_response &response); std::shared_ptr get_duplication_manager() const { @@ -395,7 +445,7 @@ class replica : public serverlet, public ref_counter, public replica_ba void remove(configuration_update_request &proposal); void update_configuration_on_meta_server(config_type::type type, const host_port &node, - partition_configuration &new_config); + partition_configuration &new_pc); void on_update_configuration_on_meta_server_reply(error_code err, dsn::message_ex *request, @@ -409,7 +459,7 @@ class replica : public serverlet, public ref_counter, public replica_ba void update_app_envs_internal(const std::map &envs); void query_app_envs(/*out*/ std::map &envs); - bool update_configuration(const partition_configuration &config); + bool update_configuration(const partition_configuration &pc); bool update_local_configuration(const replica_configuration &config, bool same_ballot = false); error_code update_init_info_ballot_and_decree(); @@ -429,13 +479,6 @@ class replica : public serverlet, public ref_counter, public replica_ba error_code background_sync_checkpoint(); void catch_up_with_private_logs(partition_status::type s); void on_checkpoint_completed(error_code err); - void on_copy_checkpoint_ack(error_code err, - const std::shared_ptr &req, - const std::shared_ptr &resp); - void on_copy_checkpoint_file_completed(error_code err, - size_t sz, - std::shared_ptr resp, - const std::string &chk_dir); // Enable/Disable plog garbage collection to be executed. For example, to duplicate data // to target cluster, we could firstly disable plog garbage collection, then do copy_data. @@ -447,6 +490,10 @@ class replica : public serverlet, public ref_counter, public replica_ba bool is_plog_gc_enabled() const; std::string get_plog_gc_enabled_message() const; + // Trigger an emergency checkpoint for duplication. Once the replica is empty (hasn't + // received any write operation), there would be no checkpoint created. + error_code trigger_manual_emergency_checkpoint(decree min_checkpoint_decree); + ///////////////////////////////////////////////////////////////// // cold backup virtual void generate_backup_checkpoint(cold_backup_context_ptr backup_context); @@ -486,8 +533,8 @@ class replica : public serverlet, public ref_counter, public replica_ba void update_restore_progress(uint64_t f_size); // Used for remote command - // TODO: remove this interface and only expose the http interface - // now this remote commend will be used by `scripts/pegasus_manual_compact.sh` + // TODO(clang-tidy): remove this interface and only expose the http interface + // now this remote commend will be used by `admin_tools/pegasus_manual_compact.sh` std::string query_manual_compact_state() const; manual_compaction_status::type get_manual_compact_status() const; diff --git a/src/replica/replica_2pc.cpp b/src/replica/replica_2pc.cpp index ccbf3c87cf..884c5c4dc3 100644 --- a/src/replica/replica_2pc.cpp +++ b/src/replica/replica_2pc.cpp @@ -55,18 +55,18 @@ #include "replica/replica_context.h" #include "replica/replication_app_base.h" #include "replica_stub.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/rpc_stream.h" +#include "rpc/serialization.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/rpc_stream.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" #include "security/access_controller.h" #include "split/replica_split_manager.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/api_utilities.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" @@ -200,8 +200,8 @@ void replica::on_client_write(dsn::message_ex *request, bool ignore_throttling) LOG_INFO_PREFIX("receive bulk load ingestion request"); // bulk load ingestion request requires that all secondaries should be alive - if (static_cast(_primary_states.membership.hp_secondaries.size()) + 1 < - _primary_states.membership.max_replica_count) { + if (static_cast(_primary_states.pc.hp_secondaries.size()) + 1 < + _primary_states.pc.max_replica_count) { response_client_write(request, ERR_NOT_ENOUGH_MEMBER); return; } @@ -209,7 +209,7 @@ void replica::on_client_write(dsn::message_ex *request, bool ignore_throttling) _bulk_load_ingestion_start_time_ms = dsn_now_ms(); } - if (static_cast(_primary_states.membership.hp_secondaries.size()) + 1 < + if (static_cast(_primary_states.pc.hp_secondaries.size()) + 1 < _options->app_mutation_2pc_min_replica_count(_app_info.max_replica_count)) { response_client_write(request, ERR_NOT_ENOUGH_MEMBER); return; @@ -269,8 +269,8 @@ void replica::init_prepare(mutation_ptr &mu, bool reconciliation, bool pop_all_c break; } LOG_INFO_PREFIX("try to prepare bulk load mutation({})", mu->name()); - if (static_cast(_primary_states.membership.hp_secondaries.size()) + 1 < - _primary_states.membership.max_replica_count) { + if (static_cast(_primary_states.pc.hp_secondaries.size()) + 1 < + _primary_states.pc.max_replica_count) { err = ERR_NOT_ENOUGH_MEMBER; break; } @@ -282,7 +282,7 @@ void replica::init_prepare(mutation_ptr &mu, bool reconciliation, bool pop_all_c // stop prepare if there are too few replicas unless it's a reconciliation // for reconciliation, we should ensure every prepared mutation to be committed // please refer to PacificA paper - if (static_cast(_primary_states.membership.hp_secondaries.size()) + 1 < + if (static_cast(_primary_states.pc.hp_secondaries.size()) + 1 < _options->app_mutation_2pc_min_replica_count(_app_info.max_replica_count) && !reconciliation) { err = ERR_NOT_ENOUGH_MEMBER; @@ -300,8 +300,8 @@ void replica::init_prepare(mutation_ptr &mu, bool reconciliation, bool pop_all_c // remote prepare mu->set_prepare_ts(); mu->set_left_secondary_ack_count( - (unsigned int)_primary_states.membership.hp_secondaries.size()); - for (const auto &secondary : _primary_states.membership.hp_secondaries) { + static_cast(_primary_states.pc.hp_secondaries.size())); + for (const auto &secondary : _primary_states.pc.hp_secondaries) { send_prepare_message(secondary, partition_status::PS_SECONDARY, mu, @@ -383,14 +383,14 @@ void replica::send_prepare_message(const ::dsn::host_port &hp, mu->write_to(writer, msg); } - mu->remote_tasks()[hp] = - rpc::call(dsn::dns_resolver::instance().resolve_address(hp), - msg, - &_tracker, - [=](error_code err, dsn::message_ex *request, dsn::message_ex *reply) { - on_prepare_reply(std::make_pair(mu, rconfig.status), err, request, reply); - }, - get_gpid().thread_hash()); + mu->remote_tasks()[hp] = rpc::call( + dsn::dns_resolver::instance().resolve_address(hp), + msg, + &_tracker, + [=](error_code err, dsn::message_ex *request, dsn::message_ex *reply) { + on_prepare_reply(std::make_pair(mu, rconfig.status), err, request, reply); + }, + get_gpid().thread_hash()); LOG_DEBUG_PREFIX("mutation {} send_prepare_message to {} as {}", mu->name(), diff --git a/src/replica/replica_backup.cpp b/src/replica/replica_backup.cpp index 427eabc822..1be16e30c0 100644 --- a/src/replica/replica_backup.cpp +++ b/src/replica/replica_backup.cpp @@ -47,7 +47,7 @@ #include "replica/replication_app_base.h" #include "replica_stub.h" #include "runtime/api_layer1.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" #include "utils/autoref_ptr.h" #include "utils/env.h" #include "utils/error_code.h" @@ -125,14 +125,15 @@ void replica::on_cold_backup(const backup_request &request, /*out*/ backup_respo LOG_INFO("{}: delay clearing obsoleted cold backup context, cause backup_status == " "ColdBackupCheckpointing", new_context->name); - tasking::enqueue(LPC_REPLICATION_COLD_BACKUP, - &_tracker, - [this, request]() { - backup_response response; - on_cold_backup(request, response); - }, - get_gpid().thread_hash(), - std::chrono::seconds(100)); + tasking::enqueue( + LPC_REPLICATION_COLD_BACKUP, + &_tracker, + [this, request]() { + backup_response response; + on_cold_backup(request, response); + }, + get_gpid().thread_hash(), + std::chrono::seconds(100)); } else { // TODO(wutao1): deleting cold backup context should be // extracted as a function like try_delete_cold_backup_context; @@ -255,10 +256,10 @@ void replica::on_cold_backup(const backup_request &request, /*out*/ backup_respo void replica::send_backup_request_to_secondary(const backup_request &request) { - for (const auto &target_address : _primary_states.membership.secondaries) { + for (const auto &secondary : _primary_states.pc.secondaries) { // primary will send backup_request to secondary periodically // so, we shouldn't handle the response - rpc::call_one_way_typed(target_address, RPC_COLD_BACKUP, request, get_gpid().thread_hash()); + rpc::call_one_way_typed(secondary, RPC_COLD_BACKUP, request, get_gpid().thread_hash()); } } @@ -488,13 +489,14 @@ void replica::generate_backup_checkpoint(cold_backup_context_ptr backup_context) file_infos.size(), total_size); // TODO: in primary, this will make the request send to secondary again - tasking::enqueue(LPC_REPLICATION_COLD_BACKUP, - &_tracker, - [this, backup_context]() { - backup_response response; - on_cold_backup(backup_context->request, response); - }, - get_gpid().thread_hash()); + tasking::enqueue( + LPC_REPLICATION_COLD_BACKUP, + &_tracker, + [this, backup_context]() { + backup_response response; + on_cold_backup(backup_context->request, response); + }, + get_gpid().thread_hash()); } else { backup_context->fail_checkpoint("statistic file info under checkpoint failed"); return; @@ -728,13 +730,14 @@ void replica::local_create_backup_checkpoint(cold_backup_context_ptr backup_cont } backup_context->checkpoint_file_total_size = total_size; backup_context->complete_checkpoint(); - tasking::enqueue(LPC_REPLICATION_COLD_BACKUP, - &_tracker, - [this, backup_context]() { - backup_response response; - on_cold_backup(backup_context->request, response); - }, - get_gpid().thread_hash()); + tasking::enqueue( + LPC_REPLICATION_COLD_BACKUP, + &_tracker, + [this, backup_context]() { + backup_response response; + on_cold_backup(backup_context->request, response); + }, + get_gpid().thread_hash()); } } diff --git a/src/replica/replica_base.cpp b/src/replica/replica_base.cpp index ce4700abdd..f45d9b43f8 100644 --- a/src/replica/replica_base.cpp +++ b/src/replica/replica_base.cpp @@ -17,7 +17,7 @@ #include "replica_base.h" -#include +#include #include METRIC_DEFINE_entity(replica); @@ -41,7 +41,7 @@ metric_entity_ptr instantiate_replica_metric_entity(const gpid &id) } // anonymous namespace -replica_base::replica_base(gpid id, absl::string_view name, absl::string_view app_name) +replica_base::replica_base(gpid id, std::string_view name, std::string_view app_name) : _gpid(id), _name(name), _app_name(app_name), diff --git a/src/replica/replica_base.h b/src/replica/replica_base.h index 8909197a64..79583c13f1 100644 --- a/src/replica/replica_base.h +++ b/src/replica/replica_base.h @@ -29,7 +29,7 @@ #include #include "common/gpid.h" -#include "absl/strings/string_view.h" +#include #include "utils/fmt_logging.h" #include "utils/metrics.h" @@ -39,7 +39,7 @@ namespace replication { /// Base class for types that are one-instance-per-replica. struct replica_base { - replica_base(gpid id, absl::string_view name, absl::string_view app_name); + replica_base(gpid id, std::string_view name, std::string_view app_name); explicit replica_base(replica_base *rhs) : replica_base(rhs->get_gpid(), rhs->replica_name(), rhs->_app_name) diff --git a/src/replica/replica_check.cpp b/src/replica/replica_check.cpp index c4a86dec59..c0e13aca55 100644 --- a/src/replica/replica_check.cpp +++ b/src/replica/replica_check.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -46,19 +47,18 @@ #include "replica/replica_context.h" #include "replica/replication_app_base.h" #include "replica_stub.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" #include "split/replica_split_manager.h" +#include "task/async_calls.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/fail_point.h" #include "utils/flags.h" #include "utils/fmt_logging.h" -#include "absl/strings/string_view.h" #include "utils/metrics.h" #include "utils/thread_access_checker.h" @@ -82,7 +82,7 @@ namespace replication { void replica::init_group_check() { - FAIL_POINT_INJECT_F("replica_init_group_check", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_init_group_check", [](std::string_view) {}); _checker.only_one_thread_access(); @@ -92,17 +92,17 @@ void replica::init_group_check() return; CHECK(nullptr == _primary_states.group_check_task, ""); - _primary_states.group_check_task = - tasking::enqueue_timer(LPC_GROUP_CHECK, - &_tracker, - [this] { broadcast_group_check(); }, - std::chrono::milliseconds(FLAGS_group_check_interval_ms), - get_gpid().thread_hash()); + _primary_states.group_check_task = tasking::enqueue_timer( + LPC_GROUP_CHECK, + &_tracker, + [this] { broadcast_group_check(); }, + std::chrono::milliseconds(FLAGS_group_check_interval_ms), + get_gpid().thread_hash()); } void replica::broadcast_group_check() { - FAIL_POINT_INJECT_F("replica_broadcast_group_check", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_broadcast_group_check", [](std::string_view) {}); CHECK_NOTNULL(_primary_states.group_check_task, ""); @@ -152,17 +152,17 @@ void replica::broadcast_group_check() LOG_INFO_PREFIX("send group check to {} with state {}", hp, enum_to_string(it->second)); - dsn::task_ptr callback_task = - rpc::call(addr, - RPC_GROUP_CHECK, - *request, - &_tracker, - [=](error_code err, group_check_response &&resp) { - auto alloc = std::make_shared(std::move(resp)); - on_group_check_reply(err, request, alloc); - }, - std::chrono::milliseconds(0), - get_gpid().thread_hash()); + dsn::task_ptr callback_task = rpc::call( + addr, + RPC_GROUP_CHECK, + *request, + &_tracker, + [=](error_code err, group_check_response &&resp) { + auto alloc = std::make_shared(std::move(resp)); + on_group_check_reply(err, request, alloc); + }, + std::chrono::milliseconds(0), + get_gpid().thread_hash()); _primary_states.group_check_pending_replies[hp] = callback_task; } @@ -274,10 +274,11 @@ void replica::on_group_check_reply(error_code err, void replica::inject_error(error_code err) { - tasking::enqueue(LPC_REPLICATION_ERROR, - &_tracker, - [this, err]() { handle_local_failure(err); }, - get_gpid().thread_hash()); + tasking::enqueue( + LPC_REPLICATION_ERROR, + &_tracker, + [this, err]() { handle_local_failure(err); }, + get_gpid().thread_hash()); } } // namespace replication } // namespace dsn diff --git a/src/replica/replica_chkpt.cpp b/src/replica/replica_chkpt.cpp index 0145dcab0f..b975626b61 100644 --- a/src/replica/replica_chkpt.cpp +++ b/src/replica/replica_chkpt.cpp @@ -42,17 +42,18 @@ #include "metadata_types.h" #include "mutation_log.h" #include "replica.h" +#include "replica/mutation.h" #include "replica/prepare_list.h" #include "replica/replica_context.h" #include "replica/replication_app_base.h" #include "replica_stub.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" #include "split/replica_split_manager.h" +#include "task/async_calls.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/chrono_literals.h" @@ -69,12 +70,14 @@ DSN_DEFINE_int32(replication, checkpoint_max_interval_hours, 2, "The maximum time interval in hours of replica checkpoints must be generated"); + DSN_DEFINE_int32(replication, log_private_reserve_max_size_mb, 1000, "The maximum size of useless private log to be reserved. NOTE: only when " "'log_private_reserve_max_size_mb' and 'log_private_reserve_max_time_seconds' are " "both satisfied, the useless logs can be reserved"); + DSN_DEFINE_int32( replication, log_private_reserve_max_time_seconds, @@ -83,6 +86,11 @@ DSN_DEFINE_int32( "when 'log_private_reserve_max_size_mb' and 'log_private_reserve_max_time_seconds' " "are both satisfied, the useless logs can be reserved"); +DSN_DEFINE_uint32(replication, + trigger_checkpoint_retry_interval_ms, + 100, + "The wait interval before next attempt for empty write."); + namespace dsn { namespace replication { @@ -186,8 +194,59 @@ void replica::on_checkpoint_timer() }); } +void replica::async_trigger_manual_emergency_checkpoint(decree min_checkpoint_decree, + uint32_t delay_ms, + trigger_checkpoint_callback callback) +{ + CHECK_GT_PREFIX_MSG(min_checkpoint_decree, + 0, + "min_checkpoint_decree should be a number greater than 0 " + "which means a new checkpoint must be created"); + + tasking::enqueue( + LPC_REPLICATION_COMMON, + &_tracker, + [min_checkpoint_decree, callback, this]() { + _checker.only_one_thread_access(); + + if (_app == nullptr) { + LOG_ERROR_PREFIX("app hasn't been initialized or has been released"); + return; + } + + const auto last_applied_decree = this->last_applied_decree(); + if (last_applied_decree == 0) { + LOG_INFO_PREFIX("ready to commit an empty write to trigger checkpoint: " + "min_checkpoint_decree={}, last_applied_decree={}, " + "last_durable_decree={}", + min_checkpoint_decree, + last_applied_decree, + last_durable_decree()); + + // For the empty replica, here we commit an empty write would be to increase + // the decree to at least 1, to ensure that the checkpoint would inevitably + // be created even if the replica is empty. + mutation_ptr mu = new_mutation(invalid_decree); + mu->add_client_request(RPC_REPLICATION_WRITE_EMPTY, nullptr); + init_prepare(mu, false); + + async_trigger_manual_emergency_checkpoint( + min_checkpoint_decree, FLAGS_trigger_checkpoint_retry_interval_ms, callback); + + return; + } + + const auto err = trigger_manual_emergency_checkpoint(min_checkpoint_decree); + if (callback) { + callback(err); + } + }, + get_gpid().thread_hash(), + std::chrono::milliseconds(delay_ms)); +} + // ThreadPool: THREAD_POOL_REPLICATION -error_code replica::trigger_manual_emergency_checkpoint(decree old_decree) +error_code replica::trigger_manual_emergency_checkpoint(decree min_checkpoint_decree) { _checker.only_one_thread_access(); @@ -196,20 +255,18 @@ error_code replica::trigger_manual_emergency_checkpoint(decree old_decree) return ERR_LOCAL_APP_FAILURE; } - if (old_decree <= _app->last_durable_decree()) { - LOG_INFO_PREFIX("checkpoint has been completed: old = {} vs latest = {}", - old_decree, - _app->last_durable_decree()); + const auto last_durable_decree = this->last_durable_decree(); + if (min_checkpoint_decree <= last_durable_decree) { + LOG_INFO_PREFIX( + "checkpoint has been completed: min_checkpoint_decree={}, last_durable_decree={}", + min_checkpoint_decree, + last_durable_decree); _is_manual_emergency_checkpointing = false; - _stub->_manual_emergency_checkpointing_count == 0 - ? 0 - : (--_stub->_manual_emergency_checkpointing_count); return ERR_OK; } if (_is_manual_emergency_checkpointing) { - LOG_WARNING_PREFIX("replica is checkpointing, last_durable_decree = {}", - _app->last_durable_decree()); + LOG_WARNING_PREFIX("replica is checkpointing, last_durable_decree={}", last_durable_decree); return ERR_BUSY; } @@ -243,11 +300,12 @@ void replica::init_checkpoint(bool is_emergency) // // we may issue a new task to do backgroup_async_checkpoint // even if the old one hasn't finished yet - tasking::enqueue(LPC_CHECKPOINT_REPLICA, - &_tracker, - [this, is_emergency] { background_async_checkpoint(is_emergency); }, - 0, - 10_ms); + tasking::enqueue( + LPC_CHECKPOINT_REPLICA, + &_tracker, + [this, is_emergency] { background_async_checkpoint(is_emergency); }, + 0, + 10_ms); if (is_emergency) { METRIC_VAR_INCREMENT(emergency_checkpoints); @@ -307,9 +365,9 @@ error_code replica::background_async_checkpoint(bool is_emergency) if (_is_manual_emergency_checkpointing) { _is_manual_emergency_checkpointing = false; - _stub->_manual_emergency_checkpointing_count == 0 - ? 0 - : (--_stub->_manual_emergency_checkpointing_count); + if (_stub->_manual_emergency_checkpointing_count > 0) { + --_stub->_manual_emergency_checkpointing_count; + } } return err; @@ -320,19 +378,20 @@ error_code replica::background_async_checkpoint(bool is_emergency) LOG_INFO_PREFIX("call app.async_checkpoint() returns ERR_TRY_AGAIN, time_used_ns = {}" ", schedule later checkpoint after 10 seconds", used_time); - tasking::enqueue(LPC_PER_REPLICA_CHECKPOINT_TIMER, - &_tracker, - [this] { init_checkpoint(false); }, - get_gpid().thread_hash(), - std::chrono::seconds(10)); + tasking::enqueue( + LPC_PER_REPLICA_CHECKPOINT_TIMER, + &_tracker, + [this] { init_checkpoint(false); }, + get_gpid().thread_hash(), + std::chrono::seconds(10)); return err; } if (_is_manual_emergency_checkpointing) { _is_manual_emergency_checkpointing = false; - _stub->_manual_emergency_checkpointing_count == 0 - ? 0 - : (--_stub->_manual_emergency_checkpointing_count); + if (_stub->_manual_emergency_checkpointing_count > 0) { + --_stub->_manual_emergency_checkpointing_count; + } } if (err == ERR_WRONG_TIMING) { // do nothing @@ -388,11 +447,11 @@ void replica::catch_up_with_private_logs(partition_status::type s) auto err = apply_learned_state_from_private_log(state); if (s == partition_status::PS_POTENTIAL_SECONDARY) { - _potential_secondary_states.learn_remote_files_completed_task = - tasking::create_task(LPC_CHECKPOINT_REPLICA_COMPLETED, - &_tracker, - [this, err]() { this->on_learn_remote_state_completed(err); }, - get_gpid().thread_hash()); + _potential_secondary_states.learn_remote_files_completed_task = tasking::create_task( + LPC_CHECKPOINT_REPLICA_COMPLETED, + &_tracker, + [this, err]() { this->on_learn_remote_state_completed(err); }, + get_gpid().thread_hash()); _potential_secondary_states.learn_remote_files_completed_task->enqueue(); } else if (s == partition_status::PS_PARTITION_SPLIT) { _split_states.async_learn_task = tasking::enqueue( @@ -401,11 +460,11 @@ void replica::catch_up_with_private_logs(partition_status::type s) std::bind(&replica_split_manager::child_catch_up_states, get_split_manager()), get_gpid().thread_hash()); } else { - _secondary_states.checkpoint_completed_task = - tasking::create_task(LPC_CHECKPOINT_REPLICA_COMPLETED, - &_tracker, - [this, err]() { this->on_checkpoint_completed(err); }, - get_gpid().thread_hash()); + _secondary_states.checkpoint_completed_task = tasking::create_task( + LPC_CHECKPOINT_REPLICA_COMPLETED, + &_tracker, + [this, err]() { this->on_checkpoint_completed(err); }, + get_gpid().thread_hash()); _secondary_states.checkpoint_completed_task->enqueue(); } } diff --git a/src/replica/replica_config.cpp b/src/replica/replica_config.cpp index d6e97ed99a..8fc73eec71 100644 --- a/src/replica/replica_config.cpp +++ b/src/replica/replica_config.cpp @@ -35,12 +35,12 @@ #include #include #include +#include #include #include #include #include -#include "absl/strings/string_view.h" #include "bulk_load/replica_bulk_loader.h" #include "common/gpid.h" #include "common/replica_envs.h" @@ -51,6 +51,7 @@ #include "consensus_types.h" #include "dsn.layer2_types.h" #include "failure_detector/failure_detector_multimaster.h" +#include "gutil/map_util.h" #include "meta_admin_types.h" #include "metadata_types.h" #include "mutation.h" @@ -59,16 +60,16 @@ #include "replica/replica_context.h" #include "replica/replication_app_base.h" #include "replica_stub.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" #include "security/access_controller.h" #include "split/replica_split_manager.h" +#include "task/async_calls.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/fail_point.h" @@ -89,9 +90,9 @@ bool get_bool_envs(const std::map &envs, const std::string &name, bool &value) { - auto iter = envs.find(name); - if (iter != envs.end()) { - if (!buf2bool(iter->second, value)) { + const auto *value_ptr = gutil::FindOrNull(envs, name); + if (value_ptr != nullptr) { + if (!buf2bool(*value_ptr, value)) { return false; } } @@ -189,9 +190,9 @@ void replica::add_potential_secondary(const configuration_update_request &propos } CHECK_EQ(proposal.config.ballot, get_ballot()); - CHECK_EQ(proposal.config.pid, _primary_states.membership.pid); - CHECK_EQ(proposal.config.hp_primary, _primary_states.membership.hp_primary); - CHECK(proposal.config.hp_secondaries == _primary_states.membership.hp_secondaries, ""); + CHECK_EQ(proposal.config.pid, _primary_states.pc.pid); + CHECK_EQ(proposal.config.hp_primary, _primary_states.pc.hp_primary); + CHECK(proposal.config.hp_secondaries == _primary_states.pc.hp_secondaries, ""); host_port node; GET_HOST_PORT(proposal, node, node); @@ -199,17 +200,17 @@ void replica::add_potential_secondary(const configuration_update_request &propos CHECK(!_primary_states.check_exist(node, partition_status::PS_SECONDARY), "node = {}", node); int potential_secondaries_count = - _primary_states.membership.hp_secondaries.size() + _primary_states.learners.size(); - if (potential_secondaries_count >= _primary_states.membership.max_replica_count - 1) { + _primary_states.pc.hp_secondaries.size() + _primary_states.learners.size(); + if (potential_secondaries_count >= _primary_states.pc.max_replica_count - 1) { if (proposal.type == config_type::CT_ADD_SECONDARY) { - if (_primary_states.learners.find(node) == _primary_states.learners.end()) { + if (!gutil::ContainsKey(_primary_states.learners, node)) { LOG_INFO_PREFIX( "already have enough secondaries or potential secondaries, ignore new " "potential secondary proposal"); return; } } else if (proposal.type == config_type::CT_ADD_SECONDARY_FOR_LB) { - if (potential_secondaries_count >= _primary_states.membership.max_replica_count) { + if (potential_secondaries_count >= _primary_states.pc.max_replica_count) { LOG_INFO_PREFIX("only allow one extra (potential) secondary, ingnore new potential " "secondary proposal"); return; @@ -225,9 +226,9 @@ void replica::add_potential_secondary(const configuration_update_request &propos state.prepare_start_decree = invalid_decree; state.timeout_task = nullptr; // TODO: add timer for learner task - auto it = _primary_states.learners.find(node); - if (it != _primary_states.learners.end()) { - state.signature = it->second.signature; + const auto *rls = gutil::FindOrNull(_primary_states.learners, node); + if (rls != nullptr) { + state.signature = rls->signature; } else { state.signature = ++_primary_states.next_learning_version; _primary_states.learners[node] = state; @@ -255,12 +256,12 @@ void replica::upgrade_to_secondary_on_primary(const ::dsn::host_port &node) { LOG_INFO_PREFIX("upgrade potential secondary {} to secondary", node); - partition_configuration new_config = _primary_states.membership; + partition_configuration new_pc = _primary_states.pc; // add secondary - ADD_IP_AND_HOST_PORT_BY_DNS(new_config, secondaries, node); + ADD_IP_AND_HOST_PORT_BY_DNS(new_pc, secondaries, node); - update_configuration_on_meta_server(config_type::CT_UPGRADE_TO_SECONDARY, node, new_config); + update_configuration_on_meta_server(config_type::CT_UPGRADE_TO_SECONDARY, node, new_pc); } void replica::downgrade_to_secondary_on_primary(configuration_update_request &proposal) @@ -269,9 +270,9 @@ void replica::downgrade_to_secondary_on_primary(configuration_update_request &pr return; } - CHECK_EQ(proposal.config.pid, _primary_states.membership.pid); - CHECK_EQ(proposal.config.hp_primary, _primary_states.membership.hp_primary); - CHECK(proposal.config.hp_secondaries == _primary_states.membership.hp_secondaries, ""); + CHECK_EQ(proposal.config.pid, _primary_states.pc.pid); + CHECK_EQ(proposal.config.hp_primary, _primary_states.pc.hp_primary); + CHECK(proposal.config.hp_secondaries == _primary_states.pc.hp_secondaries, ""); CHECK_EQ(proposal.hp_node, proposal.config.hp_primary); CHECK_EQ(proposal.node, proposal.config.primary); @@ -286,9 +287,9 @@ void replica::downgrade_to_inactive_on_primary(configuration_update_request &pro if (proposal.config.ballot != get_ballot() || status() != partition_status::PS_PRIMARY) return; - CHECK_EQ(proposal.config.pid, _primary_states.membership.pid); - CHECK_EQ(proposal.config.hp_primary, _primary_states.membership.hp_primary); - CHECK(proposal.config.hp_secondaries == _primary_states.membership.hp_secondaries, ""); + CHECK_EQ(proposal.config.pid, _primary_states.pc.pid); + CHECK_EQ(proposal.config.hp_primary, _primary_states.pc.hp_primary); + CHECK(proposal.config.hp_secondaries == _primary_states.pc.hp_secondaries, ""); host_port node; GET_HOST_PORT(proposal, node, node); @@ -314,9 +315,9 @@ void replica::remove(configuration_update_request &proposal) if (proposal.config.ballot != get_ballot() || status() != partition_status::PS_PRIMARY) return; - CHECK_EQ(proposal.config.pid, _primary_states.membership.pid); - CHECK_EQ(proposal.config.hp_primary, _primary_states.membership.hp_primary); - CHECK(proposal.config.hp_secondaries == _primary_states.membership.hp_secondaries, ""); + CHECK_EQ(proposal.config.pid, _primary_states.pc.pid); + CHECK_EQ(proposal.config.hp_primary, _primary_states.pc.hp_primary); + CHECK(proposal.config.hp_secondaries == _primary_states.pc.hp_secondaries, ""); host_port node; GET_HOST_PORT(proposal, node, node); @@ -375,24 +376,24 @@ void replica::on_remove(const replica_configuration &request) void replica::update_configuration_on_meta_server(config_type::type type, const host_port &node, - partition_configuration &new_config) + partition_configuration &new_pc) { // type should never be `CT_REGISTER_CHILD` // if this happens, it means serious mistake happened during partition split // assert here to stop split and avoid splitting wrong CHECK_NE_PREFIX(type, config_type::CT_REGISTER_CHILD); - new_config.last_committed_decree = last_committed_decree(); + new_pc.last_committed_decree = last_committed_decree(); if (type == config_type::CT_PRIMARY_FORCE_UPDATE_BALLOT) { CHECK(status() == partition_status::PS_INACTIVE && _inactive_is_transient && _is_initializing, ""); - CHECK_EQ(new_config.hp_primary, node); + CHECK_EQ(new_pc.hp_primary, node); } else if (type != config_type::CT_ASSIGN_PRIMARY && type != config_type::CT_UPGRADE_TO_PRIMARY) { CHECK_EQ(status(), partition_status::PS_PRIMARY); - CHECK_EQ(new_config.ballot, _primary_states.membership.ballot); + CHECK_EQ(new_pc.ballot, _primary_states.pc.ballot); } // disable 2pc during reconfiguration @@ -406,7 +407,7 @@ void replica::update_configuration_on_meta_server(config_type::type type, std::shared_ptr request(new configuration_update_request); request->info = _app_info; - request->config = new_config; + request->config = new_pc; request->config.ballot++; request->type = type; SET_IP_AND_HOST_PORT_BY_DNS(*request, node, node); @@ -425,14 +426,14 @@ void replica::update_configuration_on_meta_server(config_type::type type, rpc_address target( dsn::dns_resolver::instance().resolve_address(_stub->_failure_detector->get_servers())); - _primary_states.reconfiguration_task = - rpc::call(target, - msg, - &_tracker, - [=](error_code err, dsn::message_ex *reqmsg, dsn::message_ex *response) { - on_update_configuration_on_meta_server_reply(err, reqmsg, response, request); - }, - get_gpid().thread_hash()); + _primary_states.reconfiguration_task = rpc::call( + target, + msg, + &_tracker, + [=](error_code err, dsn::message_ex *reqmsg, dsn::message_ex *response) { + on_update_configuration_on_meta_server_reply(err, reqmsg, response, request); + }, + get_gpid().thread_hash()); } void replica::on_update_configuration_on_meta_server_reply( @@ -464,7 +465,7 @@ void replica::on_update_configuration_on_meta_server_reply( _primary_states.reconfiguration_task = tasking::enqueue( LPC_DELAY_UPDATE_CONFIG, &_tracker, - [ this, request, req2 = std::move(req) ]() { + [this, request, req2 = std::move(req)]() { rpc_address target(dsn::dns_resolver::instance().resolve_address( _stub->_failure_detector->get_servers())); rpc_response_task_ptr t = rpc::create_rpc_response_task( @@ -585,9 +586,10 @@ void replica::update_bool_envs(const std::map &envs, void replica::update_ac_allowed_users(const std::map &envs) { std::string allowed_users; - auto iter = envs.find(replica_envs::REPLICA_ACCESS_CONTROLLER_ALLOWED_USERS); - if (iter != envs.end()) { - allowed_users = iter->second; + const auto *env = + gutil::FindOrNull(envs, replica_envs::REPLICA_ACCESS_CONTROLLER_ALLOWED_USERS); + if (env != nullptr) { + allowed_users = *env; } _access_controller->update_allowed_users(allowed_users); @@ -595,9 +597,10 @@ void replica::update_ac_allowed_users(const std::map & void replica::update_ac_ranger_policies(const std::map &envs) { - auto iter = envs.find(replica_envs::REPLICA_ACCESS_CONTROLLER_RANGER_POLICIES); - if (iter != envs.end()) { - _access_controller->update_ranger_policies(iter->second); + const auto *env = + gutil::FindOrNull(envs, replica_envs::REPLICA_ACCESS_CONTROLLER_RANGER_POLICIES); + if (env != nullptr) { + _access_controller->update_ranger_policies(*env); } } @@ -623,14 +626,14 @@ void replica::update_allow_ingest_behind(const std::map &envs) { - auto env_iter = envs.find(replica_envs::DENY_CLIENT_REQUEST); - if (env_iter == envs.end()) { + const auto *env = gutil::FindOrNull(envs, replica_envs::DENY_CLIENT_REQUEST); + if (env == nullptr) { _deny_client.reset(); return; } std::vector sub_sargs; - utils::split_args(env_iter->second.c_str(), sub_sargs, '*', true); + utils::split_args(env->c_str(), sub_sargs, '*', true); CHECK_EQ_PREFIX(sub_sargs.size(), 2); _deny_client.reconfig = (sub_sargs[0] == "reconfig"); @@ -645,23 +648,24 @@ void replica::query_app_envs(/*out*/ std::map &envs) } } -bool replica::update_configuration(const partition_configuration &config) +bool replica::update_configuration(const partition_configuration &pc) { - CHECK_GE(config.ballot, get_ballot()); + CHECK_GE(pc.ballot, get_ballot()); replica_configuration rconfig; - replica_helper::get_replica_config(config, _stub->primary_host_port(), rconfig); + replica_helper::get_replica_config(pc, _stub->primary_host_port(), rconfig); if (rconfig.status == partition_status::PS_PRIMARY && (rconfig.ballot > get_ballot() || status() != partition_status::PS_PRIMARY)) { - _primary_states.reset_membership(config, config.hp_primary != _stub->primary_host_port()); + _primary_states.reset_membership(pc, pc.hp_primary != _stub->primary_host_port()); } - if (config.ballot > get_ballot() || + if (pc.ballot > get_ballot() || is_same_ballot_status_change_allowed(status(), rconfig.status)) { return update_local_configuration(rconfig, true); - } else + } else { return false; + } } bool replica::is_same_ballot_status_change_allowed(partition_status::type olds, @@ -690,7 +694,7 @@ bool replica::is_same_ballot_status_change_allowed(partition_status::type olds, bool replica::update_local_configuration(const replica_configuration &config, bool same_ballot /* = false*/) { - FAIL_POINT_INJECT_F("replica_update_local_configuration", [=](absl::string_view) -> bool { + FAIL_POINT_INJECT_F("replica_update_local_configuration", [=](std::string_view) -> bool { auto old_status = status(); _config = config; LOG_INFO_PREFIX( @@ -1041,7 +1045,8 @@ bool replica::update_local_configuration(const replica_configuration &config, init_prepare(next, false); } - if (_primary_states.membership.hp_secondaries.size() + 1 < + CHECK(_primary_states.pc.__isset.hp_secondaries, ""); + if (_primary_states.pc.hp_secondaries.size() + 1 < _options->app_mutation_2pc_min_replica_count(_app_info.max_replica_count)) { std::vector queued; _primary_states.write_queue.clear(queued); @@ -1069,12 +1074,12 @@ bool replica::update_local_configuration_with_no_ballot_change(partition_status: // ThreadPool: THREAD_POOL_REPLICATION void replica::on_config_sync(const app_info &info, - const partition_configuration &config, + const partition_configuration &pc, split_status::type meta_split_status) { LOG_DEBUG_PREFIX("configuration sync"); // no outdated update - if (config.ballot < get_ballot()) + if (pc.ballot < get_ballot()) return; update_app_max_replica_count(info.max_replica_count); @@ -1091,26 +1096,25 @@ void replica::on_config_sync(const app_info &info, } else { if (_is_initializing) { // in initializing, when replica still primary, need to inc ballot - if (config.hp_primary == _stub->primary_host_port() && + if (pc.hp_primary == _stub->primary_host_port() && status() == partition_status::PS_INACTIVE && _inactive_is_transient) { update_configuration_on_meta_server(config_type::CT_PRIMARY_FORCE_UPDATE_BALLOT, - config.hp_primary, - const_cast(config)); + pc.hp_primary, + const_cast(pc)); return; } _is_initializing = false; } - update_configuration(config); + update_configuration(pc); if (status() == partition_status::PS_INACTIVE && !_inactive_is_transient) { - if (config.hp_primary == _stub->primary_host_port() // dead primary - || - !config.hp_primary // primary is dead (otherwise let primary remove this) - ) { + if (pc.hp_primary == _stub->primary_host_port() // dead primary + || !pc.hp_primary // primary is dead (otherwise let primary remove this) + ) { LOG_INFO_PREFIX("downgrade myself as inactive is not transient, remote_config({})", - boost::lexical_cast(config)); - _stub->remove_replica_on_meta_server(_app_info, config); + boost::lexical_cast(pc)); + _stub->remove_replica_on_meta_server(_app_info, pc); } else { LOG_INFO_PREFIX("state is non-transient inactive, waiting primary to remove me"); } diff --git a/src/replica/replica_context.cpp b/src/replica/replica_context.cpp index a1fda93147..c2b4a2fa6c 100644 --- a/src/replica/replica_context.cpp +++ b/src/replica/replica_context.cpp @@ -33,7 +33,7 @@ #include "replica.h" #include "replica_context.h" #include "replica_stub.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" #include "utils/error_code.h" #include "utils/utils.h" @@ -67,7 +67,7 @@ void primary_context::cleanup(bool clean_pending_mutations) } group_bulk_load_pending_replies.clear(); - membership.ballot = 0; + pc.ballot = 0; cleanup_bulk_load_states(); @@ -91,25 +91,26 @@ void primary_context::do_cleanup_pending_mutations(bool clean_pending_mutations) } } -void primary_context::reset_membership(const partition_configuration &config, bool clear_learners) +void primary_context::reset_membership(const partition_configuration &new_pc, bool clear_learners) { statuses.clear(); if (clear_learners) { learners.clear(); } - if (config.ballot > membership.ballot) - next_learning_version = (((uint64_t)config.ballot) << 32) + 1; - else + if (new_pc.ballot > pc.ballot) { + next_learning_version = (((uint64_t)new_pc.ballot) << 32) + 1; + } else { ++next_learning_version; + } - membership = config; + pc = new_pc; - if (membership.hp_primary) { - statuses[membership.hp_primary] = partition_status::PS_PRIMARY; + if (pc.hp_primary) { + statuses[pc.hp_primary] = partition_status::PS_PRIMARY; } - for (auto it = config.hp_secondaries.begin(); it != config.hp_secondaries.end(); ++it) { + for (auto it = new_pc.hp_secondaries.begin(); it != new_pc.hp_secondaries.end(); ++it) { statuses[*it] = partition_status::PS_SECONDARY; learners.erase(*it); } @@ -123,9 +124,9 @@ void primary_context::get_replica_config(partition_status::type st, /*out*/ replica_configuration &config, uint64_t learner_signature /*= invalid_signature*/) { - config.pid = membership.pid; - SET_OBJ_IP_AND_HOST_PORT(config, primary, membership, primary); - config.ballot = membership.ballot; + config.pid = pc.pid; + SET_OBJ_IP_AND_HOST_PORT(config, primary, pc, primary); + config.ballot = pc.ballot; config.status = st; config.learner_signature = learner_signature; } @@ -134,9 +135,9 @@ bool primary_context::check_exist(const ::dsn::host_port &node, partition_status { switch (st) { case partition_status::PS_PRIMARY: - return membership.hp_primary == node; + return pc.hp_primary == node; case partition_status::PS_SECONDARY: - return utils::contains(membership.hp_secondaries, node); + return utils::contains(pc.hp_secondaries, node); case partition_status::PS_POTENTIAL_SECONDARY: return learners.find(node) != learners.end(); default: @@ -176,7 +177,7 @@ bool primary_context::secondary_disk_abnormal() const for (const auto &kv : secondary_disk_status) { if (kv.second != disk_status::NORMAL) { LOG_INFO("partition[{}] secondary[{}] disk space is {}", - membership.pid, + pc.pid, kv.first, enum_to_string(kv.second)); return true; diff --git a/src/replica/replica_context.h b/src/replica/replica_context.h index 0a3b499243..c1719c8de9 100644 --- a/src/replica/replica_context.h +++ b/src/replica/replica_context.h @@ -40,9 +40,9 @@ #include "dsn.layer2_types.h" #include "metadata_types.h" #include "mutation.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/fmt_logging.h" @@ -100,7 +100,7 @@ class primary_context void cleanup(bool clean_pending_mutations = true); bool is_cleaned(); - void reset_membership(const partition_configuration &config, bool clear_learners); + void reset_membership(const partition_configuration &new_pc, bool clear_learners); void get_replica_config(partition_status::type status, /*out*/ replica_configuration &config, uint64_t learner_signature = invalid_signature); @@ -120,7 +120,7 @@ class primary_context public: // membership mgr, including learners - partition_configuration membership; + partition_configuration pc; node_statuses statuses; learner_map learners; uint64_t next_learning_version; diff --git a/src/replica/replica_disk_migrator.cpp b/src/replica/replica_disk_migrator.cpp index 47d48d0669..597644103f 100644 --- a/src/replica/replica_disk_migrator.cpp +++ b/src/replica/replica_disk_migrator.cpp @@ -20,7 +20,7 @@ #include #include -#include "absl/strings/string_view.h" +#include #include "common/fs_manager.h" #include "common/gpid.h" #include "common/replication.codes.h" @@ -30,7 +30,7 @@ #include "replica/replica_stub.h" #include "replica/replication_app_base.h" #include "replica_disk_migrator.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" #include "utils/error_code.h" #include "utils/fail_point.h" #include "utils/filesystem.h" @@ -56,7 +56,6 @@ void replica_disk_migrator::on_migrate_replica(replica_disk_migrate_rpc rpc) LPC_REPLICATION_COMMON, _replica->tracker(), [=]() { - if (!check_migration_args(rpc)) { return; } @@ -165,7 +164,7 @@ void replica_disk_migrator::migrate_replica(const replica_disk_migrate_request & // THREAD_POOL_REPLICATION_LONG bool replica_disk_migrator::init_target_dir(const replica_disk_migrate_request &req) { - FAIL_POINT_INJECT_F("init_target_dir", [this](absl::string_view) -> bool { + FAIL_POINT_INJECT_F("init_target_dir", [this](std::string_view) -> bool { reset_status(); return false; }); @@ -211,7 +210,7 @@ bool replica_disk_migrator::init_target_dir(const replica_disk_migrate_request & // THREAD_POOL_REPLICATION_LONG bool replica_disk_migrator::migrate_replica_checkpoint(const replica_disk_migrate_request &req) { - FAIL_POINT_INJECT_F("migrate_replica_checkpoint", [this](absl::string_view) -> bool { + FAIL_POINT_INJECT_F("migrate_replica_checkpoint", [this](std::string_view) -> bool { reset_status(); return false; }); @@ -247,7 +246,7 @@ bool replica_disk_migrator::migrate_replica_checkpoint(const replica_disk_migrat // THREAD_POOL_REPLICATION_LONG bool replica_disk_migrator::migrate_replica_app_info(const replica_disk_migrate_request &req) { - FAIL_POINT_INJECT_F("migrate_replica_app_info", [this](absl::string_view) -> bool { + FAIL_POINT_INJECT_F("migrate_replica_app_info", [this](std::string_view) -> bool { reset_status(); return false; }); diff --git a/src/replica/replica_disk_migrator.h b/src/replica/replica_disk_migrator.h index a3c52fe503..0d309cf77a 100644 --- a/src/replica/replica_disk_migrator.h +++ b/src/replica/replica_disk_migrator.h @@ -24,7 +24,7 @@ #include "replica/replica_base.h" #include "replica/replica_stub.h" #include "replica_admin_types.h" -#include "runtime/task/task.h" +#include "task/task.h" namespace dsn { namespace replication { diff --git a/src/replica/replica_failover.cpp b/src/replica/replica_failover.cpp index 6cd7895f7a..dd7fa9d4bc 100644 --- a/src/replica/replica_failover.cpp +++ b/src/replica/replica_failover.cpp @@ -36,8 +36,8 @@ #include "replica.h" #include "replica/replica_context.h" #include "replica_stub.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" @@ -58,7 +58,7 @@ void replica::handle_local_failure(error_code error) } if (status() == partition_status::PS_PRIMARY) { - _stub->remove_replica_on_meta_server(_app_info, _primary_states.membership); + _stub->remove_replica_on_meta_server(_app_info, _primary_states.pc); } update_local_configuration_with_no_ballot_change(partition_status::PS_ERROR); @@ -88,7 +88,7 @@ void replica::handle_remote_failure(partition_status::type st, configuration_update_request request; SET_IP_AND_HOST_PORT_BY_DNS(request, node, node); request.type = config_type::CT_DOWNGRADE_TO_INACTIVE; - request.config = _primary_states.membership; + request.config = _primary_states.pc; downgrade_to_inactive_on_primary(request); } break; @@ -121,5 +121,5 @@ void replica::on_meta_server_disconnected() set_inactive_state_transient(true); } } -} -} // namespace +} // namespace replication +} // namespace dsn diff --git a/src/replica/replica_init.cpp b/src/replica/replica_init.cpp index 1cee376e64..1bee60eda7 100644 --- a/src/replica/replica_init.cpp +++ b/src/replica/replica_init.cpp @@ -43,8 +43,8 @@ #include "replica/prepare_list.h" #include "replica/replication_app_base.h" #include "runtime/api_layer1.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" +#include "task/async_calls.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/filesystem.h" @@ -157,10 +157,11 @@ error_code replica::init_app_and_prepare_list(bool create_new) err = _private_log->open( [this](int log_length, mutation_ptr &mu) { return replay_mutation(mu, true); }, [this](error_code err) { - tasking::enqueue(LPC_REPLICATION_ERROR, - &_tracker, - [this, err]() { handle_local_failure(err); }, - get_gpid().thread_hash()); + tasking::enqueue( + LPC_REPLICATION_ERROR, + &_tracker, + [this, err]() { handle_local_failure(err); }, + get_gpid().thread_hash()); }, replay_condition); @@ -227,21 +228,22 @@ error_code replica::init_app_and_prepare_list(bool create_new) LOG_INFO_PREFIX("plog_dir = {}", log_dir); err = _private_log->open(nullptr, [this](error_code err) { - tasking::enqueue(LPC_REPLICATION_ERROR, - &_tracker, - [this, err]() { handle_local_failure(err); }, - get_gpid().thread_hash()); + tasking::enqueue( + LPC_REPLICATION_ERROR, + &_tracker, + [this, err]() { handle_local_failure(err); }, + get_gpid().thread_hash()); }); } if (err == ERR_OK) { if (_checkpoint_timer == nullptr && !FLAGS_checkpoint_disabled) { - _checkpoint_timer = - tasking::enqueue_timer(LPC_PER_REPLICA_CHECKPOINT_TIMER, - &_tracker, - [this] { on_checkpoint_timer(); }, - std::chrono::seconds(FLAGS_checkpoint_interval_seconds), - get_gpid().thread_hash()); + _checkpoint_timer = tasking::enqueue_timer( + LPC_PER_REPLICA_CHECKPOINT_TIMER, + &_tracker, + [this] { on_checkpoint_timer(); }, + std::chrono::seconds(FLAGS_checkpoint_interval_seconds), + get_gpid().thread_hash()); } _backup_mgr->start_collect_backup_info(); diff --git a/src/replica/replica_learn.cpp b/src/replica/replica_learn.cpp index bb8414ce4e..cbb8866080 100644 --- a/src/replica/replica_learn.cpp +++ b/src/replica/replica_learn.cpp @@ -55,14 +55,14 @@ #include "replica/replica_context.h" #include "replica/replication_app_base.h" #include "replica_stub.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" +#include "task/async_calls.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/binary_reader.h" #include "utils/binary_writer.h" @@ -174,13 +174,14 @@ void replica::init_learn(uint64_t signature) METRIC_VAR_INCREMENT(learn_rounds); _potential_secondary_states.learning_round_is_running = true; _potential_secondary_states.catchup_with_private_log_task = - tasking::create_task(LPC_CATCHUP_WITH_PRIVATE_LOGS, - &_tracker, - [this]() { - this->catch_up_with_private_logs( - partition_status::PS_POTENTIAL_SECONDARY); - }, - get_gpid().thread_hash()); + tasking::create_task( + LPC_CATCHUP_WITH_PRIVATE_LOGS, + &_tracker, + [this]() { + this->catch_up_with_private_logs( + partition_status::PS_POTENTIAL_SECONDARY); + }, + get_gpid().thread_hash()); _potential_secondary_states.catchup_with_private_log_task->enqueue(); return; // incomplete @@ -261,7 +262,7 @@ void replica::init_learn(uint64_t signature) dsn::dns_resolver::instance().resolve_address(primary), msg, &_tracker, - [ this, req_cap = std::move(request) ](error_code err, learn_response && resp) mutable { + [this, req_cap = std::move(request)](error_code err, learn_response &&resp) mutable { on_learn_reply(err, std::move(req_cap), std::move(resp)); }); } @@ -699,14 +700,14 @@ void replica::on_learn_reply(error_code err, learn_request &&req, learn_response } if (err != ERR_OK) { - _potential_secondary_states.learn_remote_files_task = - tasking::create_task(LPC_LEARN_REMOTE_DELTA_FILES, &_tracker, [ - this, - err, - copy_start = _potential_secondary_states.duration_ms(), - req_cap = std::move(req), - resp_cap = std::move(resp) - ]() mutable { + _potential_secondary_states.learn_remote_files_task = tasking::create_task( + LPC_LEARN_REMOTE_DELTA_FILES, + &_tracker, + [this, + err, + copy_start = _potential_secondary_states.duration_ms(), + req_cap = std::move(req), + resp_cap = std::move(resp)]() mutable { on_copy_remote_state_completed( err, 0, copy_start, std::move(req_cap), std::move(resp_cap)); }); @@ -849,14 +850,14 @@ void replica::on_learn_reply(error_code err, learn_request &&req, learn_response // go to next stage _potential_secondary_states.learning_status = learner_status::LearningWithPrepare; - _potential_secondary_states.learn_remote_files_task = - tasking::create_task(LPC_LEARN_REMOTE_DELTA_FILES, &_tracker, [ - this, - err, - copy_start = _potential_secondary_states.duration_ms(), - req_cap = std::move(req), - resp_cap = std::move(resp) - ]() mutable { + _potential_secondary_states.learn_remote_files_task = tasking::create_task( + LPC_LEARN_REMOTE_DELTA_FILES, + &_tracker, + [this, + err, + copy_start = _potential_secondary_states.duration_ms(), + req_cap = std::move(req), + resp_cap = std::move(resp)]() mutable { on_copy_remote_state_completed( err, 0, copy_start, std::move(req_cap), std::move(resp_cap)); }); @@ -876,18 +877,18 @@ void replica::on_learn_reply(error_code err, learn_request &&req, learn_response learn_dir); _potential_secondary_states.learn_remote_files_task = - tasking::create_task(LPC_LEARN_REMOTE_DELTA_FILES, &_tracker, [ - this, - copy_start = _potential_secondary_states.duration_ms(), - req_cap = std::move(req), - resp_cap = std::move(resp) - ]() mutable { - on_copy_remote_state_completed(ERR_FILE_OPERATION_FAILED, - 0, - copy_start, - std::move(req_cap), - std::move(resp_cap)); - }); + tasking::create_task(LPC_LEARN_REMOTE_DELTA_FILES, + &_tracker, + [this, + copy_start = _potential_secondary_states.duration_ms(), + req_cap = std::move(req), + resp_cap = std::move(resp)]() mutable { + on_copy_remote_state_completed(ERR_FILE_OPERATION_FAILED, + 0, + copy_start, + std::move(req_cap), + std::move(resp_cap)); + }); _potential_secondary_states.learn_remote_files_task->enqueue(); return; } @@ -915,23 +916,21 @@ void replica::on_learn_reply(error_code err, learn_request &&req, learn_response high_priority, LPC_REPLICATION_COPY_REMOTE_FILES, &_tracker, - [ - this, - copy_start = _potential_secondary_states.duration_ms(), - req_cap = std::move(req), - resp_copy = resp - ](error_code err, size_t sz) mutable { + [this, + copy_start = _potential_secondary_states.duration_ms(), + req_cap = std::move(req), + resp_copy = resp](error_code err, size_t sz) mutable { on_copy_remote_state_completed( err, sz, copy_start, std::move(req_cap), std::move(resp_copy)); }); } else { - _potential_secondary_states.learn_remote_files_task = - tasking::create_task(LPC_LEARN_REMOTE_DELTA_FILES, &_tracker, [ - this, - copy_start = _potential_secondary_states.duration_ms(), - req_cap = std::move(req), - resp_cap = std::move(resp) - ]() mutable { + _potential_secondary_states.learn_remote_files_task = tasking::create_task( + LPC_LEARN_REMOTE_DELTA_FILES, + &_tracker, + [this, + copy_start = _potential_secondary_states.duration_ms(), + req_cap = std::move(req), + resp_cap = std::move(resp)]() mutable { on_copy_remote_state_completed( ERR_OK, 0, copy_start, std::move(req_cap), std::move(resp_cap)); }); @@ -1177,11 +1176,11 @@ void replica::on_copy_remote_state_completed(error_code err, // cleanup _potential_secondary_states.learn_remote_files_task = nullptr; - _potential_secondary_states.learn_remote_files_completed_task = - tasking::create_task(LPC_LEARN_REMOTE_DELTA_FILES_COMPLETED, - &_tracker, - [this, err]() { on_learn_remote_state_completed(err); }, - get_gpid().thread_hash()); + _potential_secondary_states.learn_remote_files_completed_task = tasking::create_task( + LPC_LEARN_REMOTE_DELTA_FILES_COMPLETED, + &_tracker, + [this, err]() { on_learn_remote_state_completed(err); }, + get_gpid().thread_hash()); _potential_secondary_states.learn_remote_files_completed_task->enqueue(); } @@ -1307,11 +1306,11 @@ void replica::notify_learn_completion() host_port primary; GET_HOST_PORT(_config, primary, primary); - _potential_secondary_states.completion_notify_task = - rpc::call(dsn::dns_resolver::instance().resolve_address(primary), msg, &_tracker, [ - this, - report = std::move(report) - ](error_code err, learn_notify_response && resp) mutable { + _potential_secondary_states.completion_notify_task = rpc::call( + dsn::dns_resolver::instance().resolve_address(primary), + msg, + &_tracker, + [this, report = std::move(report)](error_code err, learn_notify_response &&resp) mutable { on_learn_completion_notification_reply(err, std::move(report), std::move(resp)); }); } @@ -1477,10 +1476,11 @@ error_code replica::apply_learned_state_from_private_log(learn_state &state) _app->learn_dir(), [](int log_length, mutation_ptr &mu) { return true; }, [this](error_code err) { - tasking::enqueue(LPC_REPLICATION_ERROR, - &_tracker, - [this, err]() { handle_local_failure(err); }, - get_gpid().thread_hash()); + tasking::enqueue( + LPC_REPLICATION_ERROR, + &_tracker, + [this, err]() { handle_local_failure(err); }, + get_gpid().thread_hash()); }); if (err != ERR_OK) { LOG_ERROR_PREFIX("failed to reset this private log with logs in learn/ dir: {}", err); @@ -1522,21 +1522,21 @@ error_code replica::apply_learned_state_from_private_log(learn_state &state) } }); - err = mutation_log::replay(state.files, - [&plist](int log_length, mutation_ptr &mu) { - auto d = mu->data.header.decree; - if (d <= plist.last_committed_decree()) - return false; - - auto old = plist.get_mutation_by_decree(d); - if (old != nullptr && - old->data.header.ballot >= mu->data.header.ballot) - return false; - - plist.prepare(mu, partition_status::PS_SECONDARY); - return true; - }, - offset); + err = mutation_log::replay( + state.files, + [&plist](int log_length, mutation_ptr &mu) { + auto d = mu->data.header.decree; + if (d <= plist.last_committed_decree()) + return false; + + auto old = plist.get_mutation_by_decree(d); + if (old != nullptr && old->data.header.ballot >= mu->data.header.ballot) + return false; + + plist.prepare(mu, partition_status::PS_SECONDARY); + return true; + }, + offset); // update first_learn_start_decree, the position where the first round of LT_LOG starts from. // we use this value to determine whether to learn back from min_confirmed_decree diff --git a/src/replica/replica_restore.cpp b/src/replica/replica_restore.cpp index 063a294c46..00b260dc0a 100644 --- a/src/replica/replica_restore.cpp +++ b/src/replica/replica_restore.cpp @@ -39,13 +39,13 @@ #include "metadata_types.h" #include "replica.h" #include "replica_stub.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/env.h" @@ -263,10 +263,11 @@ dsn::error_code replica::find_valid_checkpoint(const configuration_restore_reque // TODO: check the md5sum read_response r; create_response.file_handle - ->read(read_request{0, -1}, - TASK_CODE_EXEC_INLINED, - [&r](const read_response &resp) { r = resp; }, - nullptr) + ->read( + read_request{0, -1}, + TASK_CODE_EXEC_INLINED, + [&r](const read_response &resp) { r = resp; }, + nullptr) ->wait(); if (r.err != dsn::ERR_OK) { @@ -470,5 +471,5 @@ void replica::update_restore_progress(uint64_t f_size) cur_download_size, cur_porgress); } -} -} +} // namespace replication +} // namespace dsn diff --git a/src/replica/replica_stub.cpp b/src/replica/replica_stub.cpp index 3558fb7a45..de2b789730 100644 --- a/src/replica/replica_stub.cpp +++ b/src/replica/replica_stub.cpp @@ -29,6 +29,7 @@ // IWYU pragma: no_include #include #include +#include #include #include #include @@ -37,16 +38,17 @@ #include #include #include -#include #include +#include +#include #include #include -#include "absl/strings/string_view.h" #include "backup/replica_backup_server.h" #include "bulk_load/replica_bulk_loader.h" #include "common/backup_common.h" #include "common/duplication_common.h" +#include "common/json_helper.h" #include "common/replication.codes.h" #include "common/replication_enums.h" #include "disk_cleaner.h" @@ -63,12 +65,12 @@ #include "replica/replica_stub.h" #include "replica/replication_app_base.h" #include "replica_disk_migrator.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" #include "security/access_controller.h" #include "split/replica_split_manager.h" +#include "task/async_calls.h" #include "utils/command_manager.h" #include "utils/env.h" #include "utils/errors.h" @@ -518,34 +520,34 @@ void replica_stub::initialize(const replication_options &opts, bool clear /* = f continue; } - load_tasks.push_back( - tasking::create_task(LPC_REPLICATION_INIT_LOAD, - &_tracker, - [this, dn, dir, &rps, &rps_lock] { - LOG_INFO("process dir {}", dir); - - auto r = load_replica(dn, dir.c_str()); - if (r == nullptr) { - return; - } - LOG_INFO("{}@{}: load replica '{}' success, = <{}, {}>, last_prepared_decree = {}", - r->get_gpid(), - dsn_primary_host_port(), - dir, - r->last_durable_decree(), - r->last_committed_decree(), - r->last_prepared_decree()); - - utils::auto_lock l(rps_lock); - CHECK(rps.find(r->get_gpid()) == rps.end(), - "conflict replica dir: {} <--> {}", - r->dir(), - rps[r->get_gpid()]->dir()); - - rps[r->get_gpid()] = r; - }, - load_tasks.size())); + load_tasks.push_back(tasking::create_task( + LPC_REPLICATION_INIT_LOAD, + &_tracker, + [this, dn, dir, &rps, &rps_lock] { + LOG_INFO("process dir {}", dir); + + auto r = load_replica(dn, dir.c_str()); + if (r == nullptr) { + return; + } + LOG_INFO("{}@{}: load replica '{}' success, = <{}, {}>, last_prepared_decree = {}", + r->get_gpid(), + dsn_primary_host_port(), + dir, + r->last_durable_decree(), + r->last_committed_decree(), + r->last_prepared_decree()); + + utils::auto_lock l(rps_lock); + CHECK(rps.find(r->get_gpid()) == rps.end(), + "conflict replica dir: {} <--> {}", + r->dir(), + rps[r->get_gpid()]->dir()); + + rps[r->get_gpid()] = r; + }, + load_tasks.size())); load_tasks.back()->enqueue(); } } @@ -609,13 +611,13 @@ void replica_stub::initialize(const replication_options &opts, bool clear /* = f // disk stat if (!FLAGS_disk_stat_disabled) { - _disk_stat_timer_task = - ::dsn::tasking::enqueue_timer(LPC_DISK_STAT, - &_tracker, - [this]() { on_disk_stat(); }, - std::chrono::seconds(FLAGS_disk_stat_interval_seconds), - 0, - std::chrono::seconds(FLAGS_disk_stat_interval_seconds)); + _disk_stat_timer_task = ::dsn::tasking::enqueue_timer( + LPC_DISK_STAT, + &_tracker, + [this]() { on_disk_stat(); }, + std::chrono::seconds(FLAGS_disk_stat_interval_seconds), + 0, + std::chrono::seconds(FLAGS_disk_stat_interval_seconds)); } // attach rps @@ -637,11 +639,12 @@ void replica_stub::initialize(const replication_options &opts, bool clear /* = f if (now_time_ms < dsn::utils::process_start_millis() + delay_time_ms) { uint64_t delay = dsn::utils::process_start_millis() + delay_time_ms - now_time_ms; LOG_INFO("delay for {} ms to make failure detector timeout", delay); - tasking::enqueue(LPC_REPLICA_SERVER_DELAY_START, - &_tracker, - [this]() { this->initialize_start(); }, - 0, - std::chrono::milliseconds(delay)); + tasking::enqueue( + LPC_REPLICA_SERVER_DELAY_START, + &_tracker, + [this]() { this->initialize_start(); }, + 0, + std::chrono::milliseconds(delay)); } else { initialize_start(); } @@ -658,16 +661,16 @@ void replica_stub::initialize_start() // start timer for configuration sync if (!FLAGS_config_sync_disabled) { - _config_sync_timer_task = - tasking::enqueue_timer(LPC_QUERY_CONFIGURATION_ALL, - &_tracker, - [this]() { - zauto_lock l(_state_lock); - this->query_configuration_by_node(); - }, - std::chrono::milliseconds(FLAGS_config_sync_interval_ms), - 0, - std::chrono::milliseconds(FLAGS_config_sync_interval_ms)); + _config_sync_timer_task = tasking::enqueue_timer( + LPC_QUERY_CONFIGURATION_ALL, + &_tracker, + [this]() { + zauto_lock l(_state_lock); + this->query_configuration_by_node(); + }, + std::chrono::milliseconds(FLAGS_config_sync_interval_ms), + 0, + std::chrono::milliseconds(FLAGS_config_sync_interval_ms)); } #ifdef DSN_ENABLE_GPERF @@ -757,7 +760,7 @@ std::vector replica_stub::get_all_primaries() const std::vector result; { zauto_read_lock l(_replicas_lock); - for (const auto & [ _, r ] : _replicas) { + for (const auto &[_, r] : _replicas) { if (r->status() != partition_status::PS_PRIMARY) { continue; } @@ -1320,15 +1323,16 @@ void replica_stub::on_node_query_reply(error_code err, int delay_ms = 500; LOG_INFO("resend query node partitions request after {} ms for resp.err = ERR_BUSY", delay_ms); - _config_query_task = tasking::enqueue(LPC_QUERY_CONFIGURATION_ALL, - &_tracker, - [this]() { - zauto_lock l(_state_lock); - _config_query_task = nullptr; - this->query_configuration_by_node(); - }, - 0, - std::chrono::milliseconds(delay_ms)); + _config_query_task = tasking::enqueue( + LPC_QUERY_CONFIGURATION_ALL, + &_tracker, + [this]() { + zauto_lock l(_state_lock); + _config_query_task = nullptr; + this->query_configuration_by_node(); + }, + 0, + std::chrono::milliseconds(delay_ms)); return; } if (resp.err != ERR_OK) { @@ -1450,7 +1454,7 @@ void replica_stub::on_node_query_reply_scatter2(replica_stub_ptr this_, gpid id) } void replica_stub::remove_replica_on_meta_server(const app_info &info, - const partition_configuration &config) + const partition_configuration &pc) { if (FLAGS_fd_disabled) { return; @@ -1460,12 +1464,12 @@ void replica_stub::remove_replica_on_meta_server(const app_info &info, std::shared_ptr request(new configuration_update_request); request->info = info; - request->config = config; + request->config = pc; request->config.ballot++; SET_IP_AND_HOST_PORT(*request, node, primary_address(), _primary_host_port); request->type = config_type::CT_DOWNGRADE_TO_INACTIVE; - if (_primary_host_port == config.hp_primary) { + if (_primary_host_port == pc.hp_primary) { RESET_IP_AND_HOST_PORT(request->config, primary); } else if (replica_helper::remove_node(primary_address(), request->config.secondaries) && replica_helper::remove_node(_primary_host_port, request->config.hp_secondaries)) { @@ -1630,7 +1634,7 @@ void replica_stub::on_replicas_stat() uint64_t splitting_max_duration_time_ms = 0; uint64_t splitting_max_async_learn_time_ms = 0; uint64_t splitting_max_copy_file_size = 0; - for (const auto & [ _, rep_stat_info ] : rep_stat_info_by_gpid) { + for (const auto &[_, rep_stat_info] : rep_stat_info_by_gpid) { const auto &rep = rep_stat_info.rep; if (rep->status() == partition_status::PS_POTENTIAL_SECONDARY) { learning_count++; @@ -1806,7 +1810,7 @@ void replica_stub::open_replica( dsn::utils::filesystem::rename_path(origin_tmp_dir, origin_dir); rep = load_replica(origin_dn, origin_dir.c_str()); - FAIL_POINT_INJECT_F("mock_replica_load", [&](absl::string_view) -> void {}); + FAIL_POINT_INJECT_F("mock_replica_load", [&](std::string_view) -> void {}); } } } @@ -1995,7 +1999,7 @@ bool replica_stub::validate_replica_dir(const std::string &dir, replica *replica_stub::load_replica(dir_node *dn, const char *dir) { FAIL_POINT_INJECT_F("mock_replica_load", - [&](absl::string_view) -> replica * { return nullptr; }); + [&](std::string_view) -> replica * { return nullptr; }); app_info ai; gpid pid; @@ -2074,11 +2078,12 @@ task_ptr replica_stub::begin_close_replica(replica_ptr r) app_info a_info = *(r->get_app_info()); replica_info r_info; get_replica_info(r_info, r); - task_ptr task = tasking::enqueue(LPC_CLOSE_REPLICA, - &_tracker, - [=]() { close_replica(r); }, - 0, - std::chrono::milliseconds(delay_ms)); + task_ptr task = tasking::enqueue( + LPC_CLOSE_REPLICA, + &_tracker, + [=]() { close_replica(r); }, + 0, + std::chrono::milliseconds(delay_ms)); _closing_replicas[id] = std::make_tuple(task, r, std::move(a_info), std::move(r_info)); METRIC_VAR_INCREMENT(closing_replicas); return task; @@ -2335,6 +2340,22 @@ void replica_stub::register_ctrl_command() }); })); + _cmds.emplace_back(::dsn::command_manager::instance().register_single_command( + "replica.query-progress", + "Query the progress of decrees, including both local writes and duplications for " + "replicas specified by comma-separated list of 'app_id' or 'app_id.partition_id', " + "or all replicas for empty", + "[id1,id2,...]", + [this](const std::vector &args) { + return exec_command_on_replica(args, true, [](const replica_ptr &rep) { + std::ostringstream out; + rapidjson::OStreamWrapper wrapper(out); + dsn::json::PrettyJsonWriter writer(wrapper); + rep->encode_progress(writer); + return out.str(); + }); + })); + #ifdef DSN_ENABLE_GPERF _cmds.emplace_back(::dsn::command_manager::instance().register_bool_command( _release_tcmalloc_memory, @@ -2437,20 +2458,21 @@ replica_stub::exec_command_on_replica(const std::vector &args, std::map> results; // id => status,result for (auto &kv : choosed_rs) { replica_ptr rep = kv.second; - task_ptr tsk = tasking::enqueue(LPC_EXEC_COMMAND_ON_REPLICA, - rep->tracker(), - [rep, &func, &results_lock, &results]() { - partition_status::type status = rep->status(); - if (status != partition_status::PS_PRIMARY && - status != partition_status::PS_SECONDARY) - return; - std::string result = func(rep); - ::dsn::zauto_lock l(results_lock); - auto &value = results[rep->get_gpid()]; - value.first = status; - value.second = result; - }, - rep->get_gpid().thread_hash()); + task_ptr tsk = tasking::enqueue( + LPC_EXEC_COMMAND_ON_REPLICA, + rep->tracker(), + [rep, &func, &results_lock, &results]() { + partition_status::type status = rep->status(); + if (status != partition_status::PS_PRIMARY && + status != partition_status::PS_SECONDARY) + return; + std::string result = func(rep); + ::dsn::zauto_lock l(results_lock); + auto &value = results[rep->get_gpid()]; + value.first = status; + value.second = result; + }, + rep->get_gpid().thread_hash()); tasks.emplace_back(std::move(tsk)); } @@ -2649,7 +2671,7 @@ replica_ptr replica_stub::create_child_replica_if_not_found(gpid child_pid, const std::string &parent_dir) { FAIL_POINT_INJECT_F( - "replica_stub_create_child_replica_if_not_found", [=](absl::string_view) -> replica_ptr { + "replica_stub_create_child_replica_if_not_found", [=](std::string_view) -> replica_ptr { const auto dn = _fs_manager.create_child_replica_dir(app->app_type, child_pid, parent_dir); CHECK_NOTNULL(dn, ""); @@ -2694,14 +2716,14 @@ void replica_stub::split_replica_error_handler(gpid pid, local_execution handler dsn::error_code replica_stub::split_replica_exec(dsn::task_code code, gpid pid, local_execution handler) { - FAIL_POINT_INJECT_F("replica_stub_split_replica_exec", - [](absl::string_view) { return ERR_OK; }); + FAIL_POINT_INJECT_F("replica_stub_split_replica_exec", [](std::string_view) { return ERR_OK; }); replica_ptr replica = pid.get_app_id() == 0 ? nullptr : get_replica(pid); if (replica && handler) { - tasking::enqueue(code, - replica.get()->tracker(), - [handler, replica]() { handler(replica->get_split_manager()); }, - pid.thread_hash()); + tasking::enqueue( + code, + replica.get()->tracker(), + [handler, replica]() { handler(replica->get_split_manager()); }, + pid.thread_hash()); return ERR_OK; } LOG_WARNING("replica({}) is invalid", pid); diff --git a/src/replica/replica_stub.h b/src/replica/replica_stub.h index 516bc2303a..09f1e624bb 100644 --- a/src/replica/replica_stub.h +++ b/src/replica/replica_stub.h @@ -54,15 +54,15 @@ #include "replica.h" #include "replica/mutation_log.h" #include "replica_admin_types.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" #include "runtime/serverlet.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" #include "security/access_controller.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/flags.h" @@ -76,6 +76,7 @@ namespace dsn { class command_deregister; class message_ex; class nfs_node; + namespace security { class kms_key_provider; } // namespace security @@ -92,8 +93,9 @@ class configuration_query_by_node_response; class configuration_update_request; class potential_secondary_context; -typedef rpc_holder learn_completion_notification_rpc; typedef rpc_holder group_check_rpc; +typedef rpc_holder learn_completion_notification_rpc; + typedef rpc_holder query_replica_decree_rpc; typedef rpc_holder query_last_checkpoint_info_rpc; @@ -113,12 +115,11 @@ class test_checker; } class cold_backup_context; class replica_split_manager; - -typedef std::unordered_map replicas; typedef std::function replica_state_subscriber; +typedef std::unordered_map replicas; class replica_stub; @@ -129,6 +130,7 @@ class replica_backup_server; // The replica_stub is the *singleton* entry to access all replica managed in the same process // replica_stub(singleton) --> replica --> replication_app_base + class replica_stub : public serverlet, public ref_counter { public: @@ -341,7 +343,7 @@ class replica_stub : public serverlet, public ref_counter void on_node_query_reply_scatter(replica_stub_ptr this_, const configuration_update_request &config); void on_node_query_reply_scatter2(replica_stub_ptr this_, gpid id); - void remove_replica_on_meta_server(const app_info &info, const partition_configuration &config); + void remove_replica_on_meta_server(const app_info &info, const partition_configuration &pc); task_ptr begin_open_replica(const app_info &app, gpid id, const std::shared_ptr &req, diff --git a/src/replica/replica_throttle.cpp b/src/replica/replica_throttle.cpp index 1d5ee36efa..ada61d89ee 100644 --- a/src/replica/replica_throttle.cpp +++ b/src/replica/replica_throttle.cpp @@ -26,8 +26,8 @@ #include "common/replication.codes.h" #include "dsn.layer2_types.h" #include "replica.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/async_calls.h" +#include "rpc/rpc_message.h" +#include "task/async_calls.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" @@ -47,19 +47,20 @@ namespace replication { tasking::enqueue( \ LPC_##op_type##_THROTTLING_DELAY, \ &_tracker, \ - [ this, req = message_ptr(request) ]() { on_client_##op_type(req, true); }, \ + [this, req = message_ptr(request)]() { on_client_##op_type(req, true); }, \ get_gpid().thread_hash(), \ std::chrono::milliseconds(delay_ms)); \ METRIC_VAR_INCREMENT(throttling_delayed_##op_type##_requests); \ } else { /** type == utils::throttling_controller::REJECT **/ \ if (delay_ms > 0) { \ - tasking::enqueue(LPC_##op_type##_THROTTLING_DELAY, \ - &_tracker, \ - [ this, req = message_ptr(request) ]() { \ - response_client_##op_type(req, ERR_BUSY); \ - }, \ - get_gpid().thread_hash(), \ - std::chrono::milliseconds(delay_ms)); \ + tasking::enqueue( \ + LPC_##op_type##_THROTTLING_DELAY, \ + &_tracker, \ + [this, req = message_ptr(request)]() { \ + response_client_##op_type(req, ERR_BUSY); \ + }, \ + get_gpid().thread_hash(), \ + std::chrono::milliseconds(delay_ms)); \ } else { \ response_client_##op_type(request, ERR_BUSY); \ } \ @@ -89,11 +90,12 @@ bool replica::throttle_backup_request(message_ex *request) request->header->client.timeout_ms, 1, delay_ms); if (type != utils::throttling_controller::PASS) { if (type == utils::throttling_controller::DELAY) { - tasking::enqueue(LPC_read_THROTTLING_DELAY, - &_tracker, - [ this, req = message_ptr(request) ]() { on_client_read(req, true); }, - get_gpid().thread_hash(), - std::chrono::milliseconds(delay_ms)); + tasking::enqueue( + LPC_read_THROTTLING_DELAY, + &_tracker, + [this, req = message_ptr(request)]() { on_client_read(req, true); }, + get_gpid().thread_hash(), + std::chrono::milliseconds(delay_ms)); METRIC_VAR_INCREMENT(throttling_delayed_backup_requests); } else { /** type == utils::throttling_controller::REJECT **/ METRIC_VAR_INCREMENT(throttling_rejected_backup_requests); diff --git a/src/replica/replication_app_base.cpp b/src/replica/replication_app_base.cpp index 0455772599..0c82513dce 100644 --- a/src/replica/replication_app_base.cpp +++ b/src/replica/replication_app_base.cpp @@ -29,10 +29,10 @@ #include #include #include +#include #include #include -#include "absl/strings/string_view.h" #include "common/bulk_load_common.h" #include "common/duplication_common.h" #include "common/replica_envs.h" @@ -43,10 +43,10 @@ #include "mutation.h" #include "replica.h" #include "replica/replication_app_base.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/binary_reader.h" #include "utils/binary_writer.h" @@ -281,7 +281,7 @@ int replication_app_base::on_batched_write_requests(int64_t decree, error_code replication_app_base::apply_mutation(const mutation *mu) { FAIL_POINT_INJECT_F("replication_app_base_apply_mutation", - [](absl::string_view) { return ERR_OK; }); + [](std::string_view) { return ERR_OK; }); CHECK_EQ_PREFIX(mu->data.header.decree, last_committed_decree() + 1); CHECK_EQ_PREFIX(mu->data.updates.size(), mu->client_requests.size()); diff --git a/src/replica/replication_app_base.h b/src/replica/replication_app_base.h index c3559c095d..2a88618f64 100644 --- a/src/replica/replication_app_base.h +++ b/src/replica/replication_app_base.h @@ -238,7 +238,13 @@ class replication_app_base : public replica_base // // Query methods. // + + // Get the decree of the last flushed mutation. -1 means failed to get. + virtual replication::decree last_flushed_decree() const = 0; + + // Get the decree of the last created checkpoint. virtual replication::decree last_durable_decree() const = 0; + // The return type is generated by storage engine, e.g. rocksdb::Status::Code, 0 always mean OK. virtual int on_request(message_ex *request) WARN_UNUSED_RESULT = 0; diff --git a/src/replica/split/replica_split_manager.cpp b/src/replica/split/replica_split_manager.cpp index 9844c8ee68..0e2c9422bb 100644 --- a/src/replica/split/replica_split_manager.cpp +++ b/src/replica/split/replica_split_manager.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include "common/partition_split_common.h" @@ -34,13 +35,13 @@ #include "replica/replica_context.h" #include "replica/replica_stub.h" #include "replica/replication_app_base.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" +#include "task/async_calls.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/chrono_literals.h" #include "utils/defer.h" @@ -48,7 +49,6 @@ #include "utils/filesystem.h" #include "utils/flags.h" #include "utils/fmt_logging.h" -#include "absl/strings/string_view.h" #include "utils/thread_access_checker.h" METRIC_DEFINE_counter(replica, @@ -172,7 +172,7 @@ void replica_split_manager::child_init_replica(gpid parent_gpid, const host_port &primary_host_port, ballot init_ballot) // on child partition { - FAIL_POINT_INJECT_F("replica_child_init_replica", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_child_init_replica", [](std::string_view) {}); if (status() != partition_status::PS_INACTIVE) { LOG_WARNING_PREFIX("wrong status({})", enum_to_string(status())); @@ -218,7 +218,7 @@ void replica_split_manager::child_init_replica(gpid parent_gpid, // ThreadPool: THREAD_POOL_REPLICATION void replica_split_manager::child_check_split_context() // on child partition { - FAIL_POINT_INJECT_F("replica_child_check_split_context", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_child_check_split_context", [](std::string_view) {}); if (status() != partition_status::PS_PARTITION_SPLIT) { LOG_ERROR_PREFIX("wrong status({})", enum_to_string(status())); @@ -246,7 +246,7 @@ void replica_split_manager::child_check_split_context() // on child partition // ThreadPool: THREAD_POOL_REPLICATION bool replica_split_manager::parent_check_states() // on parent partition { - FAIL_POINT_INJECT_F("replica_parent_check_states", [](absl::string_view) { return true; }); + FAIL_POINT_INJECT_F("replica_parent_check_states", [](std::string_view) { return true; }); if (_split_status != split_status::SPLITTING || _child_init_ballot != get_ballot() || _child_gpid.get_app_id() == 0 || @@ -387,7 +387,7 @@ void replica_split_manager::child_learn_states(learn_state lstate, uint64_t total_file_size, decree last_committed_decree) // on child partition { - FAIL_POINT_INJECT_F("replica_child_learn_states", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_child_learn_states", [](std::string_view) {}); if (status() != partition_status::PS_PARTITION_SPLIT) { LOG_ERROR_PREFIX("wrong status({})", enum_to_string(status())); @@ -455,7 +455,7 @@ replica_split_manager::child_apply_private_logs(std::vector plog_fi uint64_t total_file_size, decree last_committed_decree) // on child partition { - FAIL_POINT_INJECT_F("replica_child_apply_private_logs", [](absl::string_view arg) { + FAIL_POINT_INJECT_F("replica_child_apply_private_logs", [](std::string_view arg) { return error_code::try_get(arg.data(), ERR_OK); }); @@ -484,21 +484,21 @@ replica_split_manager::child_apply_private_logs(std::vector plog_fi }); // replay private log - ec = mutation_log::replay(plog_files, - [&plist](int log_length, mutation_ptr &mu) { - decree d = mu->data.header.decree; - if (d <= plist.last_committed_decree()) { - return false; - } - mutation_ptr origin_mu = plist.get_mutation_by_decree(d); - if (origin_mu != nullptr && - origin_mu->data.header.ballot >= mu->data.header.ballot) { - return false; - } - plist.prepare(mu, partition_status::PS_SECONDARY); - return true; - }, - offset); + ec = mutation_log::replay( + plog_files, + [&plist](int log_length, mutation_ptr &mu) { + decree d = mu->data.header.decree; + if (d <= plist.last_committed_decree()) { + return false; + } + mutation_ptr origin_mu = plist.get_mutation_by_decree(d); + if (origin_mu != nullptr && origin_mu->data.header.ballot >= mu->data.header.ballot) { + return false; + } + plist.prepare(mu, partition_status::PS_SECONDARY); + return true; + }, + offset); if (ec != ERR_OK) { LOG_ERROR_PREFIX( "replay private_log files failed, file count={}, app last_committed_decree={}", @@ -547,7 +547,7 @@ replica_split_manager::child_apply_private_logs(std::vector plog_fi // ThreadPool: THREAD_POOL_REPLICATION void replica_split_manager::child_catch_up_states() // on child partition { - FAIL_POINT_INJECT_F("replica_child_catch_up_states", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_child_catch_up_states", [](std::string_view) {}); if (status() != partition_status::PS_PARTITION_SPLIT) { LOG_ERROR_PREFIX("wrong status, status is {}", enum_to_string(status())); @@ -612,7 +612,7 @@ void replica_split_manager::child_catch_up_states() // on child partition // ThreadPool: THREAD_POOL_REPLICATION void replica_split_manager::child_notify_catch_up() // on child partition { - FAIL_POINT_INJECT_F("replica_child_notify_catch_up", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_child_notify_catch_up", [](std::string_view) {}); std::unique_ptr request = std::make_unique(); request->parent_gpid = _replica->_split_states.parent_gpid; @@ -737,7 +737,7 @@ void replica_split_manager::parent_handle_child_catch_up( // ThreadPool: THREAD_POOL_REPLICATION void replica_split_manager::parent_check_sync_point_commit(decree sync_point) // on primary parent { - FAIL_POINT_INJECT_F("replica_parent_check_sync_point_commit", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_parent_check_sync_point_commit", [](std::string_view) {}); if (status() != partition_status::PS_PRIMARY) { LOG_ERROR_PREFIX("wrong status({})", enum_to_string(status())); parent_handle_split_error("check_sync_point_commit failed, primary changed", false); @@ -776,11 +776,11 @@ void replica_split_manager::update_child_group_partition_count( } if (!_replica->_primary_states.learners.empty() || - _replica->_primary_states.membership.hp_secondaries.size() + 1 < - _replica->_primary_states.membership.max_replica_count) { + _replica->_primary_states.pc.hp_secondaries.size() + 1 < + _replica->_primary_states.pc.max_replica_count) { LOG_ERROR_PREFIX("there are {} learners or not have enough secondaries(count is {})", _replica->_primary_states.learners.size(), - _replica->_primary_states.membership.hp_secondaries.size()); + _replica->_primary_states.pc.hp_secondaries.size()); parent_handle_split_error( "update_child_group_partition_count failed, have learner or lack of secondary", true); return; @@ -803,7 +803,7 @@ void replica_split_manager::parent_send_update_partition_count_request( int32_t new_partition_count, std::shared_ptr> ¬_replied_addresses) // on primary parent { - FAIL_POINT_INJECT_F("replica_parent_update_partition_count_request", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_parent_update_partition_count_request", [](std::string_view) {}); CHECK_EQ_PREFIX(status(), partition_status::PS_PRIMARY); @@ -964,7 +964,7 @@ void replica_split_manager::on_update_child_group_partition_count_reply( // ThreadPool: THREAD_POOL_REPLICATION void replica_split_manager::register_child_on_meta(ballot b) // on primary parent { - FAIL_POINT_INJECT_F("replica_register_child_on_meta", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_register_child_on_meta", [](std::string_view) {}); if (status() != partition_status::PS_PRIMARY || _split_status != split_status::SPLITTING) { LOG_ERROR_PREFIX( @@ -987,17 +987,17 @@ void replica_split_manager::register_child_on_meta(ballot b) // on primary paren return; } - partition_configuration child_config = _replica->_primary_states.membership; - child_config.ballot++; - child_config.last_committed_decree = 0; - CLEAR_IP_AND_HOST_PORT(child_config, last_drops); - child_config.pid.set_partition_index(_replica->_app_info.partition_count + - get_gpid().get_partition_index()); + auto child_pc = _replica->_primary_states.pc; + child_pc.ballot++; + child_pc.last_committed_decree = 0; + CLEAR_IP_AND_HOST_PORT(child_pc, last_drops); + child_pc.pid.set_partition_index(_replica->_app_info.partition_count + + get_gpid().get_partition_index()); register_child_request request; request.app = _replica->_app_info; - request.child_config = child_config; - request.parent_config = _replica->_primary_states.membership; + request.child_config = child_pc; + request.parent_config = _replica->_primary_states.pc; SET_IP_AND_HOST_PORT(request, primary, _stub->primary_address(), _stub->primary_host_port()); // reject client request @@ -1013,7 +1013,7 @@ void replica_split_manager::register_child_on_meta(ballot b) // on primary paren void replica_split_manager::parent_send_register_request( const register_child_request &request) // on primary parent { - FAIL_POINT_INJECT_F("replica_parent_send_register_request", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_parent_send_register_request", [](std::string_view) {}); CHECK_EQ_PREFIX(status(), partition_status::PS_INACTIVE); LOG_INFO_PREFIX( @@ -1043,7 +1043,7 @@ void replica_split_manager::on_register_child_on_meta_reply( const register_child_request &request, const register_child_response &response) // on primary parent { - FAIL_POINT_INJECT_F("replica_on_register_child_on_meta_reply", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_on_register_child_on_meta_reply", [](std::string_view) {}); _replica->_checker.only_one_thread_access(); @@ -1141,8 +1141,7 @@ void replica_split_manager::on_register_child_on_meta_reply( } // ThreadPool: THREAD_POOL_REPLICATION -void replica_split_manager::child_partition_active( - const partition_configuration &config) // on child +void replica_split_manager::child_partition_active(const partition_configuration &pc) // on child { if (status() != partition_status::PS_PARTITION_SPLIT) { LOG_WARNING_PREFIX("child partition has been active, status={}", enum_to_string(status())); @@ -1151,7 +1150,7 @@ void replica_split_manager::child_partition_active( _replica->_primary_states.last_prepare_decree_on_new_primary = _replica->_prepare_list->max_decree(); - _replica->update_configuration(config); + _replica->update_configuration(pc); METRIC_VAR_INCREMENT(splitting_successful_count); LOG_INFO_PREFIX("child partition is active, status={}", enum_to_string(status())); } @@ -1223,13 +1222,13 @@ void replica_split_manager::trigger_primary_parent_split( _meta_split_status = meta_split_status; if (meta_split_status == split_status::SPLITTING) { if (!_replica->_primary_states.learners.empty() || - _replica->_primary_states.membership.hp_secondaries.size() + 1 < - _replica->_primary_states.membership.max_replica_count) { + _replica->_primary_states.pc.hp_secondaries.size() + 1 < + _replica->_primary_states.pc.max_replica_count) { LOG_WARNING_PREFIX( "there are {} learners or not have enough secondaries(count is {}), wait for " "next round", _replica->_primary_states.learners.size(), - _replica->_primary_states.membership.hp_secondaries.size()); + _replica->_primary_states.pc.hp_secondaries.size()); return; } @@ -1508,7 +1507,7 @@ void replica_split_manager::primary_parent_handle_stop_split( } } // all secondaries have already stop split succeed - if (count == _replica->_primary_states.membership.max_replica_count - 1) { + if (count == _replica->_primary_states.pc.max_replica_count - 1) { _replica->_primary_states.cleanup_split_states(); parent_send_notify_stop_request(req->meta_split_status); } @@ -1518,7 +1517,7 @@ void replica_split_manager::primary_parent_handle_stop_split( void replica_split_manager::parent_send_notify_stop_request( split_status::type meta_split_status) // on primary parent { - FAIL_POINT_INJECT_F("replica_parent_send_notify_stop_request", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("replica_parent_send_notify_stop_request", [](std::string_view) {}); auto meta_address = dsn::dns_resolver::instance().resolve_address(_stub->_failure_detector->get_servers()); std::unique_ptr req = std::make_unique(); diff --git a/src/replica/split/replica_split_manager.h b/src/replica/split/replica_split_manager.h index 982e1c2cb3..09c04c3a37 100644 --- a/src/replica/split/replica_split_manager.h +++ b/src/replica/split/replica_split_manager.h @@ -150,7 +150,7 @@ class replica_split_manager : replica_base void parent_send_register_request(const register_child_request &request); // child partition has been registered on meta_server, could be active - void child_partition_active(const partition_configuration &config); + void child_partition_active(const partition_configuration &pc); // return true if parent status is valid bool parent_check_states(); diff --git a/src/replica/split/test/replica_split_test.cpp b/src/replica/split/test/replica_split_test.cpp index 61f8010788..2f18fe751e 100644 --- a/src/replica/split/test/replica_split_test.cpp +++ b/src/replica/split/test/replica_split_test.cpp @@ -40,10 +40,10 @@ #include "replica/split/replica_split_manager.h" #include "replica/test/mock_utils.h" #include "replica/test/replica_test_base.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "task/task.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/fail_point.h" @@ -186,16 +186,16 @@ class replica_split_test : public replica_test_base void mock_parent_primary_configuration(bool lack_of_secondary = false) { - partition_configuration config; - config.max_replica_count = 3; - config.pid = PARENT_GPID; - config.ballot = INIT_BALLOT; - SET_IP_AND_HOST_PORT_BY_DNS(config, primary, PRIMARY); - ADD_IP_AND_HOST_PORT_BY_DNS(config, secondaries, SECONDARY); + partition_configuration pc; + pc.max_replica_count = 3; + pc.pid = PARENT_GPID; + pc.ballot = INIT_BALLOT; + SET_IP_AND_HOST_PORT_BY_DNS(pc, primary, PRIMARY); + ADD_IP_AND_HOST_PORT_BY_DNS(pc, secondaries, SECONDARY); if (!lack_of_secondary) { - ADD_IP_AND_HOST_PORT_BY_DNS(config, secondaries, SECONDARY2); + ADD_IP_AND_HOST_PORT_BY_DNS(pc, secondaries, SECONDARY2); } - _parent_replica->set_primary_partition_configuration(config); + _parent_replica->set_primary_partition_configuration(pc); } void mock_update_child_partition_count_request(update_child_group_partition_count_request &req, @@ -453,15 +453,15 @@ class replica_split_test : public replica_test_base req.partition_count = OLD_PARTITION_COUNT; req.pid = PARENT_GPID; - partition_configuration child_config; - child_config.pid = CHILD_GPID; - child_config.ballot = INIT_BALLOT + 1; - child_config.last_committed_decree = 0; + partition_configuration child_pc; + child_pc.pid = CHILD_GPID; + child_pc.ballot = INIT_BALLOT + 1; + child_pc.last_committed_decree = 0; query_child_state_response resp; resp.err = ERR_OK; resp.__set_partition_count(NEW_PARTITION_COUNT); - resp.__set_child_config(child_config); + resp.__set_child_config(child_pc); _parent_split_mgr->on_query_child_state_reply(ERR_OK, req, resp); _parent_split_mgr->tracker()->wait_outstanding_tasks(); diff --git a/src/replica/split/test/run.sh b/src/replica/split/test/run.sh index bd75b96198..66d4340166 100755 --- a/src/replica/split/test/run.sh +++ b/src/replica/split/test/run.sh @@ -45,7 +45,7 @@ fi ./dsn_replica_split_test if [ $? -ne 0 ]; then - tail -n 100 data/log/log.1.txt + tail -n 100 `find . -name pegasus.log.*` if [ -f core ]; then gdb ./dsn_replica_split_test core -ex "bt" fi diff --git a/src/replica/storage/simple_kv/run.sh b/src/replica/storage/simple_kv/run.sh index cf7c0380c2..debaf5ba88 100755 --- a/src/replica/storage/simple_kv/run.sh +++ b/src/replica/storage/simple_kv/run.sh @@ -62,9 +62,9 @@ if [ -f core ] || ! grep ERR_OK out > /dev/null ; then ls -l echo "---- head -n 100 out ----" head -n 100 out - if [ -f data/logs/log.1.txt ]; then - echo "---- tail -n 100 log.1.txt ----" - tail -n 100 data/logs/log.1.txt + if [ `find data/logs -name pegasus.log.* | wc -l` -ne 0 ]; then + echo "---- tail -n 100 pegasus.log.* ----" + tail -n 100 `find data/logs -name pegasus.log.*` fi if [ -f core ]; then echo "---- gdb ./dsn.replication.simple_kv core ----" diff --git a/src/replica/storage/simple_kv/simple_kv.app.example.h b/src/replica/storage/simple_kv/simple_kv.app.example.h index d1345b79ec..97478d27b5 100644 --- a/src/replica/storage/simple_kv/simple_kv.app.example.h +++ b/src/replica/storage/simple_kv/simple_kv.app.example.h @@ -48,10 +48,11 @@ class simple_kv_client_app : public ::dsn::service_app const auto hp = host_port::from_string(args[2]); _simple_kv_client.reset(new simple_kv_client(args[1].c_str(), {hp}, args[3].c_str())); - _timer = ::dsn::tasking::enqueue_timer(LPC_SIMPLE_KV_TEST_TIMER, - &_tracker, - [this] { on_test_timer(); }, - std::chrono::seconds(1)); + _timer = ::dsn::tasking::enqueue_timer( + LPC_SIMPLE_KV_TEST_TIMER, + &_tracker, + [this] { on_test_timer(); }, + std::chrono::seconds(1)); return ::dsn::ERR_OK; } diff --git a/src/replica/storage/simple_kv/simple_kv.client.h b/src/replica/storage/simple_kv/simple_kv.client.h index f6bc2c2f8b..3fc12cd919 100644 --- a/src/replica/storage/simple_kv/simple_kv.client.h +++ b/src/replica/storage/simple_kv/simple_kv.client.h @@ -25,13 +25,15 @@ */ #pragma once + #include -#include "utils/optional.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/task/async_calls.h" + #include "client/partition_resolver.h" +#include "rpc/dns_resolver.h" #include "simple_kv.code.definition.h" #include "simple_kv_types.h" +#include "task/async_calls.h" +#include "utils/optional.h" namespace dsn { namespace replication { diff --git a/src/replica/storage/simple_kv/simple_kv.code.definition.h b/src/replica/storage/simple_kv/simple_kv.code.definition.h index b668f3a950..370a2eaba6 100644 --- a/src/replica/storage/simple_kv/simple_kv.code.definition.h +++ b/src/replica/storage/simple_kv/simple_kv.code.definition.h @@ -37,6 +37,6 @@ DEFINE_STORAGE_WRITE_RPC_CODE(RPC_SIMPLE_KV_SIMPLE_KV_APPEND, ALLOW_BATCH, NOT_I // test timer task code DEFINE_TASK_CODE(LPC_SIMPLE_KV_TEST_TIMER, TASK_PRIORITY_COMMON, ::dsn::THREAD_POOL_DEFAULT) -} -} -} +} // namespace application +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/simple_kv.server.h b/src/replica/storage/simple_kv/simple_kv.server.h index 84d6ab2c82..6acfc85a05 100644 --- a/src/replica/storage/simple_kv/simple_kv.server.h +++ b/src/replica/storage/simple_kv/simple_kv.server.h @@ -95,6 +95,6 @@ class simple_kv_service : public replication_app_base, public storage_serverlet< svc->on_append(pr, reply); } }; -} -} -} +} // namespace application +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/simple_kv.server.impl.cpp b/src/replica/storage/simple_kv/simple_kv.server.impl.cpp index a0002bc54a..473d322818 100644 --- a/src/replica/storage/simple_kv/simple_kv.server.impl.cpp +++ b/src/replica/storage/simple_kv/simple_kv.server.impl.cpp @@ -329,6 +329,6 @@ ::dsn::error_code simple_kv_service_impl::storage_apply_checkpoint(chkpt_apply_m } } } -} -} -} // namespace +} // namespace application +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/simple_kv.server.impl.h b/src/replica/storage/simple_kv/simple_kv.server.impl.h index 240ed87899..d27608df19 100644 --- a/src/replica/storage/simple_kv/simple_kv.server.impl.h +++ b/src/replica/storage/simple_kv/simple_kv.server.impl.h @@ -70,7 +70,9 @@ class simple_kv_service_impl : public simple_kv_service virtual ::dsn::error_code stop(bool cleanup = false) override; - virtual int64_t last_durable_decree() const override { return _last_durable_decree; } + int64_t last_flushed_decree() const override { return _last_durable_decree; } + + int64_t last_durable_decree() const override { return _last_durable_decree; } virtual ::dsn::error_code sync_checkpoint() override; @@ -118,6 +120,6 @@ class simple_kv_service_impl : public simple_kv_service simple_kv _store; int64_t _last_durable_decree; }; -} -} -} // namespace +} // namespace application +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/case.cpp b/src/replica/storage/simple_kv/test/case.cpp index 730b04e93c..0689c13315 100644 --- a/src/replica/storage/simple_kv/test/case.cpp +++ b/src/replica/storage/simple_kv/test/case.cpp @@ -46,10 +46,10 @@ #include "checker.h" #include "replica/replica_stub.h" #include "replica/storage/simple_kv/test/common.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_message.h" #include "runtime/service_engine.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/task.h" +#include "task/task_code.h" #include "simple_kv.server.impl.h" #include "utils/fmt_logging.h" #include "utils/ports.h" @@ -534,7 +534,9 @@ void event_on_rpc::init(message_ex *msg, task *tsk) if (msg != nullptr) { _trace_id = fmt::sprintf("%016llx", msg->header->trace_id); _rpc_name = msg->header->rpc_name; - _from = address_to_node(host_port::from_address(msg->header->from_address)); + const auto hp = host_port::from_address(msg->header->from_address); + CHECK(hp, "'{}' can not be reverse resolved", msg->header->from_address); + _from = address_to_node(hp); _to = address_to_node(msg->to_host_port); } } @@ -1393,6 +1395,6 @@ void test_case::internal_register_creator(const std::string &name, case_line_cre CHECK(_creators.find(name) == _creators.end(), ""); _creators[name] = creator; } -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/case.h b/src/replica/storage/simple_kv/test/case.h index 4e22a3a9b5..1ecd57d9c3 100644 --- a/src/replica/storage/simple_kv/test/case.h +++ b/src/replica/storage/simple_kv/test/case.h @@ -35,7 +35,7 @@ #include "common.h" #include "meta_admin_types.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/error_code.h" #include "utils/fmt_utils.h" #include "utils/singleton.h" @@ -43,6 +43,7 @@ namespace dsn { class aio_task; + class message_ex; class rpc_request_task; class rpc_response_task; @@ -511,9 +512,9 @@ class test_case : public dsn::utils::singleton int _null_loop_count; dsn::zsemaphore _client_sema; }; -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn USER_DEFINED_STRUCTURE_FORMATTER(::dsn::replication::test::case_line); USER_DEFINED_STRUCTURE_FORMATTER(::dsn::replication::test::event); diff --git a/src/replica/storage/simple_kv/test/checker.cpp b/src/replica/storage/simple_kv/test/checker.cpp index ae59ec4958..1f97cabb1d 100644 --- a/src/replica/storage/simple_kv/test/checker.cpp +++ b/src/replica/storage/simple_kv/test/checker.cpp @@ -50,8 +50,8 @@ #include "replica/replica_stub.h" #include "replica/replication_service_app.h" #include "replica/storage/simple_kv/test/common.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_engine.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_engine.h" #include "runtime/service_app.h" #include "runtime/service_engine.h" #include "runtime/tool_api.h" @@ -323,7 +323,7 @@ bool test_checker::get_current_config(parti_config &config) meta_service_app *meta = meta_leader(); if (meta == nullptr) return false; - partition_configuration c; + partition_configuration pc; // we should never try to acquire lock when we are in checker. Because we are the only // thread that is running. @@ -332,11 +332,8 @@ bool test_checker::get_current_config(parti_config &config) // the rDSN's //"enqueue,dequeue and lock..." - // meta->_service->_state->query_configuration_by_gpid(g_default_gpid, c); const meta_view view = meta->_service->_state->get_meta_view(); - const partition_configuration *pc = get_config(*(view.apps), g_default_gpid); - c = *pc; - config.convert_from(c); + config.convert_from(*get_config(*(view.apps), g_default_gpid)); return true; } diff --git a/src/replica/storage/simple_kv/test/checker.h b/src/replica/storage/simple_kv/test/checker.h index c94059edda..951b459068 100644 --- a/src/replica/storage/simple_kv/test/checker.h +++ b/src/replica/storage/simple_kv/test/checker.h @@ -33,18 +33,20 @@ #include "common.h" #include "meta/meta_data.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "runtime/simulator.h" #include "utils/singleton.h" namespace dsn { class service_app; + namespace service { class meta_service_app; } // namespace service namespace replication { class replica_configuration; + class replication_service_app; namespace test { @@ -116,6 +118,6 @@ class wrap_checker : public dsn::tools::checker }; void install_checkers(); -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/client.cpp b/src/replica/storage/simple_kv/test/client.cpp index dbafc10d20..23b20c6b49 100644 --- a/src/replica/storage/simple_kv/test/client.cpp +++ b/src/replica/storage/simple_kv/test/client.cpp @@ -37,15 +37,15 @@ #include "common/replication_other_types.h" #include "replica/storage/simple_kv/simple_kv.client.h" #include "replica/storage/simple_kv/test/common.h" +#include "rpc/dns_resolver.h" +#include "rpc/group_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_code.h" #include "simple_kv_types.h" +#include "task/async_calls.h" +#include "task/task_code.h" #include "utils/flags.h" #include "utils/fmt_logging.h" #include "utils/threadpool_code.h" @@ -141,11 +141,12 @@ void simple_kv_client_app::begin_write(int id, ctx->req.value = value; ctx->timeout_ms = timeout_ms; auto &req = ctx->req; - _simple_kv_client->write(req, - [ctx](error_code err, int32_t resp) { - test_case::instance().on_end_write(ctx->id, err, resp); - }, - std::chrono::milliseconds(timeout_ms)); + _simple_kv_client->write( + req, + [ctx](error_code err, int32_t resp) { + test_case::instance().on_end_write(ctx->id, err, resp); + }, + std::chrono::milliseconds(timeout_ms)); } void simple_kv_client_app::send_config_to_meta(const host_port &receiver, @@ -183,11 +184,12 @@ void simple_kv_client_app::begin_read(int id, const std::string &key, int timeou ctx->id = id; ctx->key = key; ctx->timeout_ms = timeout_ms; - _simple_kv_client->read(key, - [ctx](error_code err, std::string &&resp) { - test_case::instance().on_end_read(ctx->id, err, resp); - }, - std::chrono::milliseconds(timeout_ms)); + _simple_kv_client->read( + key, + [ctx](error_code err, std::string &&resp) { + test_case::instance().on_end_read(ctx->id, err, resp); + }, + std::chrono::milliseconds(timeout_ms)); } } // namespace test } // namespace replication diff --git a/src/replica/storage/simple_kv/test/client.h b/src/replica/storage/simple_kv/test/client.h index 623863d4ca..75d6311461 100644 --- a/src/replica/storage/simple_kv/test/client.h +++ b/src/replica/storage/simple_kv/test/client.h @@ -31,9 +31,9 @@ #include #include "meta_admin_types.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "runtime/service_app.h" -#include "runtime/task/task_tracker.h" +#include "task/task_tracker.h" #include "utils/error_code.h" namespace dsn { @@ -68,6 +68,6 @@ class simple_kv_client_app : public ::dsn::service_app host_port _service_addr; dsn::task_tracker _tracker; }; -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/common.cpp b/src/replica/storage/simple_kv/test/common.cpp index 5d2e8ced29..61dd9a5131 100644 --- a/src/replica/storage/simple_kv/test/common.cpp +++ b/src/replica/storage/simple_kv/test/common.cpp @@ -315,16 +315,16 @@ bool parti_config::from_string(const std::string &str) return true; } -void parti_config::convert_from(const partition_configuration &c) +void parti_config::convert_from(const partition_configuration &pc) { - pid = c.pid; - ballot = c.ballot; - primary = address_to_node(c.hp_primary); - for (auto &s : c.hp_secondaries) { - secondaries.push_back(address_to_node(s)); + pid = pc.pid; + ballot = pc.ballot; + primary = address_to_node(pc.hp_primary); + for (const auto &secondary : pc.hp_secondaries) { + secondaries.push_back(address_to_node(secondary)); } std::sort(secondaries.begin(), secondaries.end()); } -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/common.h b/src/replica/storage/simple_kv/test/common.h index f120b167da..c46731c3c6 100644 --- a/src/replica/storage/simple_kv/test/common.h +++ b/src/replica/storage/simple_kv/test/common.h @@ -37,8 +37,8 @@ #include "common/gpid.h" #include "common/replication_other_types.h" #include "metadata_types.h" +#include "rpc/rpc_host_port.h" #include "utils/fmt_utils.h" -#include "runtime/rpc/rpc_host_port.h" namespace dsn { class partition_configuration; @@ -198,16 +198,16 @@ struct parti_config bool operator<(const parti_config &o) const { return pid == o.pid && ballot < o.ballot; } std::string to_string() const; bool from_string(const std::string &str); - void convert_from(const partition_configuration &c); + void convert_from(const partition_configuration &pc); friend std::ostream &operator<<(std::ostream &os, const parti_config &pc) { return os << pc.to_string(); } }; -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn USER_DEFINED_STRUCTURE_FORMATTER(::dsn::replication::test::parti_config); USER_DEFINED_STRUCTURE_FORMATTER(::dsn::replication::test::replica_id); diff --git a/src/replica/storage/simple_kv/test/injector.cpp b/src/replica/storage/simple_kv/test/injector.cpp index 0b3f856ba6..d1c12374b0 100644 --- a/src/replica/storage/simple_kv/test/injector.cpp +++ b/src/replica/storage/simple_kv/test/injector.cpp @@ -30,8 +30,8 @@ #include "case.h" #include "checker.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/join_point.h" namespace dsn { @@ -198,6 +198,6 @@ void test_injector::install(service_spec &svc_spec) } test_injector::test_injector(const char *name) : toollet(name) {} -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/injector.h b/src/replica/storage/simple_kv/test/injector.h index b1fc30ff06..7419da9f91 100644 --- a/src/replica/storage/simple_kv/test/injector.h +++ b/src/replica/storage/simple_kv/test/injector.h @@ -40,6 +40,6 @@ class test_injector : public dsn::tools::toollet test_injector(const char *name); virtual void install(service_spec &spec); }; -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/run.sh b/src/replica/storage/simple_kv/test/run.sh index 9c8d07328b..d09bf716d0 100755 --- a/src/replica/storage/simple_kv/test/run.sh +++ b/src/replica/storage/simple_kv/test/run.sh @@ -52,8 +52,8 @@ function run_single() echo "${bin} ${prefix}.ini ${prefix}.act" ${bin} ${prefix}.ini ${prefix}.act ret=$? - if find . -name log.1.txt &>/dev/null; then - log=`find . -name log.1.txt` + if [ `find . -name pegasus.log.* | wc -l` -ne 0 ]; then + log=`find . -name pegasus.log.*` cat ${log} | grep -v FAILURE_DETECT | grep -v BEACON | grep -v beacon | grep -v THREAD_POOL_FD >${prefix}.log rm ${log} fi diff --git a/src/replica/storage/simple_kv/test/simple_kv.server.impl.cpp b/src/replica/storage/simple_kv/test/simple_kv.server.impl.cpp index 19db55f6a5..56fc195bf9 100644 --- a/src/replica/storage/simple_kv/test/simple_kv.server.impl.cpp +++ b/src/replica/storage/simple_kv/test/simple_kv.server.impl.cpp @@ -1,28 +1,28 @@ /* -* The MIT License (MIT) -* -* Copyright (c) 2015 Microsoft Corporation -* -* -=- Robust Distributed System Nucleus (rDSN) -=- -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in -* all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -* THE SOFTWARE. -*/ + * The MIT License (MIT) + * + * Copyright (c) 2015 Microsoft Corporation + * + * -=- Robust Distributed System Nucleus (rDSN) -=- + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ #include "simple_kv.server.impl.h" @@ -362,6 +362,6 @@ ::dsn::error_code simple_kv_service_impl::storage_apply_checkpoint(chkpt_apply_m } } } -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/storage/simple_kv/test/simple_kv.server.impl.h b/src/replica/storage/simple_kv/test/simple_kv.server.impl.h index 1235cdbc68..a66c30583e 100644 --- a/src/replica/storage/simple_kv/test/simple_kv.server.impl.h +++ b/src/replica/storage/simple_kv/test/simple_kv.server.impl.h @@ -1,28 +1,28 @@ /* -* The MIT License (MIT) -* -* Copyright (c) 2015 Microsoft Corporation -* -* -=- Robust Distributed System Nucleus (rDSN) -=- -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in -* all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -* THE SOFTWARE. -*/ + * The MIT License (MIT) + * + * Copyright (c) 2015 Microsoft Corporation + * + * -=- Robust Distributed System Nucleus (rDSN) -=- + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ #pragma once @@ -82,7 +82,9 @@ class simple_kv_service_impl : public application::simple_kv_service virtual ::dsn::error_code stop(bool cleanup = false) override; - virtual int64_t last_durable_decree() const override { return _last_durable_decree; } + int64_t last_flushed_decree() const override { return _last_durable_decree; } + + int64_t last_durable_decree() const override { return _last_durable_decree; } virtual ::dsn::error_code sync_checkpoint() override; @@ -131,6 +133,6 @@ class simple_kv_service_impl : public application::simple_kv_service int64_t _last_durable_decree; }; -} -} -} +} // namespace test +} // namespace replication +} // namespace dsn diff --git a/src/replica/test/backup_block_service_mock.h b/src/replica/test/backup_block_service_mock.h index 4ac8bc95fe..36a2671182 100644 --- a/src/replica/test/backup_block_service_mock.h +++ b/src/replica/test/backup_block_service_mock.h @@ -21,9 +21,9 @@ #include "utils/filesystem.h" #include "utils/error_code.h" #include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "common/gpid.h" -#include "runtime/task/task_tracker.h" +#include "task/task_tracker.h" #include "block_service/block_service.h" #include "replica/replica_context.h" #include "replication_service_test_app.h" diff --git a/src/replica/test/log_block_test.cpp b/src/replica/test/log_block_test.cpp index e667a59cef..7499e13c87 100644 --- a/src/replica/test/log_block_test.cpp +++ b/src/replica/test/log_block_test.cpp @@ -112,7 +112,8 @@ TEST_P(log_appender_test, log_block_full) { log_appender appender(10); for (int i = 0; i < 1024; i++) { // more than DEFAULT_MAX_BLOCK_BYTES - appender.append_mutation(create_test_mutation(1 + i, std::string(1024, 'a')), nullptr); + appender.append_mutation(create_test_mutation(1 + i, std::string(1024, 'a').c_str()), + nullptr); } ASSERT_EQ(appender.mutations().size(), 1024); // two log_block_header blobs @@ -136,7 +137,8 @@ TEST_P(log_appender_test, read_log_block) { log_appender appender(10); for (int i = 0; i < 1024; i++) { // more than DEFAULT_MAX_BLOCK_BYTES - appender.append_mutation(create_test_mutation(1 + i, std::string(1024, 'a')), nullptr); + appender.append_mutation(create_test_mutation(1 + i, std::string(1024, 'a').c_str()), + nullptr); } ASSERT_EQ(appender.all_blocks().size(), 2); diff --git a/src/replica/test/log_file_test.cpp b/src/replica/test/log_file_test.cpp index 32f491b567..c4abb02b20 100644 --- a/src/replica/test/log_file_test.cpp +++ b/src/replica/test/log_file_test.cpp @@ -63,14 +63,15 @@ TEST_P(log_file_test, commit_log_blocks) for (int i = 0; i < 5; i++) { appender->append_mutation(create_test_mutation(1 + i, "test"), nullptr); } - auto tsk = _logf->commit_log_blocks(*appender, - LPC_WRITE_REPLICATION_LOG_PRIVATE, - nullptr, - [&](error_code err, size_t sz) { - ASSERT_EQ(err, ERR_OK); - ASSERT_EQ(sz, appender->size()); - }, - 0); + auto tsk = _logf->commit_log_blocks( + *appender, + LPC_WRITE_REPLICATION_LOG_PRIVATE, + nullptr, + [&](error_code err, size_t sz) { + ASSERT_EQ(err, ERR_OK); + ASSERT_EQ(sz, appender->size()); + }, + 0); tsk->wait(); ASSERT_EQ(tsk->get_aio_context()->buffer_size, appender->size()); ASSERT_EQ(tsk->get_aio_context()->file_offset, @@ -80,17 +81,19 @@ TEST_P(log_file_test, commit_log_blocks) size_t written_sz = appender->size(); appender = std::make_shared(_start_offset + written_sz); for (int i = 0; i < 1024; i++) { // more than DEFAULT_MAX_BLOCK_BYTES - appender->append_mutation(create_test_mutation(1 + i, std::string(1024, 'a')), nullptr); + appender->append_mutation(create_test_mutation(1 + i, std::string(1024, 'a').c_str()), + nullptr); } ASSERT_GT(appender->all_blocks().size(), 1); - tsk = _logf->commit_log_blocks(*appender, - LPC_WRITE_REPLICATION_LOG_PRIVATE, - nullptr, - [&](error_code err, size_t sz) { - ASSERT_EQ(err, ERR_OK); - ASSERT_EQ(sz, appender->size()); - }, - 0); + tsk = _logf->commit_log_blocks( + *appender, + LPC_WRITE_REPLICATION_LOG_PRIVATE, + nullptr, + [&](error_code err, size_t sz) { + ASSERT_EQ(err, ERR_OK); + ASSERT_EQ(sz, appender->size()); + }, + 0); tsk->wait(); ASSERT_EQ(tsk->get_aio_context()->buffer_size, appender->size()); ASSERT_EQ(tsk->get_aio_context()->file_offset, appender->start_offset() - _start_offset); diff --git a/src/replica/test/mock_utils.h b/src/replica/test/mock_utils.h index 6d7725b787..0ae2dd96ae 100644 --- a/src/replica/test/mock_utils.h +++ b/src/replica/test/mock_utils.h @@ -34,7 +34,7 @@ #include "replica/replica.h" #include "replica/replica_stub.h" #include "replica/backup/cold_backup_context.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" DSN_DECLARE_int32(log_private_file_size_mb); @@ -83,6 +83,8 @@ class mock_replication_app_base : public replication_app_base // we mock the followings void update_app_envs(const std::map &envs) override { _envs = envs; } void query_app_envs(std::map &out) override { out = _envs; } + + decree last_flushed_decree() const override { return _last_durable_decree; } decree last_durable_decree() const override { return _last_durable_decree; } // TODO(heyuchen): implement this function in further pull request @@ -98,6 +100,8 @@ class mock_replication_app_base : public replication_app_base return manual_compaction_status::IDLE; } + void set_last_applied_decree(decree d) { _last_committed_decree.store(d); } + void set_last_durable_decree(decree d) { _last_durable_decree = d; } void set_expect_last_durable_decree(decree d) { _expect_last_durable_decree = d; } @@ -176,9 +180,9 @@ class mock_replica : public replica void prepare_list_commit_hard(decree d) { _prepare_list->commit(d, COMMIT_TO_DECREE_HARD); } decree get_app_last_committed_decree() { return _app->last_committed_decree(); } void set_app_last_committed_decree(decree d) { _app->_last_committed_decree = d; } - void set_primary_partition_configuration(partition_configuration &pconfig) + void set_primary_partition_configuration(partition_configuration &pc) { - _primary_states.membership = pconfig; + _primary_states.pc = pc; } partition_bulk_load_state get_secondary_bulk_load_state(const host_port &node) { @@ -216,6 +220,11 @@ class mock_replica : public replica backup_context->complete_checkpoint(); } + void update_last_applied_decree(decree decree) + { + dynamic_cast(_app.get())->set_last_applied_decree(decree); + } + void update_last_durable_decree(decree decree) { dynamic_cast(_app.get())->set_last_durable_decree(decree); @@ -414,8 +423,8 @@ class mock_mutation_log_private : public mutation_log_private std::vector &mutation_list) const override { for (auto &mu : _mu_list) { - ballot current_ballot = - (start_ballot == invalid_ballot) ? invalid_ballot : mu->get_ballot(); + ballot current_ballot = (start_ballot == invalid_ballot) ? invalid_ballot + : mu->get_ballot(); if ((mu->get_decree() >= start_decree && start_ballot == current_ballot) || current_ballot > start_ballot) { mutation_list.push_back(mu); diff --git a/src/replica/test/mutation_log_learn_test.cpp b/src/replica/test/mutation_log_learn_test.cpp index a3e67d9f20..f269c79d1c 100644 --- a/src/replica/test/mutation_log_learn_test.cpp +++ b/src/replica/test/mutation_log_learn_test.cpp @@ -41,7 +41,7 @@ #include "replica/mutation_log.h" #include "replica/test/mock_utils.h" #include "replica_test_base.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/binary_writer.h" #include "utils/blob.h" diff --git a/src/replica/test/mutation_log_test.cpp b/src/replica/test/mutation_log_test.cpp index 5d2f339b7a..6d92e042bf 100644 --- a/src/replica/test/mutation_log_test.cpp +++ b/src/replica/test/mutation_log_test.cpp @@ -291,13 +291,14 @@ class mutation_log_test : public replica_test_base void TearDown() override { utils::filesystem::remove_path(_log_dir); } - mutation_ptr create_test_mutation(decree d, const std::string &data) override + mutation_ptr + create_test_mutation(int64_t decree, int64_t last_committed_decree, const char *data) override { mutation_ptr mu(new mutation()); mu->data.header.ballot = 1; - mu->data.header.decree = d; + mu->data.header.decree = decree; mu->data.header.pid = get_gpid(); - mu->data.header.last_committed_decree = d - 1; + mu->data.header.last_committed_decree = last_committed_decree; mu->data.header.log_offset = 0; binary_writer writer; @@ -313,6 +314,11 @@ class mutation_log_test : public replica_test_base return mu; } + mutation_ptr create_test_mutation(int64_t decree, const char *data) override + { + return mutation_log_test::create_test_mutation(decree, decree - 1, data); + } + static void ASSERT_BLOB_EQ(const blob &lhs, const blob &rhs) { ASSERT_EQ(std::string(lhs.data(), lhs.length()), std::string(rhs.data(), rhs.length())); @@ -326,8 +332,7 @@ class mutation_log_test : public replica_test_base // each round mlog will replay the former logs, and create new file mutation_log_ptr mlog = create_private_log(); for (int i = 1; i <= 10; i++) { - std::string msg = "hello!"; - mutation_ptr mu = create_test_mutation(10 * f + i, msg); + auto mu = create_test_mutation(10 * f + i, "hello!"); mlog->append(mu, LPC_AIO_IMMEDIATE_CALLBACK, nullptr, nullptr, 0); } mlog->tracker()->wait_outstanding_tasks(); @@ -540,12 +545,13 @@ TEST_P(mutation_log_test, reset_from) // reset from the tmp log dir. std::vector actual; - auto err = mlog->reset_from(_log_dir + ".tmp", - [&](int, mutation_ptr &mu) -> bool { - actual.push_back(mu); - return true; - }, - [](error_code err) { ASSERT_EQ(err, ERR_OK); }); + auto err = mlog->reset_from( + _log_dir + ".tmp", + [&](int, mutation_ptr &mu) -> bool { + actual.push_back(mu); + return true; + }, + [](error_code err) { ASSERT_EQ(err, ERR_OK); }); ASSERT_EQ(err, ERR_OK); ASSERT_EQ(actual.size(), expected.size()); @@ -586,12 +592,13 @@ TEST_P(mutation_log_test, reset_from_while_writing) // reset from the tmp log dir. std::vector actual; - auto err = mlog->reset_from(_log_dir + ".test", - [&](int, mutation_ptr &mu) -> bool { - actual.push_back(mu); - return true; - }, - [](error_code err) { ASSERT_EQ(err, ERR_OK); }); + auto err = mlog->reset_from( + _log_dir + ".test", + [&](int, mutation_ptr &mu) -> bool { + actual.push_back(mu); + return true; + }, + [](error_code err) { ASSERT_EQ(err, ERR_OK); }); ASSERT_EQ(err, ERR_OK); mlog->flush(); diff --git a/src/replica/test/open_replica_test.cpp b/src/replica/test/open_replica_test.cpp index 4df92dda5a..c476a0d26d 100644 --- a/src/replica/test/open_replica_test.cpp +++ b/src/replica/test/open_replica_test.cpp @@ -31,9 +31,9 @@ #include "mock_utils.h" #include "replica/replica_stub.h" #include "replica_test_base.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "task/task.h" #include "utils/filesystem.h" namespace dsn { @@ -72,15 +72,15 @@ TEST_P(open_replica_test, open_replica_add_decree_and_ballot_check) _replica->register_service(); - partition_configuration config; - config.pid = pid; - config.ballot = test.b; - config.last_committed_decree = test.last_committed_decree; + partition_configuration pc; + pc.pid = pid; + pc.ballot = test.b; + pc.last_committed_decree = test.last_committed_decree; auto as = app_state::create(ai); auto req = std::make_shared(); req->info = *as; - req->config = config; + req->config = pc; req->type = config_type::CT_ASSIGN_PRIMARY; SET_IP_AND_HOST_PORT_BY_DNS(*req, node, node); if (test.expect_crash) { diff --git a/src/replica/test/replica_disk_migrate_test.cpp b/src/replica/test/replica_disk_migrate_test.cpp index ce2ed30d42..df410925ef 100644 --- a/src/replica/test/replica_disk_migrate_test.cpp +++ b/src/replica/test/replica_disk_migrate_test.cpp @@ -39,9 +39,9 @@ #include "replica/test/mock_utils.h" #include "replica/test/replica_disk_test_base.h" #include "replica_admin_types.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/task/task.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_holder.h" +#include "task/task.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/fail_point.h" diff --git a/src/replica/test/replica_disk_test.cpp b/src/replica/test/replica_disk_test.cpp index c072556d31..a3f4ff0181 100644 --- a/src/replica/test/replica_disk_test.cpp +++ b/src/replica/test/replica_disk_test.cpp @@ -41,7 +41,7 @@ #include "replica_admin_types.h" #include "replica_disk_test_base.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_holder.h" +#include "rpc/rpc_holder.h" #include "test_util/test_util.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" diff --git a/src/replica/test/replica_learn_test.cpp b/src/replica/test/replica_learn_test.cpp index 6e3a206d0f..b7ff94bc5c 100644 --- a/src/replica/test/replica_learn_test.cpp +++ b/src/replica/test/replica_learn_test.cpp @@ -131,7 +131,6 @@ class replica_learn_test : public duplication_test_base {0, invalid_decree, 5, 2, invalid_decree, 1}, // learn_start_decree_for_dup(3) > learn_start_decree_no_dup(2) {1, invalid_decree, 5, 2, invalid_decree, 2}, - }; int id = 1; @@ -149,8 +148,8 @@ class replica_learn_test : public duplication_test_base auto dup = create_test_duplicator(tt.min_confirmed_decree); add_dup(_replica.get(), std::move(dup)); - ASSERT_EQ(_replica->get_learn_start_decree(req), tt.wlearn_start_decree) << "case #" - << id; + ASSERT_EQ(_replica->get_learn_start_decree(req), tt.wlearn_start_decree) + << "case #" << id; id++; } } diff --git a/src/replica/test/replica_test.cpp b/src/replica/test/replica_test.cpp index 7123dd85b7..711d364feb 100644 --- a/src/replica/test/replica_test.cpp +++ b/src/replica/test/replica_test.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -50,12 +51,13 @@ #include "replica/replication_app_base.h" #include "replica/test/mock_utils.h" #include "replica_test_base.h" +#include "rpc/network.sim.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_message.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/network.sim.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "task/task_code.h" +#include "task/task_tracker.h" +#include "test_util/test_util.h" #include "utils/autoref_ptr.h" #include "utils/defer.h" #include "utils/env.h" @@ -65,10 +67,14 @@ #include "utils/fmt_logging.h" #include "utils/metrics.h" #include "utils/string_conv.h" +#include "utils/synchronize.h" #include "utils/test_macros.h" DSN_DECLARE_bool(fd_disabled); DSN_DECLARE_string(cold_backup_root); +DSN_DECLARE_uint32(mutation_2pc_min_replica_count); + +using pegasus::AssertEventually; namespace dsn { namespace replication { @@ -90,6 +96,7 @@ class replica_test : public replica_test_base mock_app_info(); _mock_replica = stub->generate_replica_ptr(_app_info, _pid, partition_status::PS_PRIMARY, 1); + _mock_replica->init_private_log(_log_dir); // set FLAGS_cold_backup_root manually. // FLAGS_cold_backup_root is set by configuration "replication.cold_backup_root", @@ -204,6 +211,25 @@ class replica_test : public replica_test_base bool is_checkpointing() { return _mock_replica->_is_manual_emergency_checkpointing; } + void test_trigger_manual_emergency_checkpoint(const decree min_checkpoint_decree, + const error_code expected_err, + std::function callback = {}) + { + dsn::utils::notify_event op_completed; + _mock_replica->async_trigger_manual_emergency_checkpoint( + min_checkpoint_decree, 0, [&](error_code actual_err) { + ASSERT_EQ(expected_err, actual_err); + + if (callback) { + callback(); + } + + op_completed.notify(); + }); + + op_completed.wait(); + } + bool has_gpid(gpid &pid) const { for (const auto &node : stub->_fs_manager.get_dir_nodes()) { @@ -314,9 +340,7 @@ TEST_P(replica_test, query_data_version_test) std::string expected_response_json; } tests[] = {{"", http_status_code::kBadRequest, "app_id should not be empty"}, {"wrong", http_status_code::kBadRequest, "invalid app_id=wrong"}, - {"2", - http_status_code::kOk, - R"({"1":{"data_version":"1"}})"}, + {"2", http_status_code::kOk, R"({"1":{"data_version":"1"}})"}, {"4", http_status_code::kNotFound, "app_id=4 not found"}}; for (const auto &test : tests) { http_request req; @@ -426,28 +450,50 @@ TEST_P(replica_test, test_replica_backup_and_restore_with_specific_path) TEST_P(replica_test, test_trigger_manual_emergency_checkpoint) { - ASSERT_EQ(_mock_replica->trigger_manual_emergency_checkpoint(100), ERR_OK); - ASSERT_TRUE(is_checkpointing()); + // There is only one replica for the unit test. + PRESERVE_FLAG(mutation_2pc_min_replica_count); + FLAGS_mutation_2pc_min_replica_count = 1; + + // Initially the mutation log is empty. + ASSERT_EQ(0, _mock_replica->last_applied_decree()); + ASSERT_EQ(0, _mock_replica->last_durable_decree()); + + // Commit at least an empty write to make the replica become non-empty. + _mock_replica->update_expect_last_durable_decree(1); + test_trigger_manual_emergency_checkpoint(1, ERR_OK); + _mock_replica->tracker()->wait_outstanding_tasks(); + + // Committing multiple empty writes (retry multiple times) might make the last + // applied decree greater than 1. + ASSERT_LE(1, _mock_replica->last_applied_decree()); + ASSERT_EQ(1, _mock_replica->last_durable_decree()); + + test_trigger_manual_emergency_checkpoint( + 100, ERR_OK, [this]() { ASSERT_TRUE(is_checkpointing()); }); _mock_replica->update_last_durable_decree(100); - // test no need start checkpoint because `old_decree` < `last_durable` - ASSERT_EQ(_mock_replica->trigger_manual_emergency_checkpoint(100), ERR_OK); - ASSERT_FALSE(is_checkpointing()); + // There's no need to trigger checkpoint since min_checkpoint_decree <= last_durable_decree. + test_trigger_manual_emergency_checkpoint( + 100, ERR_OK, [this]() { ASSERT_FALSE(is_checkpointing()); }); - // test has existed running task + // There's already an existing running manual emergency checkpoint task. force_update_checkpointing(true); - ASSERT_EQ(_mock_replica->trigger_manual_emergency_checkpoint(101), ERR_BUSY); - ASSERT_TRUE(is_checkpointing()); - // test running task completed + test_trigger_manual_emergency_checkpoint( + 101, ERR_BUSY, [this]() { ASSERT_TRUE(is_checkpointing()); }); + + // Wait until the running task is completed. _mock_replica->tracker()->wait_outstanding_tasks(); ASSERT_FALSE(is_checkpointing()); - // test exceed max concurrent count - ASSERT_EQ(_mock_replica->trigger_manual_emergency_checkpoint(101), ERR_OK); + // The number of concurrent tasks exceeds the limit. + test_trigger_manual_emergency_checkpoint(101, ERR_OK); force_update_checkpointing(false); + + PRESERVE_FLAG(max_concurrent_manual_emergency_checkpointing_count); FLAGS_max_concurrent_manual_emergency_checkpointing_count = 1; - ASSERT_EQ(_mock_replica->trigger_manual_emergency_checkpoint(101), ERR_TRY_AGAIN); - ASSERT_FALSE(is_checkpointing()); + + test_trigger_manual_emergency_checkpoint( + 101, ERR_TRY_AGAIN, [this]() { ASSERT_FALSE(is_checkpointing()); }); _mock_replica->tracker()->wait_outstanding_tasks(); } @@ -545,8 +591,8 @@ void replica_test::test_auto_trash(error_code ec) } ASSERT_EQ(moved_to_err_path, found_err_path); ASSERT_FALSE(has_gpid(_pid)); - ASSERT_EQ(moved_to_err_path, dn->status == disk_status::NORMAL) << moved_to_err_path << ", " - << enum_to_string(dn->status); + ASSERT_EQ(moved_to_err_path, dn->status == disk_status::NORMAL) + << moved_to_err_path << ", " << enum_to_string(dn->status); ASSERT_EQ(!moved_to_err_path, dn->status == disk_status::IO_ERROR) << moved_to_err_path << ", " << enum_to_string(dn->status); diff --git a/src/replica/test/replica_test_base.h b/src/replica/test/replica_test_base.h index 9296e2a4e0..374e87e82e 100644 --- a/src/replica/test/replica_test_base.h +++ b/src/replica/test/replica_test_base.h @@ -61,20 +61,23 @@ class replica_test_base : public replica_stub_test_base _log_dir = _replica->dir(); } - virtual mutation_ptr create_test_mutation(int64_t decree, const std::string &data) + virtual mutation_ptr + create_test_mutation(int64_t decree, int64_t last_committed_decree, const char *data) { mutation_ptr mu(new mutation()); mu->data.header.ballot = 1; mu->data.header.decree = decree; mu->data.header.pid = _replica->get_gpid(); - mu->data.header.last_committed_decree = decree - 1; + mu->data.header.last_committed_decree = last_committed_decree; mu->data.header.log_offset = 0; mu->data.header.timestamp = decree; mu->data.updates.emplace_back(mutation_update()); mu->data.updates.back().code = RPC_COLD_BACKUP; // whatever code it is, but never be WRITE_EMPTY - mu->data.updates.back().data = blob::create_from_bytes(std::string(data)); + if (data != nullptr) { + mu->data.updates.back().data = blob::create_from_bytes(data); + } mu->client_requests.push_back(nullptr); // replica_duplicator always loads from hard disk, @@ -84,7 +87,14 @@ class replica_test_base : public replica_stub_test_base return mu; } + virtual mutation_ptr create_test_mutation(int64_t decree, const char *data) + { + return replica_test_base::create_test_mutation(decree, decree - 1, data); + } + gpid get_gpid() const { return _replica->get_gpid(); } + + void set_last_applied_decree(decree d) { _replica->set_app_last_committed_decree(d); } }; } // namespace replication diff --git a/src/replica/test/replication_service_test_app.h b/src/replica/test/replication_service_test_app.h index 9d3edc3c42..3417b2ce28 100644 --- a/src/replica/test/replication_service_test_app.h +++ b/src/replica/test/replication_service_test_app.h @@ -27,8 +27,8 @@ #pragma once #include "replica/replication_service_app.h" -using ::dsn::replication::replication_service_app; using ::dsn::error_code; +using ::dsn::replication::replication_service_app; class replication_service_test_app : public replication_service_app { diff --git a/src/replica/test/throttling_controller_test.cpp b/src/replica/test/throttling_controller_test.cpp index 120ad0b4f8..499eaed51d 100644 --- a/src/replica/test/throttling_controller_test.cpp +++ b/src/replica/test/throttling_controller_test.cpp @@ -110,7 +110,11 @@ class throttling_controller_test : public ::testing::Test // invalid argument std::string test_cases_2[] = { - "20m*delay*100", "20B*delay*100", "20KB*delay*100", "20Mb*delay*100", "20MB*delay*100", + "20m*delay*100", + "20B*delay*100", + "20KB*delay*100", + "20Mb*delay*100", + "20MB*delay*100", }; for (const std::string &tc : test_cases_2) { ASSERT_FALSE(cntl.parse_from_env(tc, 4, parse_err, env_changed, old_value)); diff --git a/src/rpc/CMakeLists.txt b/src/rpc/CMakeLists.txt new file mode 100644 index 0000000000..403ab2f04b --- /dev/null +++ b/src/rpc/CMakeLists.txt @@ -0,0 +1,31 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +set(MY_PROJ_NAME dsn_rpc) +set(MY_SRC_SEARCH_MODE "GLOB") +thrift_generate_cpp( + REQUEST_META_THRIFT_SRCS + REQUEST_META_THRIFT_HDRS + ${PROJECT_ROOT}/src/rpc/request_meta.thrift) +set(MY_PROJ_SRC ${REQUEST_META_THRIFT_SRCS}) +set(MY_PROJ_LIBS + dsn_meta_server + dsn_replication_common + dsn_utils) +dsn_add_static_library() + +add_subdirectory(test) diff --git a/src/runtime/rpc/asio_net_provider.cpp b/src/rpc/asio_net_provider.cpp similarity index 98% rename from src/runtime/rpc/asio_net_provider.cpp rename to src/rpc/asio_net_provider.cpp index 1bc85f2d5b..ea8ce0db47 100644 --- a/src/runtime/rpc/asio_net_provider.cpp +++ b/src/rpc/asio_net_provider.cpp @@ -71,8 +71,8 @@ // IWYU pragma: no_include "boost/asio/ip/impl/address_v4.ipp" // IWYU pragma: no_include "boost/asio/socket_base.hpp" // IWYU pragma: no_include "boost/system/error_code.hpp -#include "runtime/task/task.h" -#include "runtime/task/task_worker.h" +#include "task/task.h" +#include "task/task_worker.h" #include "runtime/tool_api.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" @@ -147,6 +147,7 @@ error_code asio_network_provider::start(rpc_channel channel, int port, bool clie _address = rpc_address(get_local_ipv4(), port); _hp = ::dsn::host_port::from_address(_address); + LOG_WARNING_IF(!_hp, "'{}' can not be reverse resolved", _address); if (!client_only) { auto v4_addr = boost::asio::ip::address_v4::any(); //(ntohl(_address.ip)); @@ -162,7 +163,8 @@ error_code asio_network_provider::start(rpc_channel channel, int port, bool clie _acceptor->set_option(boost::asio::socket_base::reuse_address(true)); _acceptor->bind(endpoint, ec); if (ec) { - LOG_ERROR("asio tcp acceptor bind failed, error = {}", ec.message()); + LOG_ERROR( + "asio tcp acceptor bind address '{}' failed, error = {}", _address, ec.message()); _acceptor.reset(); return ERR_NETWORK_INIT_FAILED; } @@ -456,6 +458,7 @@ error_code asio_udp_provider::start(rpc_channel channel, int port, bool client_o } _hp = ::dsn::host_port::from_address(_address); + LOG_WARNING_IF(!_hp, "'{}' can not be reverse resolved", _address); for (int i = 0; i < FLAGS_io_service_worker_count; i++) { _workers.push_back(std::make_shared([this, i]() { diff --git a/src/runtime/rpc/asio_net_provider.h b/src/rpc/asio_net_provider.h similarity index 95% rename from src/runtime/rpc/asio_net_provider.h rename to src/rpc/asio_net_provider.h index 20eb6f9d91..3b214c71df 100644 --- a/src/runtime/rpc/asio_net_provider.h +++ b/src/rpc/asio_net_provider.h @@ -34,12 +34,12 @@ #include "boost/asio/io_service.hpp" #include "boost/asio/ip/tcp.hpp" #include "boost/asio/ip/udp.hpp" -#include "runtime/rpc/message_parser.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_spec.h" +#include "rpc/message_parser.h" +#include "rpc/network.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "task/task_spec.h" #include "utils/error_code.h" #include "utils/synchronize.h" @@ -95,6 +95,7 @@ class asio_network_provider : public connection_oriented_network std::vector> _io_services; std::vector> _workers; ::dsn::rpc_address _address; + // NOTE: '_hp' is possible to be invalid if '_address' can not be reverse resolved. ::dsn::host_port _hp; }; diff --git a/src/runtime/rpc/asio_rpc_session.cpp b/src/rpc/asio_rpc_session.cpp similarity index 93% rename from src/runtime/rpc/asio_rpc_session.cpp rename to src/rpc/asio_rpc_session.cpp index 39ac3e6c04..6cf7c6f808 100644 --- a/src/runtime/rpc/asio_rpc_session.cpp +++ b/src/rpc/asio_rpc_session.cpp @@ -58,8 +58,8 @@ // IWYU pragma: no_include "boost/asio/ip/impl/address_v4.ipp" // IWYU pragma: no_include "boost/asio/socket_base.hpp" // IWYU pragma: no_include "boost/system/error_code.hpp" -#include "runtime/rpc/asio_net_provider.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/asio_net_provider.h" +#include "rpc/rpc_address.h" #include "utils/autoref_ptr.h" #include "utils/fmt_logging.h" @@ -130,7 +130,7 @@ void asio_rpc_session::do_read(int read_next) } else { LOG_ERROR("asio read from {} failed: {}", _remote_addr, ec.message()); } - on_failure(); + on_failure(false); } else { _reader.mark_read(length); @@ -151,7 +151,7 @@ void asio_rpc_session::do_read(int read_next) if (read_next == -1) { LOG_ERROR("asio read from {} failed", _remote_addr); - on_failure(); + on_failure(false); } else { start_read_next(read_next); } @@ -197,16 +197,25 @@ asio_rpc_session::asio_rpc_session(asio_network_provider &net, set_options(); } -void asio_rpc_session::close() +asio_rpc_session::~asio_rpc_session() { + // Because every async_* invoking adds the reference counter and releases the reference counter + // in corresponding callback, it's certain that the reference counter is zero in its + // destructor, which means there is no inflight invoking, then it's safe to close the socket. + asio_rpc_session::close(); +} +void asio_rpc_session::close() +{ boost::system::error_code ec; _socket->shutdown(boost::asio::socket_base::shutdown_type::shutdown_both, ec); - if (ec) + if (ec) { LOG_WARNING("asio socket shutdown failed, error = {}", ec.message()); + } _socket->close(ec); - if (ec) + if (ec) { LOG_WARNING("asio socket close failed, error = {}", ec.message()); + } } void asio_rpc_session::connect() @@ -222,7 +231,7 @@ void asio_rpc_session::connect() set_options(); set_connected(); - on_send_completed(); + on_send_completed(0); start_read_next(); } else { LOG_ERROR( diff --git a/src/runtime/rpc/asio_rpc_session.h b/src/rpc/asio_rpc_session.h similarity index 88% rename from src/runtime/rpc/asio_rpc_session.h rename to src/rpc/asio_rpc_session.h index e3f5da4e21..bf715fc448 100644 --- a/src/runtime/rpc/asio_rpc_session.h +++ b/src/rpc/asio_rpc_session.h @@ -30,8 +30,8 @@ #include #include "boost/asio/ip/tcp.hpp" -#include "runtime/rpc/message_parser.h" -#include "runtime/rpc/network.h" +#include "rpc/message_parser.h" +#include "rpc/network.h" namespace dsn { class message_ex; @@ -51,10 +51,14 @@ class asio_rpc_session : public rpc_session message_parser_ptr &parser, bool is_client); - ~asio_rpc_session() override = default; + ~asio_rpc_session() override; void send(uint64_t signature) override; + // The under layer socket will be invalidated after being closed. + // + // It's needed to prevent the '_socket' to be closed while the socket's async_* interfaces are + // in flight. void close() override; void connect() override; @@ -69,9 +73,6 @@ class asio_rpc_session : public rpc_session } } -private: - // boost::asio::socket is thread-unsafe, must use lock to prevent a - // reading/writing socket being modified or closed concurrently. std::shared_ptr _socket; }; diff --git a/src/runtime/rpc/dns_resolver.cpp b/src/rpc/dns_resolver.cpp similarity index 97% rename from src/runtime/rpc/dns_resolver.cpp rename to src/rpc/dns_resolver.cpp index 2eab9d8327..9ddc10a7fe 100644 --- a/src/runtime/rpc/dns_resolver.cpp +++ b/src/rpc/dns_resolver.cpp @@ -20,14 +20,14 @@ #include #include #include +#include #include -#include "absl/strings/string_view.h" #include "fmt/core.h" #include "fmt/format.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/group_address.h" -#include "runtime/rpc/group_host_port.h" +#include "rpc/dns_resolver.h" +#include "rpc/group_address.h" +#include "rpc/group_host_port.h" #include "utils/autoref_ptr.h" #include "utils/fmt_logging.h" #include "utils/ports.h" diff --git a/src/runtime/rpc/dns_resolver.h b/src/rpc/dns_resolver.h similarity index 97% rename from src/runtime/rpc/dns_resolver.h rename to src/rpc/dns_resolver.h index f173db212d..e80640c138 100644 --- a/src/runtime/rpc/dns_resolver.h +++ b/src/rpc/dns_resolver.h @@ -23,8 +23,8 @@ #include #include -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" #include "utils/errors.h" #include "utils/metrics.h" #include "utils/singleton.h" diff --git a/src/runtime/rpc/dsn_message_parser.cpp b/src/rpc/dsn_message_parser.cpp similarity index 98% rename from src/runtime/rpc/dsn_message_parser.cpp rename to src/rpc/dsn_message_parser.cpp index b9dc05524d..178a602738 100644 --- a/src/runtime/rpc/dsn_message_parser.cpp +++ b/src/rpc/dsn_message_parser.cpp @@ -31,9 +31,9 @@ #include #include -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "rpc/rpc_message.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/blob.h" #include "utils/crc.h" #include "utils/fmt_logging.h" @@ -211,4 +211,4 @@ int dsn_message_parser::get_buffers_on_send(message_ex *msg, /*out*/ send_buf *b return true; } } -} +} // namespace dsn diff --git a/src/rpc/dsn_message_parser.h b/src/rpc/dsn_message_parser.h new file mode 100644 index 0000000000..7dbcdf3404 --- /dev/null +++ b/src/rpc/dsn_message_parser.h @@ -0,0 +1,58 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2015 Microsoft Corporation + * + * -=- Robust Distributed System Nucleus (rDSN) -=- + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#pragma once + +#include "rpc/message_parser.h" + +namespace dsn { +class message_ex; + +// Message parser for browser-generated http request. +class dsn_message_parser : public message_parser +{ +public: + dsn_message_parser() : _header_checked(false) {} + virtual ~dsn_message_parser() {} + + virtual void reset() override; + + virtual message_ex *get_message_on_receive(message_reader *reader, + /*out*/ int &read_next) override; + + virtual void prepare_on_send(message_ex *msg) override; + + virtual int get_buffers_on_send(message_ex *msg, /*out*/ send_buf *buffers) override; + +private: + static bool is_right_header(char *hdr); + + static bool is_right_body(message_ex *msg); + +private: + bool _header_checked; +}; +} // namespace dsn diff --git a/src/runtime/rpc/group_address.h b/src/rpc/group_address.h similarity index 99% rename from src/runtime/rpc/group_address.h rename to src/rpc/group_address.h index 73475de008..916515c1fc 100644 --- a/src/runtime/rpc/group_address.h +++ b/src/rpc/group_address.h @@ -29,7 +29,7 @@ #include #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" #include "utils/api_utilities.h" #include "utils/autoref_ptr.h" #include "utils/fmt_logging.h" diff --git a/src/runtime/rpc/group_host_port.h b/src/rpc/group_host_port.h similarity index 93% rename from src/runtime/rpc/group_host_port.h rename to src/rpc/group_host_port.h index f8ceea212f..199edffacf 100644 --- a/src/runtime/rpc/group_host_port.h +++ b/src/rpc/group_host_port.h @@ -22,8 +22,11 @@ #include #include -#include "runtime/rpc/group_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/group_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/group_address.h" +#include "rpc/group_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/autoref_ptr.h" #include "utils/fmt_logging.h" #include "utils/rand.h" @@ -127,10 +130,16 @@ inline rpc_group_host_port::rpc_group_host_port(const rpc_group_address *g_addr) { _name = g_addr->name(); for (const auto &addr : g_addr->members()) { - CHECK_TRUE(add(host_port::from_address(addr))); + const auto hp = host_port::from_address(addr); + CHECK(hp, "'{}' can not be reverse resolved", addr); + CHECK_TRUE(add(hp)); } _update_leader_automatically = g_addr->is_update_leader_automatically(); - set_leader(host_port::from_address(g_addr->leader())); + if (g_addr->leader()) { + const auto hp = host_port::from_address(g_addr->leader()); + CHECK(hp, "'{}' can not be reverse resolved", g_addr->leader()); + set_leader(hp); + } } inline rpc_group_host_port &rpc_group_host_port::operator=(const rpc_group_host_port &other) diff --git a/src/runtime/rpc/message_parser.cpp b/src/rpc/message_parser.cpp similarity index 98% rename from src/runtime/rpc/message_parser.cpp rename to src/rpc/message_parser.cpp index e9c0da964c..575fef6d03 100644 --- a/src/runtime/rpc/message_parser.cpp +++ b/src/rpc/message_parser.cpp @@ -35,8 +35,8 @@ #include #include "message_parser_manager.h" -#include "runtime/rpc/message_parser.h" -#include "runtime/task/task_spec.h" +#include "rpc/message_parser.h" +#include "task/task_spec.h" #include "utils/blob.h" #include "utils/fmt_logging.h" #include "utils/utils.h" @@ -200,4 +200,4 @@ message_parser *message_parser_manager::create_parser(network_header_format fmt) else return nullptr; } -} +} // namespace dsn diff --git a/src/runtime/rpc/message_parser.h b/src/rpc/message_parser.h similarity index 98% rename from src/runtime/rpc/message_parser.h rename to src/rpc/message_parser.h index 614e50f22d..bc67536a2d 100644 --- a/src/runtime/rpc/message_parser.h +++ b/src/rpc/message_parser.h @@ -30,8 +30,8 @@ #include #include -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_spec.h" +#include "rpc/rpc_message.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" diff --git a/src/runtime/rpc/message_parser_manager.h b/src/rpc/message_parser_manager.h similarity index 97% rename from src/runtime/rpc/message_parser_manager.h rename to src/rpc/message_parser_manager.h index b3fc7fa4c9..31622c6efe 100644 --- a/src/runtime/rpc/message_parser_manager.h +++ b/src/rpc/message_parser_manager.h @@ -26,7 +26,7 @@ #pragma once -#include "runtime/rpc/message_parser.h" +#include "rpc/message_parser.h" namespace dsn { class message_parser_manager : public utils::singleton @@ -58,4 +58,4 @@ class message_parser_manager : public utils::singleton std::vector _factory_vec; }; -} +} // namespace dsn diff --git a/src/runtime/rpc/network.cpp b/src/rpc/network.cpp similarity index 96% rename from src/runtime/rpc/network.cpp rename to src/rpc/network.cpp index 72a9c55e9b..52b8963494 100644 --- a/src/runtime/rpc/network.cpp +++ b/src/rpc/network.cpp @@ -30,15 +30,15 @@ #include #include #include +#include #include -#include "absl/strings/string_view.h" #include "message_parser_manager.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_engine.h" #include "runtime/api_task.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_engine.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/blob.h" #include "utils/customizable_id.h" #include "utils/errors.h" @@ -388,7 +388,9 @@ rpc_session::rpc_session(connection_oriented_network &net, _message_sent(0), _net(net), _remote_addr(remote_addr), - _remote_host_port(host_port::from_address(remote_addr)), + // TODO(yingchun): '_remote_host_port' is possible to be invalid after this! + // TODO(yingchun): It's too cost to reverse resolve host in constructor. + _remote_host_port(host_port::from_address(_remote_addr)), _max_buffer_block_count_per_send(net.max_buffer_block_count_per_send()), _reader(net.message_buffer_block_size()), _parser(parser), @@ -396,6 +398,7 @@ rpc_session::rpc_session(connection_oriented_network &net, _matcher(_net.engine()->matcher()), _delay_server_receive_ms(0) { + LOG_WARNING_IF(!_remote_host_port, "'{}' can not be reverse resolved", _remote_addr); if (!is_client) { on_rpc_session_connected.execute(this); } @@ -426,8 +429,14 @@ bool rpc_session::on_disconnected(bool is_write) void rpc_session::on_failure(bool is_write) { + // Just update the state machine here. if (on_disconnected(is_write)) { - close(); + // The under layer socket may be used by async_* interfaces concurrently, it's not thread + // safe to invalidate the '_socket', it should be invalidated when the session is + // destroyed. + LOG_WARNING("disconnect to remote {}, the socket will be lazily closed when the session " + "destroyed", + _remote_addr); } } diff --git a/src/runtime/rpc/network.h b/src/rpc/network.h similarity index 97% rename from src/runtime/rpc/network.h rename to src/rpc/network.h index 5a9bb06094..ce638299f6 100644 --- a/src/runtime/rpc/network.h +++ b/src/rpc/network.h @@ -32,11 +32,11 @@ #include #include +#include "rpc/message_parser.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" #include "rpc_address.h" -#include "runtime/rpc/message_parser.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_spec.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/fmt_utils.h" @@ -204,6 +204,7 @@ class connection_oriented_network : public network class rpc_client_matcher; class rpc_session : public ref_counter + { public: /*! @@ -274,8 +275,8 @@ class rpc_session : public ref_counter // should always be called in lock bool unlink_message_for_send(); virtual void send(uint64_t signature) = 0; - void on_send_completed(uint64_t signature = 0); - virtual void on_failure(bool is_write = false); + void on_send_completed(uint64_t signature); + virtual void on_failure(bool is_write); protected: /// @@ -314,7 +315,6 @@ class rpc_session : public ref_counter uint64_t _message_sent; // ] -protected: /// /// change status and check status /// @@ -327,7 +327,6 @@ class rpc_session : public ref_counter void clear_send_queue(bool resend_msgs); bool on_disconnected(bool is_write); -protected: // constant info connection_oriented_network &_net; dsn::rpc_address _remote_addr; diff --git a/src/runtime/rpc/network.sim.cpp b/src/rpc/network.sim.cpp similarity index 96% rename from src/runtime/rpc/network.sim.cpp rename to src/rpc/network.sim.cpp index 4aad6b933a..359afc8ebc 100644 --- a/src/runtime/rpc/network.sim.cpp +++ b/src/rpc/network.sim.cpp @@ -33,7 +33,7 @@ #include "boost/asio/ip/impl/host_name.ipp" #include "network.sim.h" #include "runtime/node_scoper.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/flags.h" @@ -83,7 +83,7 @@ static message_ex *virtual_send_message(message_ex *msg) tmp += buf.length(); } - blob bb(buffer, 0, msg->header->body_length + sizeof(message_header)); + blob bb(buffer, msg->header->body_length + sizeof(message_header)); message_ex *recv_msg = message_ex::create_receive_message(bb); recv_msg->to_address = msg->to_address; recv_msg->to_host_port = msg->to_host_port; @@ -162,6 +162,7 @@ sim_network_provider::sim_network_provider(rpc_engine *rpc, network *inner_provi { _address = rpc_address::from_host_port("localhost", 1); _hp = ::dsn::host_port::from_address(_address); + LOG_WARNING_IF(!_hp, "'{}' can not be reverse resolved", _address); } error_code sim_network_provider::start(rpc_channel channel, int port, bool client_only) @@ -172,6 +173,7 @@ error_code sim_network_provider::start(rpc_channel channel, int port, bool clien _address = dsn::rpc_address::from_host_port("localhost", port); _hp = ::dsn::host_port::from_address(_address); + LOG_WARNING_IF(!_hp, "'{}' can not be reverse resolved", _address); auto hostname = boost::asio::ip::host_name(); if (!client_only) { for (int i = NET_HDR_INVALID + 1; i <= network_header_format::max_value(); i++) { @@ -193,5 +195,5 @@ uint32_t sim_network_provider::net_delay_milliseconds() const FLAGS_max_message_delay_microseconds) / 1000; } -} -} // end namespace +} // namespace tools +} // namespace dsn diff --git a/src/runtime/rpc/network.sim.h b/src/rpc/network.sim.h similarity index 82% rename from src/runtime/rpc/network.sim.h rename to src/rpc/network.sim.h index f7954afbf1..1064039335 100644 --- a/src/runtime/rpc/network.sim.h +++ b/src/rpc/network.sim.h @@ -28,12 +28,12 @@ #include -#include "runtime/rpc/message_parser.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_spec.h" +#include "rpc/message_parser.h" +#include "rpc/network.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "task/task_spec.h" #include "utils/error_code.h" namespace dsn { @@ -50,15 +50,15 @@ class sim_client_session : public rpc_session ::dsn::rpc_address remote_addr, message_parser_ptr &parser); - virtual void connect(); + void connect() override; - virtual void send(uint64_t signature) override; + void send(uint64_t signature) override; - virtual void do_read(int sz) override {} + void do_read(int sz) override {} - virtual void close() override {} + void close() override {} - virtual void on_failure(bool is_write = false) override {} + void on_failure(bool is_write) override {} }; class sim_server_session : public rpc_session @@ -69,15 +69,15 @@ class sim_server_session : public rpc_session rpc_session_ptr &client, message_parser_ptr &parser); - virtual void send(uint64_t signature) override; + void send(uint64_t signature) override; - virtual void connect() {} + void connect() override {} - virtual void do_read(int sz) override {} + void do_read(int sz) override {} - virtual void close() override {} + void close() override {} - virtual void on_failure(bool is_write = false) override {} + void on_failure(bool is_write) override {} private: rpc_session_ptr _client; diff --git a/src/runtime/rpc/raw_message_parser.cpp b/src/rpc/raw_message_parser.cpp similarity index 97% rename from src/runtime/rpc/raw_message_parser.cpp rename to src/rpc/raw_message_parser.cpp index 53cde8de6c..cd57d37c13 100644 --- a/src/runtime/rpc/raw_message_parser.cpp +++ b/src/rpc/raw_message_parser.cpp @@ -32,10 +32,10 @@ #include "common/gpid.h" #include "network.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_message.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/blob.h" #include "utils/fmt_logging.h" #include "utils/join_point.h" @@ -130,4 +130,4 @@ int raw_message_parser::get_buffers_on_send(message_ex *msg, send_buf *buffers) } return i; } -} +} // namespace dsn diff --git a/src/rpc/raw_message_parser.h b/src/rpc/raw_message_parser.h new file mode 100644 index 0000000000..5fcdd28ebb --- /dev/null +++ b/src/rpc/raw_message_parser.h @@ -0,0 +1,55 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2015 Microsoft Corporation + * + * -=- Robust Distributed System Nucleus (rDSN) -=- + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef RAW_MESSAGE_PARSER_H +#define RAW_MESSAGE_PARSER_H + +#include "rpc/message_parser.h" +#include "task/task_spec.h" +#include "utils/customizable_id.h" + +namespace dsn { +class message_ex; + +DEFINE_CUSTOMIZED_ID(network_header_format, NET_HDR_RAW) + +class rpc_session; + +// Message parser for user customed request. +class raw_message_parser : public message_parser +{ +private: + static void notify_rpc_session_disconnected(rpc_session *sp); + +public: + raw_message_parser(); + virtual ~raw_message_parser() {} + virtual message_ex *get_message_on_receive(message_reader *reader, + /*out*/ int &read_next) override; + virtual int get_buffers_on_send(message_ex *msg, /*out*/ send_buf *buffers) override; +}; +} // namespace dsn +#endif // RAW_MESSAGE_PARSER_H diff --git a/src/runtime/rpc/request_meta.thrift b/src/rpc/request_meta.thrift similarity index 100% rename from src/runtime/rpc/request_meta.thrift rename to src/rpc/request_meta.thrift diff --git a/src/runtime/rpc/rpc_address.cpp b/src/rpc/rpc_address.cpp similarity index 98% rename from src/runtime/rpc/rpc_address.cpp rename to src/rpc/rpc_address.cpp index f8d02d7c66..29b015749a 100644 --- a/src/runtime/rpc/rpc_address.cpp +++ b/src/rpc/rpc_address.cpp @@ -24,7 +24,7 @@ * THE SOFTWARE. */ -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" #include #include @@ -34,9 +34,9 @@ #include #include #include +#include -#include "absl/strings/string_view.h" -#include "runtime/rpc/group_address.h" +#include "rpc/group_address.h" #include "utils/error_code.h" #include "utils/fixed_size_buffer_pool.h" #include "utils/fmt_logging.h" @@ -143,7 +143,7 @@ bool rpc_address::is_site_local_address(uint32_t ip_net) /*static*/ bool rpc_address::is_docker_netcard(const char *netcard_interface, uint32_t ip_net) { - if (absl::string_view(netcard_interface).find("docker") != absl::string_view::npos) { + if (std::string_view(netcard_interface).find("docker") != std::string_view::npos) { return true; } uint32_t iphost = ntohl(ip_net); diff --git a/src/runtime/rpc/rpc_address.h b/src/rpc/rpc_address.h similarity index 99% rename from src/runtime/rpc/rpc_address.h rename to src/rpc/rpc_address.h index be36315843..6f1a216dcd 100644 --- a/src/runtime/rpc/rpc_address.h +++ b/src/rpc/rpc_address.h @@ -48,7 +48,8 @@ class TProtocol; } // namespace thrift } // namespace apache -typedef enum dsn_host_type_t { +typedef enum dsn_host_type_t +{ HOST_TYPE_INVALID = 0, HOST_TYPE_IPV4 = 1, HOST_TYPE_GROUP = 2, diff --git a/src/runtime/rpc/rpc_engine.cpp b/src/rpc/rpc_engine.cpp similarity index 98% rename from src/runtime/rpc/rpc_engine.cpp rename to src/rpc/rpc_engine.cpp index 0e80e7fd1e..e44f7154e2 100644 --- a/src/runtime/rpc/rpc_engine.cpp +++ b/src/rpc/rpc_engine.cpp @@ -33,12 +33,12 @@ #include #include "common/gpid.h" +#include "rpc/group_address.h" +#include "rpc/group_host_port.h" +#include "rpc/network.h" +#include "rpc/serialization.h" #include "runtime/api_layer1.h" #include "runtime/global_config.h" -#include "runtime/rpc/group_address.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/serialization.h" #include "runtime/service_engine.h" #include "utils/customizable_id.h" #include "utils/factory_store.h" @@ -150,8 +150,9 @@ bool rpc_client_matcher::on_recv_reply(network *net, uint64_t key, message_ex *r case GRPC_TO_LEADER: if (req->server_address.group_address()->is_update_leader_automatically()) { req->server_address.group_address()->set_leader(addr); - req->server_host_port.group_host_port()->set_leader( - host_port::from_address(addr)); + const auto hp = host_port::from_address(addr); + CHECK(hp, "'{}' can not be reverse resolved", addr); + req->server_host_port.group_host_port()->set_leader(hp); } break; default: @@ -180,8 +181,9 @@ bool rpc_client_matcher::on_recv_reply(network *net, uint64_t key, message_ex *r req->server_address.group_address()->is_update_leader_automatically()) { req->server_address.group_address()->set_leader( reply->header->from_address); - req->server_host_port.group_host_port()->set_leader( - host_port::from_address(reply->header->from_address)); + const auto hp = host_port::from_address(reply->header->from_address); + CHECK(hp, "'{}' can not be reverse resolved", reply->header->from_address); + req->server_host_port.group_host_port()->set_leader(hp); } break; default: @@ -523,6 +525,7 @@ error_code rpc_engine::start(const service_app_spec &aspec) _local_primary_address = _client_nets[NET_HDR_DSN][0]->address(); _local_primary_address.set_port(aspec.ports.size() > 0 ? *aspec.ports.begin() : aspec.id); _local_primary_host_port = host_port::from_address(_local_primary_address); + CHECK(_local_primary_host_port, "'{}' can not be reverse resolved", _local_primary_address); LOG_INFO("=== service_node=[{}], primary_address=[{}({})] ===", _node->full_name(), diff --git a/src/runtime/rpc/rpc_engine.h b/src/rpc/rpc_engine.h similarity index 97% rename from src/runtime/rpc/rpc_engine.h rename to src/rpc/rpc_engine.h index 47980afbde..0447977709 100644 --- a/src/runtime/rpc/rpc_engine.h +++ b/src/rpc/rpc_engine.h @@ -34,13 +34,13 @@ #include #include "network.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" #include "runtime/api_task.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" @@ -50,10 +50,10 @@ namespace dsn { class rpc_engine; class service_node; -struct network_server_config; -struct service_app_spec; #define MAX_CLIENT_PORT 1023 +struct network_server_config; +struct service_app_spec; // // client matcher for matching RPC request and RPC response, and handling timeout diff --git a/src/runtime/rpc/rpc_holder.h b/src/rpc/rpc_holder.h similarity index 96% rename from src/runtime/rpc/rpc_holder.h rename to src/rpc/rpc_holder.h index 2aba608910..9d753354cd 100644 --- a/src/runtime/rpc/rpc_holder.h +++ b/src/rpc/rpc_holder.h @@ -27,13 +27,13 @@ #include "client/partition_resolver.h" #include "dsn.layer2_types.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "rpc_address.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/chrono_literals.h" #include "utils/error_code.h" @@ -168,8 +168,8 @@ class rpc_holder rpc_response_task_ptr t = rpc::create_rpc_response_task( dsn_request(), tracker, - [ cb_fwd = std::forward(callback), - rpc = *this ](error_code err, message_ex * req, message_ex * resp) mutable { + [cb_fwd = std::forward(callback), + rpc = *this](error_code err, message_ex *req, message_ex *resp) mutable { if (err == ERR_OK) { unmarshall(resp, rpc.response()); } @@ -200,8 +200,8 @@ class rpc_holder rpc_response_task_ptr t = rpc::create_rpc_response_task( dsn_request(), tracker, - [ cb_fwd = std::forward(callback), - rpc = *this ](error_code err, message_ex * req, message_ex * resp) mutable { + [cb_fwd = std::forward(callback), + rpc = *this](error_code err, message_ex *req, message_ex *resp) mutable { if (err == ERR_OK) { unmarshall(resp, rpc.response()); } diff --git a/src/runtime/rpc/rpc_host_port.cpp b/src/rpc/rpc_host_port.cpp similarity index 80% rename from src/runtime/rpc/rpc_host_port.cpp rename to src/rpc/rpc_host_port.cpp index 01a0474179..d7e4bd2b60 100644 --- a/src/runtime/rpc/rpc_host_port.cpp +++ b/src/rpc/rpc_host_port.cpp @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -27,14 +28,14 @@ #include #include "fmt/core.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/group_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/api_utilities.h" #include "utils/error_code.h" #include "utils/ports.h" +#include "utils/safe_strerror_posix.h" #include "utils/string_conv.h" #include "utils/timer.h" -#include "utils/utils.h" namespace dsn { @@ -57,9 +58,11 @@ host_port host_port::from_address(rpc_address addr) WARNING, 100, "construct host_port '{}' from rpc_address '{}'", hp, addr); switch (addr.type()) { case HOST_TYPE_IPV4: { - CHECK(utils::hostname_from_ip(htonl(addr.ip()), &hp._host), - "invalid host_port {}", - addr.ipv4_str()); + const auto s = lookup_hostname(htonl(addr.ip()), &hp._host); + if (dsn_unlikely(!s)) { + LOG_WARNING("lookup_hostname failed for {}: {}", addr.ipv4_str(), s.description()); + return hp; + } hp._port = addr.port(); } break; case HOST_TYPE_GROUP: { @@ -69,7 +72,7 @@ host_port host_port::from_address(rpc_address addr) break; } - // Now is valid. + // 'hp' become valid now. hp._type = addr.type(); return hp; } @@ -216,4 +219,34 @@ error_s host_port::resolve_addresses(std::vector &addresses) const return error_s::ok(); } +error_s host_port::lookup_hostname(uint32_t ip, std::string *hostname) +{ + struct sockaddr_in addr_in; + addr_in.sin_family = AF_INET; + addr_in.sin_port = 0; + addr_in.sin_addr.s_addr = ip; + char host[NI_MAXHOST]; + int rc = ::getnameinfo((struct sockaddr *)(&addr_in), + sizeof(struct sockaddr), + host, + sizeof(host), + nullptr, + 0, + NI_NAMEREQD); + if (dsn_unlikely(rc != 0)) { + if (rc == EAI_SYSTEM) { + return error_s::make(dsn::ERR_NETWORK_FAILURE, + fmt::format("{}: {}: getnameinfo failed", + gai_strerror(rc), + dsn::utils::safe_strerror(errno))); + } + + return error_s::make(dsn::ERR_NETWORK_FAILURE, + fmt::format("{}: getnameinfo failed", gai_strerror(rc))); + } + + *hostname = host; + return error_s::ok(); +} + } // namespace dsn diff --git a/src/runtime/rpc/rpc_host_port.h b/src/rpc/rpc_host_port.h similarity index 99% rename from src/runtime/rpc/rpc_host_port.h rename to src/rpc/rpc_host_port.h index 67fae88435..42685efe86 100644 --- a/src/runtime/rpc/rpc_host_port.h +++ b/src/rpc/rpc_host_port.h @@ -31,7 +31,7 @@ #include -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" #include "utils/errors.h" #include "utils/fmt_logging.h" #include "utils/fmt_utils.h" @@ -313,6 +313,7 @@ class host_port private: friend class dns_resolver; friend class rpc_group_host_port; + FRIEND_TEST(host_port_test, lookup_hostname); FRIEND_TEST(host_port_test, transfer_rpc_address); static const host_port s_invalid_host_port; @@ -321,6 +322,9 @@ class host_port // There may be multiple rpc_addresses for one host_port. error_s resolve_addresses(std::vector &addresses) const; + // Does reverse DNS lookup of the address and stores it in hostname. + static error_s lookup_hostname(uint32_t ip, std::string *hostname); + std::string _host; uint16_t _port = 0; dsn_host_type_t _type = HOST_TYPE_INVALID; diff --git a/src/runtime/rpc/rpc_message.cpp b/src/rpc/rpc_message.cpp similarity index 99% rename from src/runtime/rpc/rpc_message.cpp rename to src/rpc/rpc_message.cpp index 00412db721..394abf3ed4 100644 --- a/src/runtime/rpc/rpc_message.cpp +++ b/src/rpc/rpc_message.cpp @@ -34,9 +34,9 @@ #include #include "network.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" #include "utils/crc.h" #include "utils/flags.h" #include "utils/fmt_logging.h" diff --git a/src/runtime/rpc/rpc_message.h b/src/rpc/rpc_message.h similarity index 95% rename from src/runtime/rpc/rpc_message.h rename to src/rpc/rpc_message.h index 63e4ecb5be..9e1d73717d 100644 --- a/src/runtime/rpc/rpc_message.h +++ b/src/rpc/rpc_message.h @@ -34,8 +34,8 @@ #include "common/gpid.h" #include "rpc_address.h" #include "rpc_host_port.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/error_code.h" @@ -138,8 +138,12 @@ class message_ex : public ref_counter, public extensible_object rpc_session_ptr io_session; // send/recv session rpc_address to_address; // always ipv4/v6 address, it is the to_node's net address rpc_address server_address; // used by requests, and may be of uri/group address - host_port to_host_port; // fqdn from 'to_address' - host_port server_host_port; // fqdn from 'server_address' + // hostname from 'to_address'. It's possible to be invalid if 'to_address' can not be reverse + // resolved. + host_port to_host_port; + // hostname from 'server_address'. It's possible to be invalid if 'server_address' can not be + // reverse resolved. + host_port server_host_port; dsn::task_code local_rpc_code; network_header_format hdr_format; int send_retry_count; diff --git a/src/runtime/rpc/rpc_stream.h b/src/rpc/rpc_stream.h similarity index 98% rename from src/runtime/rpc/rpc_stream.h rename to src/rpc/rpc_stream.h index 62dfc82284..f9953e8d89 100644 --- a/src/runtime/rpc/rpc_stream.h +++ b/src/rpc/rpc_stream.h @@ -30,8 +30,8 @@ #include "runtime/api_layer1.h" #include "runtime/api_task.h" #include "runtime/app_model.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_code.h" +#include "rpc/rpc_message.h" +#include "task/task_code.h" #include "utils/api_utilities.h" #include "utils/binary_reader.h" #include "utils/binary_writer.h" diff --git a/src/runtime/rpc/rpc_task.cpp b/src/rpc/rpc_task.cpp similarity index 95% rename from src/runtime/rpc/rpc_task.cpp rename to src/rpc/rpc_task.cpp index 50b7e5e289..d0d94e3b40 100644 --- a/src/runtime/rpc/rpc_task.cpp +++ b/src/rpc/rpc_task.cpp @@ -30,13 +30,13 @@ #include "runtime/api_layer1.h" #include "runtime/api_task.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_message.h" #include "runtime/service_engine.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_engine.h" -#include "runtime/task/task_spec.h" -#include "runtime/task/task_worker.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_engine.h" +#include "task/task_spec.h" +#include "task/task_worker.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" #include "utils/join_point.h" diff --git a/src/runtime/rpc/serialization.h b/src/rpc/serialization.h similarity index 97% rename from src/runtime/rpc/serialization.h rename to src/rpc/serialization.h index 7aa5738a11..c8cf50cc05 100644 --- a/src/runtime/rpc/serialization.h +++ b/src/rpc/serialization.h @@ -27,9 +27,9 @@ #pragma once #include "utils/utils.h" -#include "rpc_address.h" -#include "rpc_host_port.h" -#include "runtime/rpc/rpc_stream.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_stream.h" #include "common/serialization_helper/thrift_helper.h" namespace dsn { diff --git a/src/rpc/test/CMakeLists.txt b/src/rpc/test/CMakeLists.txt new file mode 100644 index 0000000000..5a1a30d01f --- /dev/null +++ b/src/rpc/test/CMakeLists.txt @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +set(MY_PROJ_NAME dsn_rpc_tests) +set(MY_PROJ_SRC "") +set(MY_SRC_SEARCH_MODE "GLOB") +set(MY_PROJ_LIBS + dsn_meta_server + dsn_replication_common + dsn_runtime + dsn_utils + gtest) +set(MY_BINPLACES + config.ini + run.sh) +dsn_add_test() diff --git a/src/runtime/test/address_test.cpp b/src/rpc/test/address_test.cpp similarity index 99% rename from src/runtime/test/address_test.cpp rename to src/rpc/test/address_test.cpp index 01edb34a07..cb6fc0db2c 100644 --- a/src/runtime/test/address_test.cpp +++ b/src/rpc/test/address_test.cpp @@ -36,8 +36,8 @@ #include #include "gtest/gtest.h" -#include "runtime/rpc/group_address.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/group_address.h" +#include "rpc/rpc_address.h" #include "utils/errors.h" namespace dsn { diff --git a/src/rpc/test/config.ini b/src/rpc/test/config.ini new file mode 100644 index 0000000000..5b8763e033 --- /dev/null +++ b/src/rpc/test/config.ini @@ -0,0 +1,125 @@ +; Licensed to the Apache Software Foundation (ASF) under one +; or more contributor license agreements. See the NOTICE file +; distributed with this work for additional information +; regarding copyright ownership. The ASF licenses this file +; to you under the Apache License, Version 2.0 (the +; "License"); you may not use this file except in compliance +; with the License. You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, +; software distributed under the License is distributed on an +; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +; KIND, either express or implied. See the License for the +; specific language governing permissions and limitations +; under the License. + +[apps..default] +run = true +count = 1 + +[apps.client] +type = test +arguments = localhost 20101 +run = true +ports = 20001 +count = 1 +pools = THREAD_POOL_DEFAULT, THREAD_POOL_TEST_SERVER + +[apps.server] +type = test +arguments = +ports = 20101,20102 +run = true +count = 1 +pools = THREAD_POOL_DEFAULT, THREAD_POOL_TEST_SERVER + +[apps.server_group] +type = test +arguments = +ports = 20201 +run = true +count = 3 +pools = THREAD_POOL_DEFAULT, THREAD_POOL_TEST_SERVER + +[apps.server_not_run] +type = test +arguments = +ports = 20301 +run = false +count = 1 +pools = THREAD_POOL_DEFAULT, THREAD_POOL_TEST_SERVER + +[core] +tool = nativerun +toollets = tracer, profiler, fault_injector +pause_on_start = false +logging_start_level = LOG_LEVEL_DEBUG +logging_factory_name = dsn::tools::screen_logger + +[tools.simulator] +random_seed = 0 + +[network] +; how many network threads for network library (used by asio) +io_service_worker_count = 2 + +[task..default] +is_trace = true +is_profile = true +allow_inline = false +rpc_call_channel = RPC_CHANNEL_TCP +rpc_message_header_format = dsn +rpc_timeout_milliseconds = 1000 + +[task.RPC_TEST_HASH1_ACK] +is_trace = true +rpc_message_crc_required = true +rpc_request_drop_ratio = 0 +rpc_timeout_milliseconds = 1000 +rpc_request_data_corrupted_ratio = 1 +rpc_message_data_corrupted_type = header + +[task.RPC_TEST_HASH2_ACK] +is_trace = true +rpc_message_crc_required = true +rpc_request_drop_ratio = 0 +rpc_timeout_milliseconds = 1000 +rpc_request_data_corrupted_ratio = 1 +rpc_message_data_corrupted_type = body + +[task.RPC_TEST_HASH3_ACK] +is_trace = true +rpc_message_crc_required = true +rpc_response_drop_ratio = 0 +rpc_timeout_milliseconds = 1000 +rpc_response_data_corrupted_ratio = 1 +rpc_message_data_corrupted_type = header + +[task.RPC_TEST_HASH4_ACK] +is_trace = true +rpc_message_crc_required = true +rpc_response_drop_ratio = 0 +rpc_timeout_milliseconds = 1000 +rpc_response_data_corrupted_ratio = 1 +rpc_message_data_corrupted_type = body + +[task.LPC_RPC_TIMEOUT] +is_trace = false +is_profile = false + +[task.RPC_TEST_UDP] +rpc_call_channel = RPC_CHANNEL_UDP +rpc_message_crc_required = true + +; specification for each thread pool +[threadpool..default] +worker_count = 2 + +[threadpool.THREAD_POOL_DEFAULT] +partitioned = false +worker_priority = THREAD_xPRIORITY_NORMAL + +[threadpool.THREAD_POOL_TEST_SERVER] +partitioned = false diff --git a/src/runtime/test/corrupt_message.cpp b/src/rpc/test/corrupt_message_test.cpp similarity index 67% rename from src/runtime/test/corrupt_message.cpp rename to src/rpc/test/corrupt_message_test.cpp index 6b1200fa65..a97818f00c 100644 --- a/src/runtime/test/corrupt_message.cpp +++ b/src/rpc/test/corrupt_message_test.cpp @@ -28,32 +28,27 @@ #include #include #include +#include #include "gtest/gtest.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/task/async_calls.h" +#include "rpc/rpc_address.h" #include "runtime/test_utils.h" +#include "task/async_calls.h" +#include "task/task_code.h" #include "utils/error_code.h" +// TODO(yingchun): the tests are failed because the fault injector is not work well as expected. +// Now just disable the tests before we fix it. // this only works with the fault injector -TEST(core, corrupt_message) +TEST(corrupt_message_test, DISABLED_basic) { int req = 0; const auto server = dsn::rpc_address::from_host_port("localhost", 20101); - - auto result = ::dsn::rpc::call_wait( - server, RPC_TEST_HASH1, req, std::chrono::milliseconds(0), 1); - ASSERT_EQ(result.first, dsn::ERR_TIMEOUT); - - result = ::dsn::rpc::call_wait( - server, RPC_TEST_HASH2, req, std::chrono::milliseconds(0), 1); - ASSERT_EQ(result.first, dsn::ERR_TIMEOUT); - - result = ::dsn::rpc::call_wait( - server, RPC_TEST_HASH3, req, std::chrono::milliseconds(0), 1); - ASSERT_EQ(result.first, dsn::ERR_TIMEOUT); - - result = ::dsn::rpc::call_wait( - server, RPC_TEST_HASH4, req, std::chrono::milliseconds(0), 1); - ASSERT_EQ(result.first, dsn::ERR_TIMEOUT); + std::vector codes( + {RPC_TEST_HASH1, RPC_TEST_HASH2, RPC_TEST_HASH3, RPC_TEST_HASH4}); + for (const auto &code : codes) { + const auto result = + ::dsn::rpc::call_wait(server, code, req, std::chrono::milliseconds(1), 1); + EXPECT_EQ(dsn::ERR_TIMEOUT, result.first) << code; + } } diff --git a/src/runtime/test/host_port_test.cpp b/src/rpc/test/host_port_test.cpp similarity index 93% rename from src/runtime/test/host_port_test.cpp rename to src/rpc/test/host_port_test.cpp index a6e7334cfe..8fdb7a4e21 100644 --- a/src/runtime/test/host_port_test.cpp +++ b/src/rpc/test/host_port_test.cpp @@ -17,7 +17,9 @@ * under the License. */ +#include // IWYU pragma: keep #include +#include #include #include #include @@ -29,18 +31,18 @@ #include "fd_types.h" #include "gtest/gtest.h" #include "meta_admin_types.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/group_address.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_spec.h" -#include "runtime/task/task_tracker.h" +#include "rpc/dns_resolver.h" +#include "rpc/group_address.h" +#include "rpc/group_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/test_utils.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_spec.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/errors.h" @@ -69,8 +71,7 @@ TEST(host_port_test, host_port_build) { const auto addr = rpc_address::from_host_port("localhost", 8080); - host_port hp1 = host_port::from_address(addr); - ASSERT_EQ(hp, hp1); + ASSERT_EQ(hp, host_port::from_address(addr)); } } @@ -201,8 +202,7 @@ TEST(host_port_test, rpc_group_host_port) ASSERT_EQ(addr2, g_addr->leader()); ASSERT_EQ(2, g_addr->count()); - host_port hp_grp2; - hp_grp2 = host_port::from_address(addr_grp); + host_port hp_grp2 = host_port::from_address(addr_grp); ASSERT_EQ(HOST_TYPE_GROUP, hp_grp2.type()); auto g_hp = hp_grp2.group_host_port(); @@ -262,6 +262,21 @@ TEST(host_port_test, thrift_parser) send_and_check_host_port_by_serialize(hp2, DSF_THRIFT_JSON); } +TEST(host_port_test, lookup_hostname) +{ + const std::string valid_ip = "127.0.0.1"; + const std::string expected_hostname = "localhost"; + + const auto rpc_example_valid = rpc_address::from_ip_port(valid_ip, 23010); + std::string hostname; + auto es = host_port::lookup_hostname(htonl(rpc_example_valid.ip()), &hostname); + ASSERT_TRUE(es.is_ok()) << es.description(); + ASSERT_EQ(expected_hostname, hostname); + + es = host_port::lookup_hostname(12321, &hostname); + ASSERT_FALSE(es.is_ok()); +} + TEST(host_port_test, test_macros) { static const host_port kHp1("localhost", 8081); diff --git a/src/utils/test/hostname_test.cpp b/src/rpc/test/main.cpp similarity index 54% rename from src/utils/test/hostname_test.cpp rename to src/rpc/test/main.cpp index 29c94aa2df..e43ee68ee5 100644 --- a/src/utils/test/hostname_test.cpp +++ b/src/rpc/test/main.cpp @@ -15,28 +15,29 @@ // specific language governing permissions and limitations // under the License. -#include -#include +#include +#include +#include -#include "gtest/gtest.h" -#include "runtime/rpc/rpc_address.h" -#include "utils/utils.h" +#include "runtime/app_model.h" +#include "runtime/service_app.h" +#include "runtime/test_utils.h" -namespace dsn { -namespace replication { +int g_test_count = 0; +int g_test_ret = 0; -TEST(ip_to_hostname, localhost) +GTEST_API_ int main(int argc, char **argv) { - const std::string valid_ip = "127.0.0.1"; - const std::string expected_hostname = "localhost"; + testing::InitGoogleTest(&argc, argv); - const auto rpc_example_valid = rpc_address::from_ip_port(valid_ip, 23010); + dsn::service_app::register_factory("test"); + dsn_run(argc, argv, false); + while (g_test_count == 0) { + std::this_thread::sleep_for(std::chrono::seconds(1)); + } - // bool hostname_from_ip(uint32_t ip, std::string *hostname_result) - std::string hostname_result; - ASSERT_TRUE(dsn::utils::hostname_from_ip(htonl(rpc_example_valid.ip()), &hostname_result)); - ASSERT_EQ(expected_hostname, hostname_result); +#ifndef ENABLE_GCOV + dsn_exit(g_test_ret); +#endif + return g_test_ret; } - -} // namespace replication -} // namespace dsn diff --git a/src/runtime/test/message_reader_test.cpp b/src/rpc/test/message_reader_test.cpp similarity index 98% rename from src/runtime/test/message_reader_test.cpp rename to src/rpc/test/message_reader_test.cpp index 515dcf8370..3dcf1e2840 100644 --- a/src/runtime/test/message_reader_test.cpp +++ b/src/rpc/test/message_reader_test.cpp @@ -19,7 +19,7 @@ #include #include "gtest/gtest.h" -#include "runtime/rpc/message_parser.h" +#include "rpc/message_parser.h" #include "utils/blob.h" namespace dsn { diff --git a/src/runtime/test/message_utils_test.cpp b/src/rpc/test/message_utils_test.cpp similarity index 92% rename from src/runtime/test/message_utils_test.cpp rename to src/rpc/test/message_utils_test.cpp index 7d280db8b6..cffd9b6bfd 100644 --- a/src/runtime/test/message_utils_test.cpp +++ b/src/rpc/test/message_utils_test.cpp @@ -32,8 +32,8 @@ #include "common/replication.codes.h" #include "dsn.layer2_types.h" #include "gtest/gtest.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_message.h" #include "utils/autoref_ptr.h" #include "utils/threadpool_code.h" @@ -43,7 +43,7 @@ DEFINE_TASK_CODE_RPC(RPC_CODE_FOR_TEST, TASK_PRIORITY_COMMON, THREAD_POOL_DEFAUL typedef rpc_holder t_rpc; -TEST(message_utils, msg_blob_convertion) +TEST(message_utils_test, msg_blob_convertion) { std::string data = "hello"; @@ -54,7 +54,7 @@ TEST(message_utils, msg_blob_convertion) ASSERT_EQ(b.to_string(), move_message_to_blob(m.get()).to_string()); } -TEST(message_utils, thrift_msg_convertion) +TEST(message_utils_test, thrift_msg_convertion) { query_cfg_request request; request.app_name = "haha"; @@ -66,7 +66,7 @@ TEST(message_utils, thrift_msg_convertion) ASSERT_EQ(rpc.request().app_name, "haha"); } -TEST(message_utils, complex_convertion) +TEST(message_utils_test, complex_convertion) { query_cfg_request request; request.app_name = "haha"; diff --git a/src/runtime/test/netprovider.cpp b/src/rpc/test/net_provider_test.cpp similarity index 88% rename from src/runtime/test/netprovider.cpp rename to src/rpc/test/net_provider_test.cpp index 4de70ed00e..68c80432be 100644 --- a/src/runtime/test/netprovider.cpp +++ b/src/rpc/test/net_provider_test.cpp @@ -35,17 +35,17 @@ #include "runtime/api_layer1.h" #include "runtime/api_task.h" #include "runtime/global_config.h" -#include "runtime/rpc/asio_net_provider.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/network.sim.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_engine.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" +#include "rpc/asio_net_provider.h" +#include "rpc/network.h" +#include "rpc/network.sim.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_engine.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/service_engine.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "runtime/test_utils.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" @@ -133,11 +133,13 @@ void rpc_client_session_send(rpc_session_ptr client_session, bool reject = false wait_response(); } -TEST(tools_common, asio_net_provider) +TEST(net_provider_test, asio_net_provider) { if (dsn::service_engine::instance().spec().semaphore_factory_name == - "dsn::tools::sim_semaphore_provider") - return; + "dsn::tools::sim_semaphore_provider") { + GTEST_SKIP() << "Skip the test in simulator mode, set 'tool = nativerun' in '[core]' " + "section in config file to enable it."; + } ASSERT_TRUE(dsn_rpc_register_handler( RPC_TEST_NETPROVIDER, "rpc.test.netprovider", rpc_server_response)); @@ -180,11 +182,13 @@ TEST(tools_common, asio_net_provider) TEST_PORT++; } -TEST(tools_common, asio_udp_provider) +TEST(net_provider_test, asio_udp_provider) { if (dsn::service_engine::instance().spec().semaphore_factory_name == - "dsn::tools::sim_semaphore_provider") - return; + "dsn::tools::sim_semaphore_provider") { + GTEST_SKIP() << "Skip the test in simulator mode, set 'tool = nativerun' in '[core]' " + "section in config file to enable it."; + } ASSERT_TRUE(dsn_rpc_register_handler( RPC_TEST_NETPROVIDER, "rpc.test.netprovider", rpc_server_response)); @@ -223,11 +227,13 @@ TEST(tools_common, asio_udp_provider) TEST_PORT++; } -TEST(tools_common, sim_net_provider) +TEST(net_provider_test, sim_net_provider) { if (dsn::service_engine::instance().spec().semaphore_factory_name == - "dsn::tools::sim_semaphore_provider") - return; + "dsn::tools::sim_semaphore_provider") { + GTEST_SKIP() << "Skip the test in simulator mode, set 'tool = nativerun' in '[core]' " + "section in config file to enable it."; + } ASSERT_TRUE(dsn_rpc_register_handler( RPC_TEST_NETPROVIDER, "rpc.test.netprovider", rpc_server_response)); @@ -253,11 +259,13 @@ TEST(tools_common, sim_net_provider) TEST_PORT++; } -TEST(tools_common, asio_network_provider_connection_threshold) +TEST(net_provider_test, asio_network_provider_connection_threshold) { if (dsn::service_engine::instance().spec().semaphore_factory_name == - "dsn::tools::sim_semaphore_provider") - return; + "dsn::tools::sim_semaphore_provider") { + GTEST_SKIP() << "Skip the test in simulator mode, set 'tool = nativerun' in '[core]' " + "section in config file to enable it."; + } ASSERT_TRUE(dsn_rpc_register_handler( RPC_TEST_NETPROVIDER, "rpc.test.netprovider", rpc_server_response)); diff --git a/src/runtime/test/rpc_holder_test.cpp b/src/rpc/test/rpc_holder_test.cpp similarity index 95% rename from src/runtime/test/rpc_holder_test.cpp rename to src/rpc/test/rpc_holder_test.cpp index 88eb3b31e5..37a7bea11a 100644 --- a/src/runtime/test/rpc_holder_test.cpp +++ b/src/rpc/test/rpc_holder_test.cpp @@ -24,7 +24,7 @@ * THE SOFTWARE. */ -#include "runtime/rpc/rpc_holder.h" +#include "rpc/rpc_holder.h" #include #include @@ -33,7 +33,7 @@ #include "common/serialization_helper/dsn.layer2_types.h" #include "gtest/gtest.h" #include "runtime/message_utils.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" #include "utils/threadpool_code.h" using namespace dsn; @@ -44,13 +44,13 @@ DEFINE_TASK_CODE_RPC(RPC_CM_QUERY_PARTITION_CONFIG_BY_INDEX, TASK_PRIORITY_COMMON, THREAD_POOL_DEFAULT) -TEST(rpc_holder, type_traits) +TEST(rpc_holder_test, type_traits) { ASSERT_FALSE(is_rpc_holder::value); ASSERT_TRUE(is_rpc_holder::value); } -TEST(rpc_holder, construct) +TEST(rpc_holder_test, construct) { { t_rpc rpc; @@ -90,7 +90,7 @@ TEST(rpc_holder, construct) } } -TEST(rpc_holder, mock_rpc_call) +TEST(rpc_holder_test, mock_rpc_call) { RPC_MOCKING(t_rpc) { @@ -140,7 +140,7 @@ TEST(rpc_holder, mock_rpc_call) } } -TEST(rpc_holder, mock_rpc_reply) +TEST(rpc_holder_test, mock_rpc_reply) { RPC_MOCKING(t_rpc) { @@ -160,7 +160,7 @@ TEST(rpc_holder, mock_rpc_reply) } } -TEST(rpc_holder, mock_rpc_forward) +TEST(rpc_holder_test, mock_rpc_forward) { RPC_MOCKING(t_rpc) { diff --git a/src/runtime/test/rpc_message.cpp b/src/rpc/test/rpc_message_test.cpp similarity index 95% rename from src/runtime/test/rpc_message.cpp rename to src/rpc/test/rpc_message_test.cpp index 3c87913938..19bde1e2d5 100644 --- a/src/runtime/test/rpc_message.cpp +++ b/src/rpc/test/rpc_message_test.cpp @@ -33,13 +33,12 @@ #include "common/gpid.h" #include "dsn.layer2_types.h" #include "gtest/gtest.h" -#include "runtime/message_utils.cpp" +#include "rpc/rpc_address.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/message_utils.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/crc.h" @@ -49,7 +48,7 @@ using namespace ::dsn; DEFINE_TASK_CODE_RPC(RPC_CODE_FOR_TEST, TASK_PRIORITY_COMMON, ::dsn::THREAD_POOL_DEFAULT) -TEST(core, message_ex) +TEST(rpc_message_test, message_ex) { msg_context_t ctx0, ctx1; ctx0.context = 0; @@ -192,7 +191,7 @@ TEST(core, message_ex) } } -TEST(rpc_message, restore_read) +TEST(rpc_message_test, restore_read) { using namespace dsn; query_cfg_request request, result; @@ -203,7 +202,7 @@ TEST(rpc_message, restore_read) } } -TEST(rpc_message, create_receive_message_with_standalone_header) +TEST(rpc_message_test, create_receive_message_with_standalone_header) { auto data = blob::create_from_bytes("10086"); @@ -213,7 +212,7 @@ TEST(rpc_message, create_receive_message_with_standalone_header) ASSERT_EQ(msg->header->body_length, data.length()); } -TEST(rpc_message, copy_message_no_reply) +TEST(rpc_message_test, copy_message_no_reply) { auto data = blob::create_from_bytes("10086"); message_ptr old_msg = message_ex::create_receive_message_with_standalone_header(data); diff --git a/src/runtime/test/rpc.cpp b/src/rpc/test/rpc_test.cpp similarity index 95% rename from src/runtime/test/rpc.cpp rename to src/rpc/test/rpc_test.cpp index 3682ca5789..71a0f82d80 100644 --- a/src/runtime/test/rpc.cpp +++ b/src/rpc/test/rpc_test.cpp @@ -32,14 +32,14 @@ #include #include "gtest/gtest.h" -#include "runtime/rpc/group_address.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" +#include "rpc/group_address.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "runtime/test_utils.h" +#include "task/async_calls.h" +#include "task/task.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" @@ -61,7 +61,7 @@ static dsn::rpc_address build_group() return server_group; } -TEST(core, rpc) +TEST(rpc_test, rpc) { int req = 0; const auto server = ::dsn::rpc_address::from_host_port("localhost", 20101); @@ -74,7 +74,7 @@ TEST(core, rpc) "server.THREAD_POOL_TEST_SERVER"); } -TEST(core, group_address_talk_to_others) +TEST(rpc_test, group_address_talk_to_others) { ::dsn::rpc_address addr = build_group(); @@ -93,7 +93,7 @@ TEST(core, group_address_talk_to_others) resp->wait(); } -TEST(core, group_address_change_leader) +TEST(rpc_test, group_address_change_leader) { ::dsn::rpc_address addr = build_group(); @@ -200,7 +200,7 @@ static void send_message(::dsn::rpc_address addr, } } -TEST(core, group_address_no_response_2) +TEST(rpc_test, group_address_no_response_2) { ::dsn::rpc_address addr = build_group(); rpc_reply_handler action_on_succeed = @@ -222,7 +222,7 @@ TEST(core, group_address_no_response_2) send_message(addr, std::string("expect_no_reply"), 1, action_on_succeed, action_on_failure); } -TEST(core, send_to_invalid_address) +TEST(rpc_test, send_to_invalid_address) { ::dsn::rpc_address group = build_group(); /* here we assume 127.0.0.1:32766 is not assigned */ diff --git a/src/rpc/test/run.sh b/src/rpc/test/run.sh new file mode 100755 index 0000000000..be3542e599 --- /dev/null +++ b/src/rpc/test/run.sh @@ -0,0 +1,25 @@ +#!/bin/sh +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +if [ -z "${REPORT_DIR}" ]; then + REPORT_DIR="." +fi + +rm -rf data dsn_rpc_tests.xml +output_xml="${REPORT_DIR}/dsn_rpc_tests.xml" +GTEST_OUTPUT="xml:${output_xml}" ./dsn_rpc_tests config.ini diff --git a/src/runtime/test/thrift_message_parser_test.cpp b/src/rpc/test/thrift_message_parser_test.cpp similarity index 98% rename from src/runtime/test/thrift_message_parser_test.cpp rename to src/rpc/test/thrift_message_parser_test.cpp index 722b2860b6..acc59d088a 100644 --- a/src/runtime/test/thrift_message_parser_test.cpp +++ b/src/rpc/test/thrift_message_parser_test.cpp @@ -28,12 +28,12 @@ #include "common/serialization_helper/thrift_helper.h" #include "gtest/gtest.h" #include "request_meta_types.h" -#include "runtime/rpc/message_parser.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/rpc_stream.h" -#include "runtime/rpc/thrift_message_parser.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "rpc/message_parser.h" +#include "rpc/rpc_message.h" +#include "rpc/rpc_stream.h" +#include "rpc/thrift_message_parser.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/binary_writer.h" #include "utils/blob.h" diff --git a/src/runtime/rpc/thrift_message_parser.cpp b/src/rpc/thrift_message_parser.cpp similarity index 98% rename from src/runtime/rpc/thrift_message_parser.cpp rename to src/rpc/thrift_message_parser.cpp index 08bbfcb872..e9731413b7 100644 --- a/src/runtime/rpc/thrift_message_parser.cpp +++ b/src/rpc/thrift_message_parser.cpp @@ -34,8 +34,8 @@ #include "boost/smart_ptr/shared_ptr.hpp" #include "common/gpid.h" #include "common/serialization_helper/thrift_helper.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/rpc_stream.h" +#include "rpc/rpc_message.h" +#include "rpc/rpc_stream.h" #include "thrift/protocol/TBinaryProtocol.h" #include "thrift/protocol/TBinaryProtocol.tcc" #include "thrift/protocol/TProtocol.h" @@ -46,7 +46,7 @@ #include "utils/endians.h" #include "utils/fmt_logging.h" #include "utils/fmt_utils.h" -#include "absl/strings/string_view.h" +#include #include "utils/strings.h" namespace dsn { @@ -340,7 +340,7 @@ void thrift_message_parser::prepare_on_send(message_ex *msg) // first total length, but we don't know the length, so firstly we put a placeholder header_proto.writeI32(0); // then the error_message - header_proto.writeString(absl::string_view(header->server.error_name)); + header_proto.writeString(std::string_view(header->server.error_name)); // then the thrift message begin header_proto.writeMessageBegin( header->rpc_name, ::apache::thrift::protocol::T_REPLY, header->id); diff --git a/src/runtime/rpc/thrift_message_parser.h b/src/rpc/thrift_message_parser.h similarity index 98% rename from src/runtime/rpc/thrift_message_parser.h rename to src/rpc/thrift_message_parser.h index dc76e41dd4..71b1ec222a 100644 --- a/src/runtime/rpc/thrift_message_parser.h +++ b/src/rpc/thrift_message_parser.h @@ -31,8 +31,8 @@ #include #include "request_meta_types.h" -#include "runtime/rpc/message_parser.h" -#include "runtime/task/task_spec.h" +#include "rpc/message_parser.h" +#include "task/task_spec.h" #include "utils/customizable_id.h" namespace dsn { diff --git a/src/runtime/CMakeLists.txt b/src/runtime/CMakeLists.txt index 6527ffb17f..5fd2fbe8e6 100644 --- a/src/runtime/CMakeLists.txt +++ b/src/runtime/CMakeLists.txt @@ -23,13 +23,9 @@ # THE SOFTWARE. add_subdirectory(test) -add_subdirectory(rpc) -add_subdirectory(task) # TODO(zlw) remove perf_counter from dsn_runtime after the refactor by WuTao add_library(dsn_runtime STATIC - $ - $ $ core_main.cpp dsn.layer2_types.cpp @@ -48,6 +44,6 @@ add_library(dsn_runtime STATIC tool_api.cpp tracer.cpp zlocks.cpp) -target_link_libraries(dsn_runtime PRIVATE dsn_security dsn_utils sasl2 gssapi_krb5 krb5) +target_link_libraries(dsn_runtime PRIVATE dsn_rpc dsn_security dsn_task dsn_utils sasl2 gssapi_krb5 krb5) define_file_basename_for_sources(dsn_runtime) install(TARGETS dsn_runtime DESTINATION "lib") diff --git a/src/runtime/api_layer1.h b/src/runtime/api_layer1.h index 6c2d675a13..307982f76f 100644 --- a/src/runtime/api_layer1.h +++ b/src/runtime/api_layer1.h @@ -30,11 +30,11 @@ #pragma once -#include "runtime/api_task.h" #include "common/gpid.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "runtime/api_task.h" +#include "task/task_tracker.h" /*! @defgroup service-api-c Core Service API diff --git a/src/runtime/api_task.h b/src/runtime/api_task.h index 477dc4bef0..7900028738 100644 --- a/src/runtime/api_task.h +++ b/src/runtime/api_task.h @@ -33,7 +33,7 @@ #include "utils/error_code.h" #include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" /*! @addtogroup task-common @@ -70,7 +70,7 @@ class raw_task; class rpc_request_task; class rpc_response_task; class aio_task; -} +} // namespace dsn /*! apps updates the value at dsn_task_queue_virtual_length_ptr(..) to control the length of a vitual queue (bound to current code + hash) to diff --git a/src/runtime/env.sim.cpp b/src/runtime/env.sim.cpp index 3b29d56624..1b19fd707a 100644 --- a/src/runtime/env.sim.cpp +++ b/src/runtime/env.sim.cpp @@ -28,7 +28,7 @@ #include -#include "runtime/task/task_worker.h" +#include "task/task_worker.h" #include "utils/flags.h" #include "utils/fmt_logging.h" #include "utils/join_point.h" diff --git a/src/runtime/env.sim.h b/src/runtime/env.sim.h index 2785bb102e..4846d75618 100644 --- a/src/runtime/env.sim.h +++ b/src/runtime/env.sim.h @@ -41,5 +41,5 @@ class sim_env_provider : public env_provider private: static void on_worker_start(task_worker *worker); }; -} -} // end namespace +} // namespace tools +} // namespace dsn diff --git a/src/runtime/fault_injector.cpp b/src/runtime/fault_injector.cpp index 653c052a45..f14bcebdcc 100644 --- a/src/runtime/fault_injector.cpp +++ b/src/runtime/fault_injector.cpp @@ -34,10 +34,10 @@ #include "aio/aio_task.h" #include "fmt/core.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "rpc/rpc_message.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/blob.h" #include "utils/config_api.h" #include "utils/config_helper.h" @@ -354,5 +354,5 @@ void fault_injector::install(service_spec &spec) } fault_injector::fault_injector(const char *name) : toollet(name) {} -} -} +} // namespace tools +} // namespace dsn diff --git a/src/runtime/fault_injector.h b/src/runtime/fault_injector.h index cae753acf5..00ebb1e051 100644 --- a/src/runtime/fault_injector.h +++ b/src/runtime/fault_injector.h @@ -92,5 +92,5 @@ class fault_injector : public toollet explicit fault_injector(const char *name); void install(service_spec &spec) override; }; -} -} +} // namespace tools +} // namespace dsn diff --git a/src/runtime/global_config.cpp b/src/runtime/global_config.cpp index a113969a49..3238708296 100644 --- a/src/runtime/global_config.cpp +++ b/src/runtime/global_config.cpp @@ -33,7 +33,7 @@ #include "runtime/global_config.h" #include "runtime/service_app.h" -#include "runtime/task/task_spec.h" +#include "task/task_spec.h" #include "utils/config_api.h" #include "utils/filesystem.h" #include "utils/fmt_logging.h" diff --git a/src/runtime/global_config.h b/src/runtime/global_config.h index 65d62bdddd..87c36cb975 100644 --- a/src/runtime/global_config.h +++ b/src/runtime/global_config.h @@ -36,7 +36,7 @@ #include #include -#include "runtime/task/task_spec.h" +#include "task/task_spec.h" #include "utils/config_api.h" #include "utils/config_helper.h" #include "utils/customizable_id.h" @@ -169,8 +169,7 @@ struct service_spec std::vector threadpool_specs; std::vector app_specs; - // auto-set - std::string dir_log; + std::string log_dir; service_spec() {} bool init(); @@ -186,6 +185,11 @@ CONFIG_FLD_STRING_LIST(toollets, CONFIG_FLD_STRING(data_dir, "./data", "The default directory to place the all the data, logs, coredump files, and etc.") +CONFIG_FLD_STRING_BY_KEY( + log_dir, + "log_dir", + "", + "The directory to place the logs especially. 'data_dir' will be used if it's empty.") CONFIG_FLD( bool, bool, diff --git a/src/runtime/message_utils.cpp b/src/runtime/message_utils.cpp index 35bd29c778..65c9459e26 100644 --- a/src/runtime/message_utils.cpp +++ b/src/runtime/message_utils.cpp @@ -28,7 +28,7 @@ #include -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_message.h" namespace dsn { diff --git a/src/runtime/message_utils.h b/src/runtime/message_utils.h index 954b54cd2f..0ba08f74f5 100644 --- a/src/runtime/message_utils.h +++ b/src/runtime/message_utils.h @@ -19,9 +19,9 @@ #include -#include "runtime/rpc/rpc_stream.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "rpc/rpc_stream.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "thrift_helper.h" #include "utils/binary_reader.h" #include "utils/binary_writer.h" diff --git a/src/runtime/nativerun.cpp b/src/runtime/nativerun.cpp index 93eb9e967f..dbaa880732 100644 --- a/src/runtime/nativerun.cpp +++ b/src/runtime/nativerun.cpp @@ -31,7 +31,7 @@ #include #include "runtime/global_config.h" -#include "runtime/task/task_spec.h" +#include "task/task_spec.h" #include "utils/flags.h" #include "utils/threadpool_spec.h" diff --git a/src/runtime/nativerun.h b/src/runtime/nativerun.h index 2acdae6cca..d57b541699 100644 --- a/src/runtime/nativerun.h +++ b/src/runtime/nativerun.h @@ -43,5 +43,5 @@ class nativerun : public tool_app virtual void run() override; }; -} -} // end namespace dsn::tools +} // namespace tools +} // namespace dsn diff --git a/src/runtime/node_scoper.h b/src/runtime/node_scoper.h index 5b43e2ca5b..d6000911b0 100644 --- a/src/runtime/node_scoper.h +++ b/src/runtime/node_scoper.h @@ -47,5 +47,5 @@ class node_scoper }; // ---- inline implementation ------ -} -} // end namespace dsn::tools +} // namespace tools +} // namespace dsn diff --git a/src/runtime/pipeline.h b/src/runtime/pipeline.h index e4dbcd8d84..4c555c1f77 100644 --- a/src/runtime/pipeline.h +++ b/src/runtime/pipeline.h @@ -20,9 +20,9 @@ #pragma once #include -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" -#include "runtime/task/async_calls.h" +#include "task/task_code.h" +#include "task/task_tracker.h" +#include "task/async_calls.h" #include "utils/chrono_literals.h" namespace dsn { @@ -106,7 +106,7 @@ struct result // }); // ``` // - void step_down_next_stage(Args &&... args) + void step_down_next_stage(Args &&...args) { CHECK_NOTNULL(__func, "no next stage is linked"); __func(std::make_tuple(std::forward(args)...)); @@ -181,16 +181,14 @@ struct base : environment // link to node of existing pipeline if (next.__pipeline != nullptr) { - this_stage->__func = [next_ptr = &next](ArgsTupleType && args) mutable - { + this_stage->__func = [next_ptr = &next](ArgsTupleType &&args) mutable { absl::apply(&NextStage::async, std::tuple_cat(std::make_tuple(next_ptr), std::move(args))); }; } else { next.__conf = this_stage->__conf; next.__pipeline = this_stage->__pipeline; - this_stage->__func = [next_ptr = &next](ArgsTupleType && args) mutable - { + this_stage->__func = [next_ptr = &next](ArgsTupleType &&args) mutable { if (next_ptr->paused()) { return; } @@ -240,22 +238,23 @@ template struct when : environment { /// Run this stage within current context. - virtual void run(Args &&... in) = 0; + virtual void run(Args &&...in) = 0; - void repeat(Args &&... in, std::chrono::milliseconds delay_ms = 0_ms) + void repeat(Args &&...in, std::chrono::milliseconds delay_ms = 0_ms) { auto arg_tuple = std::make_tuple(this, std::forward(in)...); - schedule([ this, args = std::move(arg_tuple) ]() mutable { - if (paused()) { - return; - } - absl::apply(&when::run, std::move(args)); - }, - delay_ms); + schedule( + [this, args = std::move(arg_tuple)]() mutable { + if (paused()) { + return; + } + absl::apply(&when::run, std::move(args)); + }, + delay_ms); } /// Run this stage asynchronously in its environment. - void async(Args &&... in) { repeat(std::forward(in)...); } + void async(Args &&...in) { repeat(std::forward(in)...); } bool paused() const { return __pipeline->paused(); } @@ -279,9 +278,9 @@ inline void base::run_pipeline() template struct do_when : when { - explicit do_when(std::function &&func) : _cb(std::move(func)) {} + explicit do_when(std::function &&func) : _cb(std::move(func)) {} - void run(Args &&... args) override { _cb(std::forward(args)...); } + void run(Args &&...args) override { _cb(std::forward(args)...); } virtual ~do_when() = default; diff --git a/src/runtime/profiler.cpp b/src/runtime/profiler.cpp index 7571714d89..0c06fc4649 100644 --- a/src/runtime/profiler.cpp +++ b/src/runtime/profiler.cpp @@ -49,23 +49,22 @@ START<== queue(server) == ENQUEUE <===== net(reply) ======= REPLY <============= */ #include "runtime/profiler.h" -#include #include #include #include #include #include +#include #include -#include "absl/strings/string_view.h" #include "aio/aio_task.h" #include "fmt/core.h" #include "profiler_header.h" +#include "rpc/rpc_message.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/config_api.h" #include "utils/extensible_object.h" #include "utils/flags.h" @@ -370,7 +369,7 @@ metric_entity_ptr instantiate_profiler_metric_entity(const std::string &task_nam task_spec_profiler::task_spec_profiler(int code) : collect_call_count(false), is_profile(false), - call_counts(new std::atomic[ s_task_code_max + 1 ]), + call_counts(new std::atomic[s_task_code_max + 1]), _task_name(dsn::task_code(code).to_string()), _profiler_metric_entity(instantiate_profiler_metric_entity(_task_name)) { diff --git a/src/runtime/providers.common.cpp b/src/runtime/providers.common.cpp index 17a8b195d7..e449782e7f 100644 --- a/src/runtime/providers.common.cpp +++ b/src/runtime/providers.common.cpp @@ -26,18 +26,18 @@ #include +#include "rpc/asio_net_provider.h" +#include "rpc/dsn_message_parser.h" +#include "rpc/network.sim.h" +#include "rpc/raw_message_parser.h" +#include "rpc/thrift_message_parser.h" #include "runtime/env_provider.h" #include "runtime/providers.common.h" -#include "runtime/rpc/asio_net_provider.h" -#include "runtime/rpc/dsn_message_parser.h" -#include "runtime/rpc/network.sim.h" -#include "runtime/rpc/raw_message_parser.h" -#include "runtime/rpc/thrift_message_parser.h" -#include "runtime/task/hpc_task_queue.h" -#include "runtime/task/simple_task_queue.h" -#include "runtime/task/task_spec.h" -#include "runtime/task/task_worker.h" #include "runtime/tool_api.h" +#include "task/hpc_task_queue.h" +#include "task/simple_task_queue.h" +#include "task/task_spec.h" +#include "task/task_worker.h" #include "utils/flags.h" #include "utils/lockp.std.h" #include "utils/zlock_provider.h" diff --git a/src/runtime/providers.common.h b/src/runtime/providers.common.h index 08190ae037..da2a3c36ec 100644 --- a/src/runtime/providers.common.h +++ b/src/runtime/providers.common.h @@ -30,4 +30,4 @@ namespace dsn { namespace tools { extern void register_common_providers(); } -} +} // namespace dsn diff --git a/src/runtime/rpc/CMakeLists.txt b/src/runtime/rpc/CMakeLists.txt deleted file mode 100644 index 08de0852f6..0000000000 --- a/src/runtime/rpc/CMakeLists.txt +++ /dev/null @@ -1,45 +0,0 @@ -# The MIT License (MIT) -# -# Copyright (c) 2015 Microsoft Corporation -# -# -=- Robust Distributed System Nucleus (rDSN) -=- -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. - -set(MY_PROJ_NAME dsn.rpc) - -# Search mode for source files under CURRENT project directory? -# "GLOB_RECURSE" for recursive search -# "GLOB" for non-recursive search -set(MY_SRC_SEARCH_MODE "GLOB") - -set(MY_PROJ_LIBS "") - -# Extra files that will be installed -set(MY_BINPLACES "") - -thrift_generate_cpp( - REQUEST_META_THRIFT_SRCS - REQUEST_META_THRIFT_HDRS - ${CMAKE_CURRENT_SOURCE_DIR}/request_meta.thrift -) - -set(MY_PROJ_SRC ${REQUEST_META_THRIFT_SRCS}) - -dsn_add_object() diff --git a/src/runtime/rpc/dsn_message_parser.h b/src/runtime/rpc/dsn_message_parser.h deleted file mode 100644 index bdc16672de..0000000000 --- a/src/runtime/rpc/dsn_message_parser.h +++ /dev/null @@ -1,58 +0,0 @@ -/* -* The MIT License (MIT) -* -* Copyright (c) 2015 Microsoft Corporation -* -* -=- Robust Distributed System Nucleus (rDSN) -=- -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in -* all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -* THE SOFTWARE. -*/ - -#pragma once - -#include "runtime/rpc/message_parser.h" - -namespace dsn { -class message_ex; - -// Message parser for browser-generated http request. -class dsn_message_parser : public message_parser -{ -public: - dsn_message_parser() : _header_checked(false) {} - virtual ~dsn_message_parser() {} - - virtual void reset() override; - - virtual message_ex *get_message_on_receive(message_reader *reader, - /*out*/ int &read_next) override; - - virtual void prepare_on_send(message_ex *msg) override; - - virtual int get_buffers_on_send(message_ex *msg, /*out*/ send_buf *buffers) override; - -private: - static bool is_right_header(char *hdr); - - static bool is_right_body(message_ex *msg); - -private: - bool _header_checked; -}; -} diff --git a/src/runtime/rpc/raw_message_parser.h b/src/runtime/rpc/raw_message_parser.h deleted file mode 100644 index 1b39a41768..0000000000 --- a/src/runtime/rpc/raw_message_parser.h +++ /dev/null @@ -1,55 +0,0 @@ -/* -* The MIT License (MIT) -* -* Copyright (c) 2015 Microsoft Corporation -* -* -=- Robust Distributed System Nucleus (rDSN) -=- -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in -* all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -* THE SOFTWARE. -*/ - -#ifndef RAW_MESSAGE_PARSER_H -#define RAW_MESSAGE_PARSER_H - -#include "runtime/rpc/message_parser.h" -#include "runtime/task/task_spec.h" -#include "utils/customizable_id.h" - -namespace dsn { -class message_ex; - -DEFINE_CUSTOMIZED_ID(network_header_format, NET_HDR_RAW) - -class rpc_session; - -// Message parser for user customed request. -class raw_message_parser : public message_parser -{ -private: - static void notify_rpc_session_disconnected(rpc_session *sp); - -public: - raw_message_parser(); - virtual ~raw_message_parser() {} - virtual message_ex *get_message_on_receive(message_reader *reader, - /*out*/ int &read_next) override; - virtual int get_buffers_on_send(message_ex *msg, /*out*/ send_buf *buffers) override; -}; -} -#endif // RAW_MESSAGE_PARSER_H diff --git a/src/runtime/scheduler.cpp b/src/runtime/scheduler.cpp index cfb6051880..db9c79b2e7 100644 --- a/src/runtime/scheduler.cpp +++ b/src/runtime/scheduler.cpp @@ -35,9 +35,9 @@ #include "runtime/node_scoper.h" #include "runtime/service_app.h" #include "runtime/simulator.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_queue.h" -#include "runtime/task/task_spec.h" +#include "task/task_code.h" +#include "task/task_queue.h" +#include "task/task_spec.h" #include "scheduler.h" #include "utils/fmt_logging.h" #include "utils/join_point.h" @@ -297,5 +297,5 @@ void scheduler::schedule() _is_scheduling = false; } -} -} // end namespace +} // namespace tools +} // namespace dsn diff --git a/src/runtime/scheduler.h b/src/runtime/scheduler.h index 422690f2a9..7505522083 100644 --- a/src/runtime/scheduler.h +++ b/src/runtime/scheduler.h @@ -35,8 +35,8 @@ #include #include "runtime/simulator.h" -#include "runtime/task/task.h" -#include "runtime/task/task_worker.h" +#include "task/task.h" +#include "task/task_worker.h" #include "utils/extensible_object.h" #include "utils/singleton.h" #include "utils/synchronize.h" diff --git a/src/runtime/serverlet.h b/src/runtime/serverlet.h index 347d5f384a..e5a7df0b38 100644 --- a/src/runtime/serverlet.h +++ b/src/runtime/serverlet.h @@ -27,8 +27,8 @@ #pragma once #include "runtime/service_app.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/serialization.h" +#include "rpc/rpc_holder.h" +#include "rpc/serialization.h" namespace dsn { /*! @@ -247,4 +247,4 @@ inline void serverlet::reply(dsn::message_ex *request, const TResponse &resp) dsn_rpc_reply(msg); } /*@}*/ -} // end namespace +} // namespace dsn diff --git a/src/runtime/service_api_c.cpp b/src/runtime/service_api_c.cpp index 44fbd7096b..ecf2aa564c 100644 --- a/src/runtime/service_api_c.cpp +++ b/src/runtime/service_api_c.cpp @@ -44,25 +44,27 @@ #include #endif +#include "fmt/core.h" +#include "fmt/format.h" #include "perf_counter/perf_counters.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_engine.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_message.h" #include "runtime/api_layer1.h" #include "runtime/api_task.h" #include "runtime/app_model.h" #include "runtime/global_config.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_engine.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/rpc/rpc_message.h" -#include "security/init.h" -#include "security/negotiation_manager.h" #include "runtime/service_app.h" #include "runtime/service_engine.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_engine.h" -#include "runtime/task/task_spec.h" -#include "runtime/task/task_worker.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_engine.h" +#include "task/task_spec.h" +#include "task/task_worker.h" #include "runtime/tool_api.h" +#include "security/init.h" +#include "security/negotiation_manager.h" #include "utils/api_utilities.h" #include "utils/command_manager.h" #include "utils/config_api.h" @@ -75,6 +77,7 @@ #include "utils/join_point.h" #include "utils/logging_provider.h" #include "utils/process_utils.h" +#include "utils/string_conv.h" #include "utils/strings.h" #include "utils/sys_exit_hook.h" #include "utils/threadpool_spec.h" @@ -162,7 +165,7 @@ void dsn_rpc_call(dsn::rpc_address server, dsn::rpc_response_task *rpc_call) auto msg = rpc_call->get_request(); msg->server_address = server; - msg->server_host_port = dsn::host_port::from_address(server); + msg->server_host_port = dsn::host_port::from_address(msg->server_address); ::dsn::task::get_current_rpc()->call(msg, dsn::rpc_response_task_ptr(rpc_call)); } @@ -170,7 +173,7 @@ dsn::message_ex *dsn_rpc_call_wait(dsn::rpc_address server, dsn::message_ex *req { auto msg = ((::dsn::message_ex *)request); msg->server_address = server; - msg->server_host_port = dsn::host_port::from_address(server); + msg->server_host_port = dsn::host_port::from_address(msg->server_address); ::dsn::rpc_response_task *rtask = new ::dsn::rpc_response_task(msg, nullptr, 0); rtask->add_ref(); @@ -342,45 +345,29 @@ inline void dsn_global_init() static std::string dsn_log_prefixed_message_func() { - std::string res; - res.resize(100); - char *prefixed_message = const_cast(res.c_str()); - - int tid = dsn::utils::get_current_tid(); - auto t = dsn::task::get_current_task_id(); + const int tid = dsn::utils::get_current_tid(); + const auto t = dsn::task::get_current_task_id(); if (t) { if (nullptr != dsn::task::get_current_worker2()) { - sprintf(prefixed_message, - "%6s.%7s%d.%016" PRIx64 ": ", - dsn::task::get_current_node_name(), - dsn::task::get_current_worker2()->pool_spec().name.c_str(), - dsn::task::get_current_worker2()->index(), - t); + return fmt::format("{}.{}{}.{:016}: ", + dsn::task::get_current_node_name(), + dsn::task::get_current_worker2()->pool_spec().name, + dsn::task::get_current_worker2()->index(), + t); } else { - sprintf(prefixed_message, - "%6s.%7s.%05d.%016" PRIx64 ": ", - dsn::task::get_current_node_name(), - "io-thrd", - tid, - t); + return fmt::format( + "{}.io-thrd.{}.{:016}: ", dsn::task::get_current_node_name(), tid, t); } } else { if (nullptr != dsn::task::get_current_worker2()) { - sprintf(prefixed_message, - "%6s.%7s%u: ", - dsn::task::get_current_node_name(), - dsn::task::get_current_worker2()->pool_spec().name.c_str(), - dsn::task::get_current_worker2()->index()); + return fmt::format("{}.{}{}: ", + dsn::task::get_current_node_name(), + dsn::task::get_current_worker2()->pool_spec().name, + dsn::task::get_current_worker2()->index()); } else { - sprintf(prefixed_message, - "%6s.%7s.%05d: ", - dsn::task::get_current_node_name(), - "io-thrd", - tid); + return fmt::format("{}.io-thrd.{}: ", dsn::task::get_current_node_name(), tid); } } - - return res; } bool run(const char *config_file, @@ -443,18 +430,27 @@ bool run(const char *config_file, ::dsn::utils::coredump::init(); - // setup log dir - spec.dir_log = ::dsn::utils::filesystem::path_combine(cdir, "log"); - dsn::utils::filesystem::create_directory(spec.dir_log); + // Setup log directory. + // If log_dir is not set, use data_dir/log instead. + if (spec.log_dir.empty()) { + spec.log_dir = ::dsn::utils::filesystem::path_combine(spec.data_dir, "log"); + fmt::print(stdout, "log_dir is not set, use '{}' instead\n", spec.log_dir); + } + // Validate log_dir. + if (!dsn::utils::filesystem::is_absolute_path(spec.log_dir)) { + fmt::print(stderr, "log_dir({}) should be set with an absolute path\n", spec.log_dir); + return false; + } + dsn::utils::filesystem::create_directory(spec.log_dir); - // init tools + // Initialize tools. dsn_all.tool.reset(::dsn::utils::factory_store<::dsn::tools::tool_app>::create( spec.tool.c_str(), ::dsn::PROVIDER_TYPE_MAIN, spec.tool.c_str())); dsn_all.tool->install(spec); - // init app specs + // Initialize app specs. if (!spec.init_app_specs()) { - printf("error in config file %s, exit ...\n", config_file); + fmt::print(stderr, "error in config file {}, exit ...\n", config_file); return false; } @@ -462,10 +458,27 @@ bool run(const char *config_file, ::MallocExtension::instance()->SetMemoryReleaseRate(FLAGS_tcmalloc_release_rate); #endif - // init logging - dsn_log_init(spec.logging_factory_name, spec.dir_log, dsn_log_prefixed_message_func); + // Extract app_names. + std::list app_names_and_indexes; + ::dsn::utils::split_args(app_list.c_str(), app_names_and_indexes, ';'); + std::vector app_names; + for (const auto &app_name_and_index : app_names_and_indexes) { + std::vector name_and_index; + ::dsn::utils::split_args(app_name_and_index.c_str(), name_and_index, '@'); + if (name_and_index.empty()) { + fmt::print(stderr, "app_name should be specified in '{}'", app_name_and_index); + return false; + } + app_names.push_back(name_and_index[0]); + } - // prepare minimum necessary + // Initialize logging. + dsn_log_init(spec.logging_factory_name, + spec.log_dir, + fmt::format("{}", fmt::join(app_names, ".")), + dsn_log_prefixed_message_func); + + // Prepare the minimum necessary. ::dsn::service_engine::instance().init_before_toollets(spec); LOG_INFO("process({}) start: {}, date: {}", @@ -473,10 +486,10 @@ bool run(const char *config_file, dsn::utils::process_start_millis(), dsn::utils::process_start_date_time_mills()); - // init toollets - for (auto it = spec.toollets.begin(); it != spec.toollets.end(); ++it) { - auto tlet = - dsn::tools::internal_use_only::get_toollet(it->c_str(), ::dsn::PROVIDER_TYPE_MAIN); + // Initialize toollets. + for (const auto &toollet_name : spec.toollets) { + auto tlet = dsn::tools::internal_use_only::get_toollet(toollet_name.c_str(), + ::dsn::PROVIDER_TYPE_MAIN); CHECK_NOTNULL(tlet, "toolet not found"); tlet->install(spec); } @@ -507,29 +520,34 @@ bool run(const char *config_file, } } - // split app_name and app_index - std::list applistkvs; - ::dsn::utils::split_args(app_list.c_str(), applistkvs, ';'); - // init apps for (auto &sp : spec.app_specs) { - if (!sp.run) + if (!sp.run) { continue; + } bool create_it = false; - // create all apps - if (app_list == "") { + if (app_list.empty()) { create_it = true; } else { - for (auto &kv : applistkvs) { - std::list argskvs; - ::dsn::utils::split_args(kv.c_str(), argskvs, '@'); - if (std::string("apps.") + argskvs.front() == sp.config_section) { - if (argskvs.size() < 2) + for (const auto &app_name_and_index : app_names_and_indexes) { + std::vector name_and_index; + ::dsn::utils::split_args(app_name_and_index.c_str(), name_and_index, '@'); + CHECK(!name_and_index.empty(), + "app_name should be specified in '{}'", + app_name_and_index); + if (std::string("apps.") + name_and_index.front() == sp.config_section) { + if (name_and_index.size() < 2) { create_it = true; - else - create_it = (std::stoi(argskvs.back()) == sp.index); + } else { + int32_t index = 0; + const auto index_str = name_and_index.back(); + CHECK(dsn::buf2int32(index_str, index), + "'{}' is not a valid index", + index_str); + create_it = (index == sp.index); + } break; } } @@ -540,10 +558,11 @@ bool run(const char *config_file, } } - if (dsn::service_engine::instance().get_all_nodes().size() == 0) { - printf("no app are created, usually because \n" - "app_name is not specified correctly, should be 'xxx' in [apps.xxx]\n" - "or app_index (1-based) is greater than specified count in config file\n"); + if (dsn::service_engine::instance().get_all_nodes().empty()) { + fmt::print(stderr, + "no app are created, usually because \n" + "app_name is not specified correctly, should be 'xxx' in [apps.xxx]\n" + "or app_index (1-based) is greater than specified count in config file\n"); exit(1); } diff --git a/src/runtime/service_app.h b/src/runtime/service_app.h index 71e23fc6d7..81724409ea 100644 --- a/src/runtime/service_app.h +++ b/src/runtime/service_app.h @@ -32,9 +32,10 @@ #include "utils/api_utilities.h" #include "utils/error_code.h" #include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "common/gpid.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_address.h" #include "common/gpid.h" #include "utils/factory_store.h" #include diff --git a/src/runtime/service_engine.cpp b/src/runtime/service_engine.cpp index dee8a53342..0012951186 100644 --- a/src/runtime/service_engine.cpp +++ b/src/runtime/service_engine.cpp @@ -35,12 +35,12 @@ #include "common/gpid.h" #include "fmt/core.h" #include "nlohmann/json.hpp" +#include "rpc/rpc_engine.h" +#include "rpc/rpc_message.h" #include "runtime/node_scoper.h" -#include "runtime/rpc/rpc_engine.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task.h" -#include "runtime/task/task_engine.h" -#include "runtime/task/task_spec.h" +#include "task/task.h" +#include "task/task_engine.h" +#include "task/task_spec.h" #include "utils/command_manager.h" #include "utils/factory_store.h" #include "utils/filesystem.h" diff --git a/src/runtime/service_engine.h b/src/runtime/service_engine.h index f2c7aea76e..77eae41431 100644 --- a/src/runtime/service_engine.h +++ b/src/runtime/service_engine.h @@ -35,7 +35,7 @@ #include "runtime/api_task.h" #include "runtime/global_config.h" #include "runtime/service_app.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/error_code.h" #include "utils/singleton.h" diff --git a/src/runtime/simulator.cpp b/src/runtime/simulator.cpp index ab1225c171..51aad7dcaf 100644 --- a/src/runtime/simulator.cpp +++ b/src/runtime/simulator.cpp @@ -30,8 +30,8 @@ #include "env.sim.h" #include "runtime/global_config.h" -#include "runtime/task/task_engine.sim.h" -#include "runtime/task/task_spec.h" +#include "task/task_engine.sim.h" +#include "task/task_spec.h" #include "scheduler.h" #include "service_engine.h" #include "sim_clock.h" diff --git a/src/runtime/simulator.h b/src/runtime/simulator.h index d48f59c5e1..0ca2e5eb68 100644 --- a/src/runtime/simulator.h +++ b/src/runtime/simulator.h @@ -54,6 +54,7 @@ class checker virtual void initialize(const std::string &name, const std::vector &apps) = 0; virtual void check() = 0; const std::string &name() const { return _name; } + protected: std::vector _apps; std::string _name; @@ -72,5 +73,5 @@ class simulator : public tool_app }; // ---- inline implementation ------ -} -} // end namespace dsn::tools +} // namespace tools +} // namespace dsn diff --git a/src/runtime/test/CMakeLists.txt b/src/runtime/test/CMakeLists.txt index 83e355c449..92316efd39 100644 --- a/src/runtime/test/CMakeLists.txt +++ b/src/runtime/test/CMakeLists.txt @@ -36,11 +36,9 @@ set(MY_PROJ_LIBS gtest) set(MY_BOOST_LIBS Boost::system Boost::filesystem) set(MY_BINPLACES - config-test-corrupt-message.ini config-test.ini config-test-sim.ini command.txt run.sh - clear.sh - gtest.filter) + clear.sh) dsn_add_test() diff --git a/src/runtime/test/config-test-corrupt-message.ini b/src/runtime/test/config-test-corrupt-message.ini deleted file mode 100644 index 3ecf76ff7f..0000000000 --- a/src/runtime/test/config-test-corrupt-message.ini +++ /dev/null @@ -1,141 +0,0 @@ -; The MIT License (MIT) -; -; Copyright (c) 2015 Microsoft Corporation -; -; -=- Robust Distributed System Nucleus (rDSN) -=- -; -; Permission is hereby granted, free of charge, to any person obtaining a copy -; of this software and associated documentation files (the "Software"), to deal -; in the Software without restriction, including without limitation the rights -; to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -; copies of the Software, and to permit persons to whom the Software is -; furnished to do so, subject to the following conditions: -; -; The above copyright notice and this permission notice shall be included in -; all copies or substantial portions of the Software. -; -; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -; IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -; FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -; AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -; LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -; OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -; THE SOFTWARE. - -[apps..default] -run = true -count = 1 -network.client.RPC_CHANNEL_TCP = dsn::tools::asio_network_provider, 65536 -network.client.RPC_CHANNEL_UDP = dsn::tools::asio_udp_provider, 65536 -network.server.0.RPC_CHANNEL_TCP = dsn::tools::asio_network_provider, 65536 -network.server.0.RPC_CHANNEL_UDP = dsn::tools::asio_udp_provider, 65536 - -[apps.client] -type = test -arguments = localhost 20101 -run = true -ports = 20001 -count = 1 -delay_seconds = 1 -pools = THREAD_POOL_DEFAULT, THREAD_POOL_TEST_SERVER, THREAD_POOL_FOR_TEST_1, THREAD_POOL_FOR_TEST_2 - -[apps.server] -type = test -arguments = -ports = 20101,20102 -run = true -count = 1 -pools = THREAD_POOL_DEFAULT, THREAD_POOL_TEST_SERVER -network.client.RPC_CHANNEL_TCP = dsn::tools::asio_network_provider,65536 -network.server.20101.RPC_CHANNEL_TCP = dsn::tools::asio_network_provider,65536 -network.server.20102.RPC_CHANNEL_TCP = dsn::tools::asio_network_provider,65536 -network.server.20103.RPC_CHANNEL_TCP = dsn::tools::asio_network_provider,65536 - -[core] -;tool = simulator -tool = nativerun - -toollets = fault_injector -pause_on_start = false - -logging_start_level = LOG_LEVEL_DEBUG -logging_factory_name = dsn::tools::simple_logger - - - - -[tools.simple_logger] -fast_flush = true -short_header = false -stderr_start_level = LOG_LEVEL_FATAL - -[tools.simulator] -random_seed = 0 - -[network] -; how many network threads for network library (used by asio) -io_service_worker_count = 2 - -[task..default] -is_trace = true -is_profile = true -allow_inline = false -rpc_call_channel = RPC_CHANNEL_TCP -rpc_message_header_format = dsn -rpc_timeout_milliseconds = 1000 - -[task.RPC_TEST_HASH1] -is_trace = true -rpc_message_crc_required = true -rpc_request_drop_ratio = 0 -rpc_timeout_milliseconds = 1000 -rpc_request_data_corrupted_ratio = 1 -rpc_message_data_corrupted_type = header - -[task.RPC_TEST_HASH2] -is_trace = true -rpc_message_crc_required = true -rpc_request_drop_ratio = 0 -rpc_timeout_milliseconds = 1000 -rpc_request_data_corrupted_ratio = 1 -rpc_message_data_corrupted_type = body - -[task.RPC_TEST_HASH3_ACK] -is_trace = true -rpc_message_crc_required = true -rpc_response_drop_ratio = 0 -rpc_timeout_milliseconds = 1000 -rpc_response_data_corrupted_ratio = 1 -rpc_message_data_corrupted_type = header - -[task.RPC_TEST_HASH4_ACK] -is_trace = true -rpc_message_crc_required = true -rpc_response_drop_ratio = 0 -rpc_timeout_milliseconds = 1000 -rpc_response_data_corrupted_ratio = 1 -rpc_message_data_corrupted_type = body - -[task.LPC_AIO_IMMEDIATE_CALLBACK] -is_trace = false -is_profile = false -allow_inline = false - -[task.LPC_RPC_TIMEOUT] -is_trace = false -is_profile = false - -[task.RPC_TEST_UDP] -rpc_call_channel = RPC_CHANNEL_UDP -rpc_message_crc_required = true - -; specification for each thread pool -[threadpool..default] -worker_count = 2 - -[threadpool.THREAD_POOL_DEFAULT] -partitioned = false -worker_priority = THREAD_xPRIORITY_NORMAL - -[threadpool.THREAD_POOL_TEST_SERVER] -partitioned = false diff --git a/src/runtime/test/config-test-sim.ini b/src/runtime/test/config-test-sim.ini index b75516ee5f..6c54eba106 100644 --- a/src/runtime/test/config-test-sim.ini +++ b/src/runtime/test/config-test-sim.ini @@ -36,7 +36,7 @@ run = true ports = count = 1 delay_seconds = 1 -pools = THREAD_POOL_DEFAULT, THREAD_POOL_TEST_SERVER, THREAD_POOL_FOR_TEST_1, THREAD_POOL_FOR_TEST_2 +pools = THREAD_POOL_DEFAULT, THREAD_POOL_TEST_SERVER [apps.server] type = test @@ -106,14 +106,6 @@ worker_priority = THREAD_xPRIORITY_NORMAL [threadpool.THREAD_POOL_TEST_SERVER] partitioned = false -[threadpool.THREAD_POOL_FOR_TEST_1] -worker_count = 2 -partitioned = false - -[threadpool.THREAD_POOL_FOR_TEST_2] -worker_count = 2 -partitioned = true - [core.test] count = 1 run = true diff --git a/src/runtime/test/config-test.ini b/src/runtime/test/config-test.ini index ee6f5c5d7d..46681e369a 100644 --- a/src/runtime/test/config-test.ini +++ b/src/runtime/test/config-test.ini @@ -37,7 +37,7 @@ run = true ports = 20001 count = 1 delay_seconds = 1 -pools = THREAD_POOL_DEFAULT, THREAD_POOL_TEST_SERVER, THREAD_POOL_FOR_TEST_1, THREAD_POOL_FOR_TEST_2 +pools = THREAD_POOL_DEFAULT, THREAD_POOL_TEST_SERVER [apps.server] type = test @@ -124,20 +124,6 @@ worker_priority = THREAD_xPRIORITY_NORMAL [threadpool.THREAD_POOL_TEST_SERVER] partitioned = false -[threadpool.THREAD_POOL_FOR_TEST_1] -worker_count = 2 -worker_priority = THREAD_xPRIORITY_HIGHEST -worker_share_core = false -worker_affinity_mask = 1 -partitioned = false - -[threadpool.THREAD_POOL_FOR_TEST_2] -worker_count = 2 -worker_priority = THREAD_xPRIORITY_NORMAL -worker_share_core = true -worker_affinity_mask = 1 -partitioned = true - [components.simple_perf_counter] counter_computation_interval_seconds = 1 diff --git a/src/runtime/test/dns_resolver_test.cpp b/src/runtime/test/dns_resolver_test.cpp index 86a0cc7e17..4e4ec57b2b 100644 --- a/src/runtime/test/dns_resolver_test.cpp +++ b/src/runtime/test/dns_resolver_test.cpp @@ -21,11 +21,11 @@ #include #include "gtest/gtest.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/group_address.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/dns_resolver.h" +#include "rpc/group_address.h" +#include "rpc/group_host_port.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" namespace dsn { diff --git a/src/runtime/test/gtest.filter b/src/runtime/test/gtest.filter deleted file mode 100644 index 2d19ecb8aa..0000000000 --- a/src/runtime/test/gtest.filter +++ /dev/null @@ -1,3 +0,0 @@ -config-test.ini -core.corrupt_message:core.aio*:core.operation_failed:tools_hpc.* -config-test-sim.ini -core.corrupt_message:core.aio*:core.operation_failed:tools_hpc.*:tools_simulator.*:task_test.signal_finished_task -config-test-sim.ini tools_simulator.* diff --git a/src/runtime/test/pipeline_test.cpp b/src/runtime/test/pipeline_test.cpp index 444c7f2684..4158b3b596 100644 --- a/src/runtime/test/pipeline_test.cpp +++ b/src/runtime/test/pipeline_test.cpp @@ -30,7 +30,7 @@ #include "common/replication.codes.h" #include "gtest/gtest.h" #include "runtime/pipeline.h" -#include "runtime/task/task_tracker.h" +#include "task/task_tracker.h" #include "utils/chrono_literals.h" namespace dsn { diff --git a/src/runtime/test/run.sh b/src/runtime/test/run.sh index 648ccb41b5..96d0c1d45e 100755 --- a/src/runtime/test/run.sh +++ b/src/runtime/test/run.sh @@ -28,21 +28,20 @@ if [ -z "${REPORT_DIR}" ]; then REPORT_DIR="." fi -while read -r -a line; do - test_case=${line[0]} - gtest_filter=${line[1]} +test_cases=(config-test.ini config-test-sim.ini) +for test_case in ${test_cases[*]}; do output_xml="${REPORT_DIR}/dsn_runtime_tests_${test_case/.ini/.xml}" - echo "============ run dsn_runtime_tests ${test_case} with gtest_filter ${gtest_filter} ============" + echo "============ run dsn_runtime_tests ${test_case} ============" ./clear.sh - GTEST_OUTPUT="xml:${output_xml}" GTEST_FILTER=${gtest_filter} ./dsn_runtime_tests ${test_case} < command.txt + GTEST_OUTPUT="xml:${output_xml}" ./dsn_runtime_tests ${test_case} < command.txt if [ $? -ne 0 ]; then echo "run dsn_runtime_tests $test_case failed" echo "---- ls ----" ls -l - if find . -name log.1.txt; then - echo "---- tail -n 100 log.1.txt ----" - tail -n 100 `find . -name log.1.txt` + if [ `find . -name pegasus.log.* | wc -l` -ne 0 ]; then + echo "---- tail -n 100 pegasus.log.* ----" + tail -n 100 `find . -name pegasus.log.*` fi if [ -f core ]; then echo "---- gdb ./dsn_runtime_tests core ----" @@ -50,8 +49,8 @@ while read -r -a line; do fi exit 1 fi - echo "============ done dsn_runtime_tests ${test_case} with gtest_filter ${gtest_filter} ============" -done #include +#include "common/gpid.h" #include "gtest/gtest.h" #include "runtime/api_layer1.h" #include "runtime/global_config.h" #include "runtime/service_app.h" #include "runtime/service_engine.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" #include "runtime/tool_api.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/config_api.h" #include "utils/error_code.h" #include "utils/flags.h" @@ -202,8 +203,11 @@ TEST(core, dsn_semaphore) TEST(core, dsn_env) { - if (dsn::service_engine::instance().spec().tool == "simulator") - return; + if (dsn::service_engine::instance().spec().tool == "simulator") { + GTEST_SKIP() << "Skip the test in simulator mode, set 'tool = nativerun' in '[core]' " + "section in config file to enable it."; + } + ASSERT_EQ("nativerun", dsn::service_engine::instance().spec().tool); uint64_t now1 = dsn_now_ns(); std::this_thread::sleep_for(std::chrono::milliseconds(1)); uint64_t now2 = dsn_now_ns(); diff --git a/src/runtime/test/sim_lock.cpp b/src/runtime/test/sim_lock.cpp index 7bde41bf28..d23c0af6a8 100644 --- a/src/runtime/test/sim_lock.cpp +++ b/src/runtime/test/sim_lock.cpp @@ -31,8 +31,8 @@ #include "runtime/global_config.h" #include "runtime/scheduler.h" #include "runtime/service_engine.h" -#include "runtime/task/task.h" -#include "runtime/task/task_engine.sim.h" +#include "task/task.h" +#include "task/task_engine.sim.h" #include "utils/synchronize.h" #include "utils/zlocks.h" @@ -86,13 +86,17 @@ namespace dsn { namespace test { typedef std::function system_callback; } -} +} // namespace dsn TEST(tools_simulator, scheduler) { - if (dsn::task::get_current_worker() == nullptr) - return; - if (dsn::service_engine::instance().spec().tool != "simulator") - return; + if (dsn::task::get_current_worker() == nullptr) { + GTEST_SKIP() << "Skip the test in non-worker thread."; + } + if (dsn::service_engine::instance().spec().tool == "nativerun") { + GTEST_SKIP() << "Skip the test in nativerun mode, set 'tool = simulator' in '[core]' " + "section in config file to enable it."; + } + ASSERT_EQ("simulator", dsn::service_engine::instance().spec().tool); dsn::tools::sim_worker_state *s = dsn::tools::scheduler::task_worker_ext::get(dsn::task::get_current_worker()); diff --git a/src/runtime/test_utils.h b/src/runtime/test_utils.h index 4b54fde386..29df59b28a 100644 --- a/src/runtime/test_utils.h +++ b/src/runtime/test_utils.h @@ -26,24 +26,25 @@ #pragma once +#include +#include +#include "common/gpid.h" +#include "rpc/dns_resolver.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_host_port.h" +#include "rpc/rpc_stream.h" +#include "rpc/serialization.h" +#include "runtime/serverlet.h" +#include "runtime/service_app.h" #include "runtime/api_task.h" #include "runtime/api_layer1.h" #include "runtime/app_model.h" +#include "task/task_code.h" +#include "task/task.h" +#include "task/task_worker.h" #include "utils/api_utilities.h" #include "utils/error_code.h" #include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" -#include "common/gpid.h" -#include "runtime/rpc/dns_resolver.h" -#include "runtime/rpc/serialization.h" -#include "runtime/rpc/rpc_stream.h" -#include "runtime/serverlet.h" -#include "runtime/service_app.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task.h" -#include "runtime/task/task_worker.h" -#include -#include #ifndef TEST_PORT_BEGIN #define TEST_PORT_BEGIN 20201 diff --git a/src/runtime/threadpool_code.cpp b/src/runtime/threadpool_code.cpp index 48a050e228..dcc044306b 100644 --- a/src/runtime/threadpool_code.cpp +++ b/src/runtime/threadpool_code.cpp @@ -50,4 +50,4 @@ const char *threadpool_code::to_string() const { return dsn::utils::customized_id_mgr::instance().get_name(_internal_code); } -} +} // namespace dsn diff --git a/src/runtime/tool_api.cpp b/src/runtime/tool_api.cpp index fa9bff1542..b80a5016f7 100644 --- a/src/runtime/tool_api.cpp +++ b/src/runtime/tool_api.cpp @@ -26,18 +26,17 @@ #include "runtime/tool_api.h" -#include #include #include #include #include #include +#include "rpc/message_parser_manager.h" #include "runtime/global_config.h" -#include "runtime/rpc/message_parser_manager.h" #include "runtime/service_engine.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/error_code.h" #include "utils/factory_store.h" #include "utils/fmt_logging.h" diff --git a/src/runtime/tool_api.h b/src/runtime/tool_api.h index c9e145a435..20c87e2cdd 100644 --- a/src/runtime/tool_api.h +++ b/src/runtime/tool_api.h @@ -42,14 +42,13 @@ Component providers define the interface for the local components (e.g., network #include #include +#include "rpc/message_parser.h" +#include "rpc/network.h" #include "runtime/env_provider.h" -#include "runtime/rpc/message_parser.h" -#include "runtime/rpc/network.h" -#include "runtime/task/task_queue.h" -#include "runtime/task/task_spec.h" -#include "runtime/task/task_worker.h" -#include "runtime/task/timer_service.h" -// providers +#include "task/task_queue.h" +#include "task/task_spec.h" +#include "task/task_worker.h" +#include "task/timer_service.h" #include "utils/factory_store.h" #include "utils/join_point.h" #include "utils/logging_provider.h" // IWYU pragma: keep diff --git a/src/runtime/tracer.cpp b/src/runtime/tracer.cpp index dd76006f0f..364aa88c36 100644 --- a/src/runtime/tracer.cpp +++ b/src/runtime/tracer.cpp @@ -36,11 +36,11 @@ #include "aio/aio_task.h" #include "fmt/core.h" #include "fmt/format.h" +#include "rpc/rpc_message.h" #include "runtime/global_config.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/command_manager.h" #include "utils/config_api.h" #include "utils/enum_helper.h" @@ -408,7 +408,7 @@ void tracer::install(service_spec &spec) "tracer.find", "Find related logs", "[forward|f|backward|b] [rpc|r|task|t] [trace_id|task_id(e.g., a023003920302390)] " - "", + "", tracer_log_flow); }); } diff --git a/src/sample/run.sh b/src/sample/run.sh index 646446ef83..5da1a91689 100755 --- a/src/sample/run.sh +++ b/src/sample/run.sh @@ -16,5 +16,25 @@ # specific language governing permissions and limitations # under the License. -export LD_LIBRARY_PATH=`pwd`/../../../../../thirdparty/output/lib:`pwd`/../../lib:/usr/lib/jvm/java-1.8.0-openjdk/jre/lib/amd64/server +if [ ! -d "$PEGASUS_THIRDPARTY_ROOT" ]; then + echo "ERROR: PEGASUS_THIRDPARTY_ROOT not set" + exit 1 +fi + +if [ ! -d "$JAVA_HOME" ]; then + echo "ERROR: JAVA_HOME not set" + exit 1 +fi + +ARCH_TYPE='' +arch_output=$(arch) +if [ "$arch_output"x == "x86_64"x ]; then + ARCH_TYPE="amd64" +elif [ "$arch_output"x == "aarch64"x ]; then + ARCH_TYPE="aarch64" +else + echo "WARNING: unsupported CPU architecture '$arch_output', use 'x86_64' as default" +fi +export LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/${ARCH_TYPE}:${JAVA_HOME}/jre/lib/${ARCH_TYPE}/server:${PEGASUS_THIRDPARTY_ROOT}/output/lib:$(pwd)/../../lib:${LD_LIBRARY_PATH} + ./sample onebox temp diff --git a/src/security/client_negotiation.cpp b/src/security/client_negotiation.cpp index 68a954baa2..c8b6b5ba27 100644 --- a/src/security/client_negotiation.cpp +++ b/src/security/client_negotiation.cpp @@ -26,8 +26,8 @@ #include "fmt/format.h" #include "negotiation_manager.h" #include "negotiation_utils.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/network.h" +#include "rpc/rpc_address.h" #include "security/negotiation.h" #include "security/sasl_wrapper.h" #include "utils/autoref_ptr.h" diff --git a/src/security/client_negotiation.h b/src/security/client_negotiation.h index 990a525fc6..604cab8045 100644 --- a/src/security/client_negotiation.h +++ b/src/security/client_negotiation.h @@ -20,7 +20,7 @@ #include #include "negotiation.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_message.h" #include "security_types.h" #include "utils/blob.h" diff --git a/src/security/meta_access_controller.cpp b/src/security/meta_access_controller.cpp index 204e34cb12..7e4fea9108 100644 --- a/src/security/meta_access_controller.cpp +++ b/src/security/meta_access_controller.cpp @@ -19,11 +19,11 @@ #include -#include "ranger/ranger_resource_policy_manager.h" #include "ranger/ranger_resource_policy.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_code.h" +#include "ranger/ranger_resource_policy_manager.h" +#include "rpc/network.h" +#include "rpc/rpc_message.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/flags.h" #include "utils/fmt_logging.h" diff --git a/src/security/negotiation.h b/src/security/negotiation.h index c9098a2491..235d8d42b8 100644 --- a/src/security/negotiation.h +++ b/src/security/negotiation.h @@ -22,9 +22,9 @@ #include #include -#include "runtime/rpc/network.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/network.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_message.h" #include "sasl_wrapper.h" #include "security_types.h" #include "utils/autoref_ptr.h" diff --git a/src/security/negotiation_manager.cpp b/src/security/negotiation_manager.cpp index 22a4098f52..0737ff8bc9 100644 --- a/src/security/negotiation_manager.cpp +++ b/src/security/negotiation_manager.cpp @@ -23,11 +23,11 @@ #include "failure_detector/fd.code.definition.h" #include "http/http_server.h" #include "negotiation_utils.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_code.h" +#include "rpc/network.h" +#include "rpc/rpc_message.h" #include "security_types.h" #include "server_negotiation.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/flags.h" diff --git a/src/security/replica_access_controller.cpp b/src/security/replica_access_controller.cpp index 0f480c77b4..6d19b213d7 100644 --- a/src/security/replica_access_controller.cpp +++ b/src/security/replica_access_controller.cpp @@ -28,8 +28,8 @@ #pragma GCC diagnostic pop #include "replica_access_controller.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/network.h" +#include "rpc/rpc_message.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/flags.h" diff --git a/src/security/sasl_client_wrapper.cpp b/src/security/sasl_client_wrapper.cpp index 50296826da..acdce301e0 100644 --- a/src/security/sasl_client_wrapper.cpp +++ b/src/security/sasl_client_wrapper.cpp @@ -23,7 +23,7 @@ #include "utils/error_code.h" #include "utils/fail_point.h" #include "utils/flags.h" -#include "absl/strings/string_view.h" +#include DSN_DECLARE_string(service_fqdn); DSN_DECLARE_string(service_name); @@ -33,7 +33,7 @@ namespace security { error_s sasl_client_wrapper::init() { - FAIL_POINT_INJECT_F("sasl_client_wrapper_init", [](absl::string_view str) { + FAIL_POINT_INJECT_F("sasl_client_wrapper_init", [](std::string_view str) { error_code err = error_code::try_get(str.data(), ERR_UNKNOWN); return error_s::make(err); }); @@ -45,7 +45,7 @@ error_s sasl_client_wrapper::init() error_s sasl_client_wrapper::start(const std::string &mechanism, const blob &input, blob &output) { - FAIL_POINT_INJECT_F("sasl_client_wrapper_start", [](absl::string_view str) { + FAIL_POINT_INJECT_F("sasl_client_wrapper_start", [](std::string_view str) { error_code err = error_code::try_get(str.data(), ERR_UNKNOWN); return error_s::make(err); }); @@ -62,7 +62,7 @@ error_s sasl_client_wrapper::start(const std::string &mechanism, const blob &inp error_s sasl_client_wrapper::step(const blob &input, blob &output) { - FAIL_POINT_INJECT_F("sasl_client_wrapper_step", [](absl::string_view str) { + FAIL_POINT_INJECT_F("sasl_client_wrapper_step", [](std::string_view str) { error_code err = error_code::try_get(str.data(), ERR_UNKNOWN); return error_s::make(err); }); diff --git a/src/security/sasl_server_wrapper.cpp b/src/security/sasl_server_wrapper.cpp index 4cda84241b..67498926ff 100644 --- a/src/security/sasl_server_wrapper.cpp +++ b/src/security/sasl_server_wrapper.cpp @@ -23,7 +23,7 @@ #include "utils/error_code.h" #include "utils/fail_point.h" #include "utils/flags.h" -#include "absl/strings/string_view.h" +#include DSN_DECLARE_string(service_fqdn); DSN_DECLARE_string(service_name); @@ -33,7 +33,7 @@ namespace security { error_s sasl_server_wrapper::init() { - FAIL_POINT_INJECT_F("sasl_server_wrapper_init", [](absl::string_view str) { + FAIL_POINT_INJECT_F("sasl_server_wrapper_init", [](std::string_view str) { error_code err = error_code::try_get(str.data(), ERR_UNKNOWN); return error_s::make(err); }); @@ -45,7 +45,7 @@ error_s sasl_server_wrapper::init() error_s sasl_server_wrapper::start(const std::string &mechanism, const blob &input, blob &output) { - FAIL_POINT_INJECT_F("sasl_server_wrapper_start", [](absl::string_view str) { + FAIL_POINT_INJECT_F("sasl_server_wrapper_start", [](std::string_view str) { error_code err = error_code::try_get(str.data(), ERR_UNKNOWN); return error_s::make(err); }); @@ -61,7 +61,7 @@ error_s sasl_server_wrapper::start(const std::string &mechanism, const blob &inp error_s sasl_server_wrapper::step(const blob &input, blob &output) { - FAIL_POINT_INJECT_F("sasl_server_wrapper_step", [](absl::string_view str) { + FAIL_POINT_INJECT_F("sasl_server_wrapper_step", [](std::string_view str) { error_code err = error_code::try_get(str.data(), ERR_UNKNOWN); return error_s::make(err); }); diff --git a/src/security/sasl_wrapper.cpp b/src/security/sasl_wrapper.cpp index 14c28f7b95..7d7d222c51 100644 --- a/src/security/sasl_wrapper.cpp +++ b/src/security/sasl_wrapper.cpp @@ -23,7 +23,7 @@ #include "sasl_server_wrapper.h" #include "utils/error_code.h" #include "utils/fail_point.h" -#include "absl/strings/string_view.h" +#include namespace dsn { namespace security { @@ -44,7 +44,7 @@ sasl_wrapper::~sasl_wrapper() error_s sasl_wrapper::retrieve_username(std::string &output) { - FAIL_POINT_INJECT_F("sasl_wrapper_retrieve_username", [](absl::string_view str) { + FAIL_POINT_INJECT_F("sasl_wrapper_retrieve_username", [](std::string_view str) { error_code err = error_code::try_get(str.data(), ERR_UNKNOWN); return error_s::make(err); }); diff --git a/src/security/server_negotiation.cpp b/src/security/server_negotiation.cpp index 174b418677..b5daee2588 100644 --- a/src/security/server_negotiation.cpp +++ b/src/security/server_negotiation.cpp @@ -22,8 +22,8 @@ #include "fmt/core.h" #include "fmt/format.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/network.h" +#include "rpc/rpc_address.h" #include "security/negotiation.h" #include "security/sasl_wrapper.h" #include "security_types.h" diff --git a/src/security/server_negotiation.h b/src/security/server_negotiation.h index c43e55c022..48d7023f2b 100644 --- a/src/security/server_negotiation.h +++ b/src/security/server_negotiation.h @@ -20,7 +20,7 @@ #include #include "negotiation.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_message.h" namespace dsn { class blob; diff --git a/src/security/test/client_negotiation_test.cpp b/src/security/test/client_negotiation_test.cpp index fcfaf61227..596d73ee35 100644 --- a/src/security/test/client_negotiation_test.cpp +++ b/src/security/test/client_negotiation_test.cpp @@ -20,10 +20,10 @@ #include #include "gtest/gtest.h" -#include "runtime/rpc/network.sim.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/network.sim.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_message.h" #include "security/client_negotiation.h" #include "security/negotiation.h" #include "security_types.h" diff --git a/src/security/test/meta_access_controller_test.cpp b/src/security/test/meta_access_controller_test.cpp index 11fc1c588e..83d8b7d872 100644 --- a/src/security/test/meta_access_controller_test.cpp +++ b/src/security/test/meta_access_controller_test.cpp @@ -21,12 +21,12 @@ #include "common/replication.codes.h" #include "gtest/gtest.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/network.sim.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/network.h" +#include "rpc/network.sim.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_message.h" #include "security/access_controller.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/flags.h" diff --git a/src/security/test/negotiation_manager_test.cpp b/src/security/test/negotiation_manager_test.cpp index dbf79db51d..2f5429e5c6 100644 --- a/src/security/test/negotiation_manager_test.cpp +++ b/src/security/test/negotiation_manager_test.cpp @@ -21,13 +21,13 @@ #include "gtest/gtest.h" #include "http/http_server.h" #include "nfs/nfs_code_definition.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/network.sim.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/network.h" +#include "rpc/network.sim.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_message.h" #include "security/negotiation_utils.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "security_types.h" #include "utils/autoref_ptr.h" #include "utils/flags.h" diff --git a/src/security/test/replica_access_controller_test.cpp b/src/security/test/replica_access_controller_test.cpp index 998d828b96..aca34f2db8 100644 --- a/src/security/test/replica_access_controller_test.cpp +++ b/src/security/test/replica_access_controller_test.cpp @@ -23,10 +23,10 @@ #include "common/replication.codes.h" #include "gtest/gtest.h" #include "ranger/access_type.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/network.sim.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/network.h" +#include "rpc/network.sim.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_message.h" #include "security/replica_access_controller.h" #include "utils/autoref_ptr.h" #include "utils/flags.h" diff --git a/src/security/test/server_negotiation_test.cpp b/src/security/test/server_negotiation_test.cpp index 92b5b0b188..8fff293de0 100644 --- a/src/security/test/server_negotiation_test.cpp +++ b/src/security/test/server_negotiation_test.cpp @@ -21,9 +21,9 @@ #include #include "gtest/gtest.h" -#include "runtime/rpc/network.sim.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_holder.h" +#include "rpc/network.sim.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_holder.h" #include "security/negotiation.h" #include "security/negotiation_utils.h" #include "security_types.h" diff --git a/src/server/available_detector.cpp b/src/server/available_detector.cpp index 67754149d7..8b130e8b23 100644 --- a/src/server/available_detector.cpp +++ b/src/server/available_detector.cpp @@ -20,6 +20,7 @@ #include "available_detector.h" #include +#include // IWYU pragma: keep // IWYU pragma: no_include #include #include @@ -31,18 +32,17 @@ #include #include -#include // IWYU pragma: keep - #include "base/pegasus_key_schema.h" #include "client/replication_ddl_client.h" #include "common/common.h" #include "common/replication_other_types.h" +#include "dsn.layer2_types.h" #include "pegasus/client.h" #include "perf_counter/perf_counter.h" #include "result_writer.h" #include "runtime/api_layer1.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_code.h" +#include "task/async_calls.h" +#include "task/task_code.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/flags.h" @@ -260,14 +260,14 @@ void available_detector::report_availability_info() std::chrono::minutes(1), 0, std::chrono::minutes(2) // waiting for pegasus finishing start. - ); + ); } bool available_detector::generate_hash_keys() { // get app_id and partition_count. - auto err = - _ddl_client->list_app(FLAGS_available_detect_app, _app_id, _partition_count, partitions); + std::vector<::dsn::partition_configuration> pcs; + auto err = _ddl_client->list_app(FLAGS_available_detect_app, _app_id, _partition_count, pcs); if (err == ::dsn::ERR_OK && _app_id >= 0) { _hash_keys.clear(); for (auto pidx = 0; pidx < _partition_count; pidx++) { @@ -326,8 +326,9 @@ void available_detector::on_detect(int32_t idx) _recent_minute_detect_times.fetch_add(1); // define async_get callback function. - auto async_get_callback = [this, idx]( - int err, std::string &&_value, pegasus_client::internal_info &&info) { + auto async_get_callback = [this, idx](int err, + std::string &&_value, + pegasus_client::internal_info &&info) { std::atomic &cnt = (*_fail_count[idx]); if (err != PERR_OK) { int prev = cnt.fetch_add(1); @@ -350,10 +351,8 @@ void available_detector::on_detect(int32_t idx) }; // define async_set callback function. - auto async_set_callback = - [ this, idx, user_async_get_callback = std::move(async_get_callback) ]( - int err, pegasus_client::internal_info &&info) - { + auto async_set_callback = [this, idx, user_async_get_callback = std::move(async_get_callback)]( + int err, pegasus_client::internal_info &&info) { std::atomic &cnt = (*_fail_count[idx]); if (err != PERR_OK) { int prev = cnt.fetch_add(1); diff --git a/src/server/available_detector.h b/src/server/available_detector.h index 075c271dd2..4e974615a9 100644 --- a/src/server/available_detector.h +++ b/src/server/available_detector.h @@ -25,11 +25,10 @@ #include #include -#include "dsn.layer2_types.h" #include "perf_counter/perf_counter_wrapper.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_host_port.h" +#include "task/task.h" +#include "task/task_tracker.h" #include "utils/synchronize.h" namespace dsn { @@ -86,7 +85,6 @@ class available_detector std::vector<::dsn::task_ptr> _detect_tasks; int32_t _app_id; int32_t _partition_count; - std::vector<::dsn::partition_configuration> partitions; std::string _send_alert_email_cmd; std::string _send_availability_info_email_cmd; diff --git a/src/server/capacity_unit_calculator.cpp b/src/server/capacity_unit_calculator.cpp index cfbc086f41..88f6e2f172 100644 --- a/src/server/capacity_unit_calculator.cpp +++ b/src/server/capacity_unit_calculator.cpp @@ -19,7 +19,7 @@ #include "capacity_unit_calculator.h" -#include +#include #include #include #include @@ -27,7 +27,7 @@ #include "hotkey_collector.h" #include "rrdb/rrdb_types.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_message.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/flags.h" diff --git a/src/server/compaction_filter_rule.cpp b/src/server/compaction_filter_rule.cpp index cf369da96b..d04b67e70b 100644 --- a/src/server/compaction_filter_rule.cpp +++ b/src/server/compaction_filter_rule.cpp @@ -22,14 +22,14 @@ #include "base/pegasus_utils.h" #include "base/pegasus_value_schema.h" #include "utils/fmt_logging.h" -#include "absl/strings/string_view.h" +#include #include "utils/strings.h" namespace pegasus { namespace server { -bool string_pattern_match(absl::string_view value, +bool string_pattern_match(std::string_view value, string_match_type type, - absl::string_view filter_pattern) + std::string_view filter_pattern) { if (filter_pattern.empty()) return false; @@ -38,7 +38,7 @@ bool string_pattern_match(absl::string_view value, switch (type) { case string_match_type::SMT_MATCH_ANYWHERE: - return value.find(filter_pattern) != absl::string_view::npos; + return value.find(filter_pattern) != std::string_view::npos; case string_match_type::SMT_MATCH_PREFIX: return dsn::utils::mequals(value.data(), filter_pattern.data(), filter_pattern.length()); case string_match_type::SMT_MATCH_POSTFIX: @@ -53,27 +53,27 @@ bool string_pattern_match(absl::string_view value, hashkey_pattern_rule::hashkey_pattern_rule(uint32_t data_version) {} -bool hashkey_pattern_rule::match(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value) const +bool hashkey_pattern_rule::match(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value) const { return string_pattern_match(hash_key, match_type, pattern); } sortkey_pattern_rule::sortkey_pattern_rule(uint32_t data_version) {} -bool sortkey_pattern_rule::match(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value) const +bool sortkey_pattern_rule::match(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value) const { return string_pattern_match(sort_key, match_type, pattern); } ttl_range_rule::ttl_range_rule(uint32_t data_version) : data_version(data_version) {} -bool ttl_range_rule::match(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value) const +bool ttl_range_rule::match(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value) const { uint32_t expire_ts = pegasus_extract_expire_ts(data_version, existing_value); // if start_ttl and stop_ttl = 0, it means we want to delete keys which have no ttl diff --git a/src/server/compaction_filter_rule.h b/src/server/compaction_filter_rule.h index be211ba6b0..e471a759fd 100644 --- a/src/server/compaction_filter_rule.h +++ b/src/server/compaction_filter_rule.h @@ -30,7 +30,7 @@ #include "utils/enum_helper.h" #include "utils/factory_store.h" #include "utils/fmt_utils.h" -#include "absl/strings/string_view.h" +#include namespace pegasus { namespace server { @@ -76,9 +76,9 @@ class compaction_filter_rule // TODO(zhaoliwei): we can use `value_filed` to replace existing_value in the later, // after the refactor of value schema - virtual bool match(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value) const = 0; + virtual bool match(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value) const = 0; }; enum string_match_type @@ -102,9 +102,9 @@ class hashkey_pattern_rule : public compaction_filter_rule public: hashkey_pattern_rule(uint32_t data_version = VERSION_MAX); - bool match(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value) const; + bool match(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value) const; DEFINE_JSON_SERIALIZATION(pattern, match_type) private: @@ -124,9 +124,9 @@ class sortkey_pattern_rule : public compaction_filter_rule public: sortkey_pattern_rule(uint32_t data_version = VERSION_MAX); - bool match(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value) const; + bool match(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value) const; DEFINE_JSON_SERIALIZATION(pattern, match_type) private: @@ -144,9 +144,9 @@ class ttl_range_rule : public compaction_filter_rule public: explicit ttl_range_rule(uint32_t data_version); - bool match(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value) const; + bool match(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value) const; DEFINE_JSON_SERIALIZATION(start_ttl, stop_ttl) private: diff --git a/src/server/compaction_operation.cpp b/src/server/compaction_operation.cpp index 5c139073c1..a7d21bd326 100644 --- a/src/server/compaction_operation.cpp +++ b/src/server/compaction_operation.cpp @@ -17,7 +17,7 @@ * under the License. */ -#include +#include #include #include "base/pegasus_utils.h" @@ -30,9 +30,9 @@ namespace pegasus { namespace server { compaction_operation::~compaction_operation() = default; -bool compaction_operation::all_rules_match(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value) const +bool compaction_operation::all_rules_match(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value) const { if (rules.empty()) { return false; @@ -55,9 +55,9 @@ delete_key::delete_key(filter_rules &&rules, uint32_t data_version) delete_key::delete_key(uint32_t data_version) : compaction_operation(data_version) {} -bool delete_key::filter(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value, +bool delete_key::filter(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value, std::string *new_value, bool *value_changed) const { @@ -74,9 +74,9 @@ update_ttl::update_ttl(filter_rules &&rules, uint32_t data_version) update_ttl::update_ttl(uint32_t data_version) : compaction_operation(data_version) {} -bool update_ttl::filter(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value, +bool update_ttl::filter(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value, std::string *new_value, bool *value_changed) const { diff --git a/src/server/compaction_operation.h b/src/server/compaction_operation.h index d7c9d811eb..1101be1fa6 100644 --- a/src/server/compaction_operation.h +++ b/src/server/compaction_operation.h @@ -21,13 +21,12 @@ #include #include -#include #include #include +#include #include #include -#include "absl/strings/string_view.h" #include "common/json_helper.h" #include "compaction_filter_rule.h" #include "utils/blob.h" @@ -75,18 +74,18 @@ class compaction_operation explicit compaction_operation(uint32_t data_version) : data_version(data_version) {} virtual ~compaction_operation() = 0; - bool all_rules_match(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value) const; + bool all_rules_match(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value) const; void set_rules(filter_rules &&rules); /** * @return false indicates that this key-value should be removed * If you want to modify the existing_value, you can pass it back through new_value and * value_changed needs to be set to true in this case. */ - virtual bool filter(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value, + virtual bool filter(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value, std::string *new_value, bool *value_changed) const = 0; @@ -106,9 +105,9 @@ class delete_key : public compaction_operation delete_key(filter_rules &&rules, uint32_t data_version); explicit delete_key(uint32_t data_version); - bool filter(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value, + bool filter(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value, std::string *new_value, bool *value_changed) const; @@ -154,9 +153,9 @@ class update_ttl : public compaction_operation update_ttl(filter_rules &&rules, uint32_t data_version); explicit update_ttl(uint32_t data_version); - bool filter(absl::string_view hash_key, - absl::string_view sort_key, - absl::string_view existing_value, + bool filter(std::string_view hash_key, + std::string_view sort_key, + std::string_view existing_value, std::string *new_value, bool *value_changed) const; DEFINE_JSON_SERIALIZATION(type, value) diff --git a/src/server/hotkey_collector.cpp b/src/server/hotkey_collector.cpp index a05c3012d8..ca67a74d03 100644 --- a/src/server/hotkey_collector.cpp +++ b/src/server/hotkey_collector.cpp @@ -17,7 +17,7 @@ #include "hotkey_collector.h" -#include +#include #include // IWYU pragma: no_include #include @@ -129,7 +129,7 @@ find_outlier_index(const std::vector &captured_keys, int threshold, in // TODO: (Tangyanzhao) replace it to xxhash -/*extern*/ int get_bucket_id(absl::string_view data, int bucket_num) +/*extern*/ int get_bucket_id(std::string_view data, int bucket_num) { return static_cast(boost::hash_range(data.begin(), data.end()) % bucket_num); } @@ -398,7 +398,7 @@ struct blob_hash { std::size_t operator()(const dsn::blob &str) const { - absl::string_view cp = str.to_string_view(); + std::string_view cp = str.to_string_view(); return boost::hash_range(cp.begin(), cp.end()); } }; @@ -429,8 +429,8 @@ void hotkey_fine_data_collector::analyse_data(detect_hotkey_result &result) // the weight of all the collected hash keys std::vector weights; weights.reserve(hash_keys_weight.size()); - absl::string_view weight_max_key; // the hashkey with the max weight - uint64_t weight_max = 0; // the max weight by far + std::string_view weight_max_key; // the hashkey with the max weight + uint64_t weight_max = 0; // the max weight by far for (const auto &iter : hash_keys_weight) { weights.push_back(iter.second); if (iter.second > weight_max) { diff --git a/src/server/hotkey_collector.h b/src/server/hotkey_collector.h index 826bf157b8..b8b74ccf51 100644 --- a/src/server/hotkey_collector.h +++ b/src/server/hotkey_collector.h @@ -29,7 +29,7 @@ #include "replica/replica_base.h" #include "replica_admin_types.h" #include "utils/blob.h" -#include "absl/strings/string_view.h" +#include namespace pegasus { namespace server { @@ -50,7 +50,7 @@ struct detect_hotkey_result } }; -extern int get_bucket_id(absl::string_view data, int bucket_num); +extern int get_bucket_id(std::string_view data, int bucket_num); extern bool find_outlier_index(const std::vector &captured_keys, int threshold, int &hot_index); diff --git a/src/server/hotspot_partition_calculator.cpp b/src/server/hotspot_partition_calculator.cpp index 93ab165d22..dbf4e64789 100644 --- a/src/server/hotspot_partition_calculator.cpp +++ b/src/server/hotspot_partition_calculator.cpp @@ -21,13 +21,13 @@ #include #include #include +#include -#include "absl/strings/string_view.h" #include "client/replication_ddl_client.h" #include "common/gpid.h" #include "common/serialization_helper/dsn.layer2_types.h" #include "perf_counter/perf_counter.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "server/hotspot_partition_stat.h" #include "shell/command_executor.h" #include "utils/error_code.h" @@ -212,28 +212,27 @@ void hotspot_partition_calculator::send_detect_hotkey_request( const dsn::replication::hotkey_type::type hotkey_type, const dsn::replication::detect_action::type action) { - FAIL_POINT_INJECT_F("send_detect_hotkey_request", [](absl::string_view) {}); + FAIL_POINT_INJECT_F("send_detect_hotkey_request", [](std::string_view) {}); int app_id = -1; int partition_count = -1; - std::vector partitions; - _shell_context->ddl_client->list_app(app_name, app_id, partition_count, partitions); + std::vector pcs; + _shell_context->ddl_client->list_app(app_name, app_id, partition_count, pcs); dsn::replication::detect_hotkey_response resp; dsn::replication::detect_hotkey_request req; req.type = hotkey_type; req.action = action; req.pid = dsn::gpid(app_id, partition_index); - auto error = _shell_context->ddl_client->detect_hotkey( - partitions[partition_index].hp_primary, req, resp); + auto error = + _shell_context->ddl_client->detect_hotkey(pcs[partition_index].hp_primary, req, resp); LOG_INFO("{} {} hotkey detection in {}.{}, server: {}", (action == dsn::replication::detect_action::STOP) ? "Stop" : "Start", (hotkey_type == dsn::replication::hotkey_type::WRITE) ? "write" : "read", app_name, partition_index, - FMT_HOST_PORT_AND_IP(partitions[partition_index], primary)); - + FMT_HOST_PORT_AND_IP(pcs[partition_index], primary)); if (error != dsn::ERR_OK) { LOG_ERROR("Hotkey detect rpc sending failed, in {}.{}, error_hint:{}", app_name, diff --git a/src/server/hotspot_partition_calculator.h b/src/server/hotspot_partition_calculator.h index d117bdc717..f5e3d93eda 100644 --- a/src/server/hotspot_partition_calculator.h +++ b/src/server/hotspot_partition_calculator.h @@ -27,7 +27,7 @@ #include "hotspot_partition_stat.h" #include "perf_counter/perf_counter_wrapper.h" #include "replica_admin_types.h" -#include "runtime/rpc/rpc_holder.h" +#include "rpc/rpc_holder.h" struct row_data; struct shell_context; diff --git a/src/server/info_collector.cpp b/src/server/info_collector.cpp index aa77a5d3c5..a59d42ec1a 100644 --- a/src/server/info_collector.cpp +++ b/src/server/info_collector.cpp @@ -32,10 +32,10 @@ #include "hotspot_partition_calculator.h" #include "pegasus/client.h" #include "result_writer.h" -#include "runtime/rpc/group_host_port.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_code.h" +#include "rpc/group_host_port.h" #include "shell/command_executor.h" +#include "task/async_calls.h" +#include "task/task_code.h" #include "utils/flags.h" #include "utils/fmt_logging.h" #include "utils/strings.h" @@ -118,13 +118,13 @@ info_collector::~info_collector() void info_collector::start() { - _app_stat_timer_task = - ::dsn::tasking::enqueue_timer(LPC_PEGASUS_APP_STAT_TIMER, - &_tracker, - [this] { on_app_stat(); }, - std::chrono::seconds(FLAGS_app_stat_interval_seconds), - 0, - std::chrono::minutes(1)); + _app_stat_timer_task = ::dsn::tasking::enqueue_timer( + LPC_PEGASUS_APP_STAT_TIMER, + &_tracker, + [this] { on_app_stat(); }, + std::chrono::seconds(FLAGS_app_stat_interval_seconds), + 0, + std::chrono::minutes(1)); _capacity_unit_stat_timer_task = ::dsn::tasking::enqueue_timer( LPC_PEGASUS_CAPACITY_UNIT_STAT_TIMER, @@ -272,11 +272,12 @@ void info_collector::on_capacity_unit_stat(int remaining_retry_count) "wait {} seconds to retry", remaining_retry_count, _capacity_unit_retry_wait_seconds); - ::dsn::tasking::enqueue(LPC_PEGASUS_CAPACITY_UNIT_STAT_TIMER, - &_tracker, - [=] { on_capacity_unit_stat(remaining_retry_count - 1); }, - 0, - std::chrono::seconds(_capacity_unit_retry_wait_seconds)); + ::dsn::tasking::enqueue( + LPC_PEGASUS_CAPACITY_UNIT_STAT_TIMER, + &_tracker, + [=] { on_capacity_unit_stat(remaining_retry_count - 1); }, + 0, + std::chrono::seconds(_capacity_unit_retry_wait_seconds)); } else { LOG_ERROR("get capacity unit stat failed, remaining_retry_count = 0, no retry anymore"); } @@ -319,11 +320,12 @@ void info_collector::on_storage_size_stat(int remaining_retry_count) "seconds to retry", remaining_retry_count, _storage_size_retry_wait_seconds); - ::dsn::tasking::enqueue(LPC_PEGASUS_STORAGE_SIZE_STAT_TIMER, - &_tracker, - [=] { on_storage_size_stat(remaining_retry_count - 1); }, - 0, - std::chrono::seconds(_storage_size_retry_wait_seconds)); + ::dsn::tasking::enqueue( + LPC_PEGASUS_STORAGE_SIZE_STAT_TIMER, + &_tracker, + [=] { on_storage_size_stat(remaining_retry_count - 1); }, + 0, + std::chrono::seconds(_storage_size_retry_wait_seconds)); } else { LOG_ERROR("get storage size stat failed, remaining_retry_count = 0, no retry anymore"); } diff --git a/src/server/info_collector.h b/src/server/info_collector.h index fe168ad7f1..747d5d7107 100644 --- a/src/server/info_collector.h +++ b/src/server/info_collector.h @@ -29,10 +29,10 @@ #include "perf_counter/perf_counter.h" #include "perf_counter/perf_counter_wrapper.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/task.h" -#include "runtime/task/task_tracker.h" +#include "rpc/rpc_host_port.h" #include "shell/command_helper.h" +#include "task/task.h" +#include "task/task_tracker.h" #include "utils/synchronize.h" struct shell_context; diff --git a/src/server/info_collector_app.cpp b/src/server/info_collector_app.cpp index 398f8bd5b1..bf70b39cd3 100644 --- a/src/server/info_collector_app.cpp +++ b/src/server/info_collector_app.cpp @@ -53,5 +53,5 @@ ::dsn::error_code info_collector_app::stop(bool cleanup) _detector.stop(); return ::dsn::ERR_OK; } -} -} // namespace +} // namespace server +} // namespace pegasus diff --git a/src/server/info_collector_app.h b/src/server/info_collector_app.h index b731f0e483..73dc958f31 100644 --- a/src/server/info_collector_app.h +++ b/src/server/info_collector_app.h @@ -43,5 +43,5 @@ class info_collector_app : public ::dsn::service_app info_collector _collector; available_detector _detector; }; -} -} // namespace +} // namespace server +} // namespace pegasus diff --git a/src/server/key_ttl_compaction_filter.h b/src/server/key_ttl_compaction_filter.h index 8df418de9d..6f4f07f05c 100644 --- a/src/server/key_ttl_compaction_filter.h +++ b/src/server/key_ttl_compaction_filter.h @@ -79,7 +79,7 @@ class KeyWithTTLCompactionFilter : public rocksdb::CompactionFilter } if (!_user_specified_operations.empty()) { - absl::string_view value_view = utils::to_string_view(existing_value); + std::string_view value_view = utils::to_string_view(existing_value); if (*value_changed) { value_view = *new_value; } @@ -92,7 +92,7 @@ class KeyWithTTLCompactionFilter : public rocksdb::CompactionFilter } bool user_specified_operation_filter(const rocksdb::Slice &key, - absl::string_view existing_value, + std::string_view existing_value, std::string *new_value, bool *value_changed) const { diff --git a/src/server/pegasus_event_listener.cpp b/src/server/pegasus_event_listener.cpp index 1c4ebbfd66..ba6b622c91 100644 --- a/src/server/pegasus_event_listener.cpp +++ b/src/server/pegasus_event_listener.cpp @@ -19,7 +19,7 @@ #include "pegasus_event_listener.h" -#include +#include #include #include #include diff --git a/src/server/pegasus_manual_compact_service.cpp b/src/server/pegasus_manual_compact_service.cpp index f4d143690b..2c8a604ac9 100644 --- a/src/server/pegasus_manual_compact_service.cpp +++ b/src/server/pegasus_manual_compact_service.cpp @@ -19,7 +19,7 @@ #include "pegasus_manual_compact_service.h" -#include +#include #include #include #include @@ -32,8 +32,8 @@ #include "common/replication.codes.h" #include "pegasus_server_impl.h" #include "runtime/api_layer1.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_code.h" +#include "task/async_calls.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/flags.h" #include "utils/fmt_logging.h" diff --git a/src/server/pegasus_mutation_duplicator.cpp b/src/server/pegasus_mutation_duplicator.cpp index 849fa56b3c..091d1247a8 100644 --- a/src/server/pegasus_mutation_duplicator.cpp +++ b/src/server/pegasus_mutation_duplicator.cpp @@ -19,7 +19,6 @@ #include "pegasus_mutation_duplicator.h" -#include #include #include #include @@ -27,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -37,10 +37,10 @@ #include "duplication_internal_types.h" #include "pegasus/client.h" #include "pegasus_key_schema.h" +#include "rpc/rpc_message.h" #include "rrdb/rrdb.code.definition.h" #include "rrdb/rrdb_types.h" #include "runtime/message_utils.h" -#include "runtime/rpc/rpc_message.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/chrono_literals.h" @@ -75,9 +75,9 @@ DSN_TAG_VARIABLE(dup_max_allowed_write_size, FT_MUTABLE); /// static definition of mutation_duplicator::creator. /*static*/ std::function( - replica_base *, absl::string_view, absl::string_view)> + replica_base *, std::string_view, std::string_view)> mutation_duplicator::creator = - [](replica_base *r, absl::string_view remote, absl::string_view app) { + [](replica_base *r, std::string_view remote, std::string_view app) { return std::make_unique(r, remote, app); }; @@ -116,8 +116,8 @@ using namespace dsn::literals::chrono_literals; } pegasus_mutation_duplicator::pegasus_mutation_duplicator(dsn::replication::replica_base *r, - absl::string_view remote_cluster, - absl::string_view app) + std::string_view remote_cluster, + std::string_view app) : mutation_duplicator(r), _remote_cluster(remote_cluster), METRIC_VAR_INIT_replica(dup_shipped_successful_requests), @@ -171,11 +171,12 @@ void pegasus_mutation_duplicator::send(uint64_t hash, callback cb) _inflights[hash].pop_front(); } - _client->async_duplicate(rpc, - [hash, cb, rpc, this](dsn::error_code err) mutable { - on_duplicate_reply(hash, std::move(cb), std::move(rpc), err); - }, - _env.__conf.tracker); + _client->async_duplicate( + rpc, + [hash, cb, rpc, this](dsn::error_code err) mutable { + on_duplicate_reply(hash, std::move(cb), std::move(rpc), err); + }, + _env.__conf.tracker); } void pegasus_mutation_duplicator::on_duplicate_reply(uint64_t hash, diff --git a/src/server/pegasus_mutation_duplicator.h b/src/server/pegasus_mutation_duplicator.h index 2d81a6a37c..e3b488960e 100644 --- a/src/server/pegasus_mutation_duplicator.h +++ b/src/server/pegasus_mutation_duplicator.h @@ -28,10 +28,10 @@ #include "replica/duplication/mutation_duplicator.h" #include "rrdb/rrdb.client.h" #include "runtime/pipeline.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/chrono_literals.h" -#include "absl/strings/string_view.h" +#include #include "utils/metrics.h" #include "utils/zlocks.h" @@ -61,8 +61,8 @@ class pegasus_mutation_duplicator : public dsn::replication::mutation_duplicator public: pegasus_mutation_duplicator(dsn::replication::replica_base *r, - absl::string_view remote_cluster, - absl::string_view app); + std::string_view remote_cluster, + std::string_view app); void duplicate(mutation_tuple_set muts, callback cb) override; diff --git a/src/server/pegasus_scan_context.h b/src/server/pegasus_scan_context.h index ebf4399080..4e019afeca 100644 --- a/src/server/pegasus_scan_context.h +++ b/src/server/pegasus_scan_context.h @@ -138,5 +138,5 @@ class pegasus_context_cache std::unordered_map> _map; ::dsn::utils::ex_lock_nr_spin _lock; }; -} -} +} // namespace server +} // namespace pegasus diff --git a/src/server/pegasus_server_impl.cpp b/src/server/pegasus_server_impl.cpp index 35419d8efb..835596cd73 100644 --- a/src/server/pegasus_server_impl.cpp +++ b/src/server/pegasus_server_impl.cpp @@ -43,8 +43,8 @@ #include #include #include +#include -#include "absl/strings/string_view.h" #include "base/idl_utils.h" // IWYU pragma: keep #include "base/meta_store.h" #include "base/pegasus_key_schema.h" @@ -60,17 +60,17 @@ #include "pegasus_rpc_types.h" #include "pegasus_server_write.h" #include "replica_admin_types.h" +#include "rpc/rpc_message.h" #include "rrdb/rrdb.code.definition.h" #include "rrdb/rrdb_types.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_code.h" #include "server/key_ttl_compaction_filter.h" #include "server/pegasus_manual_compact_service.h" #include "server/pegasus_read_service.h" #include "server/pegasus_scan_context.h" #include "server/range_read_limiter.h" +#include "task/async_calls.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/chrono_literals.h" @@ -1372,11 +1372,12 @@ void pegasus_server_impl::on_get_scanner(get_scanner_rpc rpc) // if the context is used, it will be fetched and re-put into cache, // which will change the handle, // then the delayed task will fetch null context by old handle, and do nothing. - ::dsn::tasking::enqueue(LPC_PEGASUS_SERVER_DELAY, - &_tracker, - [this, handle]() { _context_cache.fetch(handle); }, - 0, - std::chrono::minutes(5)); + ::dsn::tasking::enqueue( + LPC_PEGASUS_SERVER_DELAY, + &_tracker, + [this, handle]() { _context_cache.fetch(handle); }, + 0, + std::chrono::minutes(5)); } else { // scan completed resp.context_id = pegasus_scan_context::SCAN_CONTEXT_ID_COMPLETED; @@ -1517,11 +1518,12 @@ void pegasus_server_impl::on_scan(scan_rpc rpc) // scan not completed int64_t handle = _context_cache.put(std::move(context)); resp.context_id = handle; - ::dsn::tasking::enqueue(LPC_PEGASUS_SERVER_DELAY, - &_tracker, - [this, handle]() { _context_cache.fetch(handle); }, - 0, - std::chrono::minutes(5)); + ::dsn::tasking::enqueue( + LPC_PEGASUS_SERVER_DELAY, + &_tracker, + [this, handle]() { _context_cache.fetch(handle); }, + 0, + std::chrono::minutes(5)); } else { // scan completed resp.context_id = pegasus_scan_context::SCAN_CONTEXT_ID_COMPLETED; @@ -1805,11 +1807,11 @@ dsn::error_code pegasus_server_impl::start(int argc, char **argv) } LOG_DEBUG_PREFIX("start the update replica-level rocksdb statistics timer task"); - _update_replica_rdb_stat = - dsn::tasking::enqueue_timer(LPC_REPLICATION_LONG_COMMON, - &_tracker, - [this]() { this->update_replica_rocksdb_statistics(); }, - std::chrono::seconds(FLAGS_update_rdb_stat_interval)); + _update_replica_rdb_stat = dsn::tasking::enqueue_timer( + LPC_REPLICATION_LONG_COMMON, + &_tracker, + [this]() { this->update_replica_rocksdb_statistics(); }, + std::chrono::seconds(FLAGS_update_rdb_stat_interval)); // These counters are singletons on this server shared by all replicas, their metrics update // task should be scheduled once an interval on the server view. @@ -1817,6 +1819,12 @@ dsn::error_code pegasus_server_impl::start(int argc, char **argv) std::call_once(flag, [&]() { // The timer task will always running even though there is no replicas CHECK_NE(kServerStatUpdateTimeSec.count(), 0); + + // TODO(wangdan): _update_server_rdb_stat is server-level, thus it could not be simply + // cancelled in the destructor of pegasus_server_impl which is replica-level. + // + // We should refactor to make _update_server_rdb_stat exit gracefully by + // `_update_server_rdb_stat->cancel(true)`. _update_server_rdb_stat = dsn::tasking::enqueue_timer( LPC_REPLICATION_LONG_COMMON, nullptr, // TODO: the tracker is nullptr, we will fix it later @@ -1829,15 +1837,17 @@ dsn::error_code pegasus_server_impl::start(int argc, char **argv) this, _read_hotkey_collector, _write_hotkey_collector, _read_size_throttling_controller); _server_write = std::make_unique(this); - dsn::tasking::enqueue_timer(LPC_ANALYZE_HOTKEY, - &_tracker, - [this]() { _read_hotkey_collector->analyse_data(); }, - std::chrono::seconds(FLAGS_hotkey_analyse_time_interval_s)); + dsn::tasking::enqueue_timer( + LPC_ANALYZE_HOTKEY, + &_tracker, + [this]() { _read_hotkey_collector->analyse_data(); }, + std::chrono::seconds(FLAGS_hotkey_analyse_time_interval_s)); - dsn::tasking::enqueue_timer(LPC_ANALYZE_HOTKEY, - &_tracker, - [this]() { _write_hotkey_collector->analyse_data(); }, - std::chrono::seconds(FLAGS_hotkey_analyse_time_interval_s)); + dsn::tasking::enqueue_timer( + LPC_ANALYZE_HOTKEY, + &_tracker, + [this]() { _write_hotkey_collector->analyse_data(); }, + std::chrono::seconds(FLAGS_hotkey_analyse_time_interval_s)); return dsn::ERR_OK; } @@ -1869,10 +1879,7 @@ ::dsn::error_code pegasus_server_impl::stop(bool clear_state) _update_replica_rdb_stat->cancel(true); _update_replica_rdb_stat = nullptr; } - if (_update_server_rdb_stat != nullptr) { - _update_server_rdb_stat->cancel(true); - _update_server_rdb_stat = nullptr; - } + _tracker.cancel_outstanding_tasks(); _context_cache.clear(); @@ -2168,47 +2175,49 @@ ::dsn::error_code pegasus_server_impl::copy_checkpoint_to_dir_unsafe(const char } LOG_INFO_PREFIX("copy checkpoint to dir({}) succeed", checkpoint_dir); - if (checkpoint_decree != nullptr) { - rocksdb::DB *snapshot_db = nullptr; - std::vector handles_opened; - auto cleanup = [&](bool remove_checkpoint) { - if (remove_checkpoint && !::dsn::utils::filesystem::remove_path(checkpoint_dir)) { - LOG_ERROR_PREFIX("remove checkpoint directory {} failed", checkpoint_dir); - } - if (snapshot_db) { - for (auto handle : handles_opened) { - if (handle) { - snapshot_db->DestroyColumnFamilyHandle(handle); - handle = nullptr; - } + if (checkpoint_decree == nullptr) { + return ::dsn::ERR_OK; + } + + rocksdb::DB *snapshot_db = nullptr; + std::vector handles_opened; + auto cleanup = [&](bool remove_checkpoint) { + if (remove_checkpoint && !::dsn::utils::filesystem::remove_path(checkpoint_dir)) { + LOG_ERROR_PREFIX("remove checkpoint directory {} failed", checkpoint_dir); + } + if (snapshot_db) { + for (auto handle : handles_opened) { + if (handle) { + snapshot_db->DestroyColumnFamilyHandle(handle); + handle = nullptr; } - delete snapshot_db; - snapshot_db = nullptr; } - }; - - // Because of RocksDB's restriction, we have to to open default column family even though - // not use it - std::vector column_families( - {{meta_store::DATA_COLUMN_FAMILY_NAME, rocksdb::ColumnFamilyOptions()}, - {meta_store::META_COLUMN_FAMILY_NAME, rocksdb::ColumnFamilyOptions()}}); - status = rocksdb::DB::OpenForReadOnly( - _db_opts, checkpoint_dir, column_families, &handles_opened, &snapshot_db); - if (!status.ok()) { - LOG_ERROR_PREFIX( - "OpenForReadOnly from {} failed, error = {}", checkpoint_dir, status.ToString()); + delete snapshot_db; snapshot_db = nullptr; - cleanup(true); - return ::dsn::ERR_LOCAL_APP_FAILURE; } - CHECK_EQ_PREFIX(handles_opened.size(), 2); - CHECK_EQ_PREFIX(handles_opened[1]->GetName(), meta_store::META_COLUMN_FAMILY_NAME); - uint64_t last_flushed_decree = - _meta_store->get_decree_from_readonly_db(snapshot_db, handles_opened[1]); - *checkpoint_decree = last_flushed_decree; + }; - cleanup(false); + // Because of RocksDB's restriction, we have to to open default column family even though + // not use it + std::vector column_families( + {{meta_store::DATA_COLUMN_FAMILY_NAME, rocksdb::ColumnFamilyOptions()}, + {meta_store::META_COLUMN_FAMILY_NAME, rocksdb::ColumnFamilyOptions()}}); + status = rocksdb::DB::OpenForReadOnly( + _db_opts, checkpoint_dir, column_families, &handles_opened, &snapshot_db); + if (!status.ok()) { + LOG_ERROR_PREFIX( + "OpenForReadOnly from {} failed, error = {}", checkpoint_dir, status.ToString()); + snapshot_db = nullptr; + cleanup(true); + return ::dsn::ERR_LOCAL_APP_FAILURE; } + CHECK_EQ_PREFIX(handles_opened.size(), 2); + CHECK_EQ_PREFIX(handles_opened[1]->GetName(), meta_store::META_COLUMN_FAMILY_NAME); + uint64_t last_flushed_decree = + _meta_store->get_decree_from_readonly_db(snapshot_db, handles_opened[1]); + *checkpoint_decree = last_flushed_decree; + + cleanup(false); return ::dsn::ERR_OK; } @@ -2318,6 +2327,17 @@ pegasus_server_impl::storage_apply_checkpoint(chkpt_apply_mode mode, return ::dsn::ERR_OK; } +int64_t pegasus_server_impl::last_flushed_decree() const +{ + uint64_t decree = 0; + const auto &err = _meta_store->get_last_flushed_decree(&decree); + if (dsn_unlikely(err != dsn::ERR_OK)) { + return -1; + } + + return static_cast(decree); +} + bool pegasus_server_impl::validate_filter(::dsn::apps::filter_type::type filter_type, const ::dsn::blob &filter_pattern, const ::dsn::blob &value) @@ -2334,7 +2354,7 @@ bool pegasus_server_impl::validate_filter(::dsn::apps::filter_type::type filter_ return false; if (filter_type == ::dsn::apps::filter_type::FT_MATCH_ANYWHERE) { return value.to_string_view().find(filter_pattern.to_string_view()) != - absl::string_view::npos; + std::string_view::npos; } else if (filter_type == ::dsn::apps::filter_type::FT_MATCH_PREFIX) { return dsn::utils::mequals( value.data(), filter_pattern.data(), filter_pattern.length()); @@ -3362,14 +3382,15 @@ uint64_t pegasus_server_impl::do_manual_compact(const rocksdb::CompactRangeOptio // we will try to generate it again, and it will probably succeed because at least some // empty data is written into rocksdb by periodic group check. LOG_INFO_PREFIX("release storage failed after manual compact, will retry after 5 minutes"); - ::dsn::tasking::enqueue(LPC_PEGASUS_SERVER_DELAY, - &_tracker, - [this]() { - LOG_INFO_PREFIX("retry release storage after manual compact"); - release_storage_after_manual_compact(); - }, - 0, - std::chrono::minutes(5)); + ::dsn::tasking::enqueue( + LPC_PEGASUS_SERVER_DELAY, + &_tracker, + [this]() { + LOG_INFO_PREFIX("retry release storage after manual compact"); + release_storage_after_manual_compact(); + }, + 0, + std::chrono::minutes(5)); } // update rocksdb statistics immediately diff --git a/src/server/pegasus_server_impl.h b/src/server/pegasus_server_impl.h index 361d9cbbae..f65c43266c 100644 --- a/src/server/pegasus_server_impl.h +++ b/src/server/pegasus_server_impl.h @@ -46,8 +46,8 @@ #include "pegasus_value_schema.h" #include "range_read_limiter.h" #include "replica/replication_app_base.h" -#include "runtime/task/task.h" -#include "runtime/task/task_tracker.h" +#include "task/task.h" +#include "task/task_tracker.h" #include "utils/error_code.h" #include "utils/flags.h" #include "utils/metrics.h" @@ -223,6 +223,8 @@ class pegasus_server_impl : public pegasus_read_service ::dsn::error_code storage_apply_checkpoint(chkpt_apply_mode mode, const dsn::replication::learn_state &state) override; + int64_t last_flushed_decree() const override; + int64_t last_durable_decree() const override { return _last_durable_decree.load(); } void update_app_envs(const std::map &envs) override; @@ -391,8 +393,8 @@ class pegasus_server_impl : public pegasus_read_service bool check_value_if_nearby(uint64_t base_value, uint64_t check_value) { uint64_t gap = base_value / 4; - uint64_t actual_gap = - (base_value < check_value) ? check_value - base_value : base_value - check_value; + uint64_t actual_gap = (base_value < check_value) ? check_value - base_value + : base_value - check_value; return actual_gap <= gap; } diff --git a/src/server/pegasus_server_impl_init.cpp b/src/server/pegasus_server_impl_init.cpp index d7611e2660..6e54c25a89 100644 --- a/src/server/pegasus_server_impl_init.cpp +++ b/src/server/pegasus_server_impl_init.cpp @@ -17,7 +17,6 @@ * under the License. */ -#include #include #include #include @@ -27,12 +26,12 @@ #include #include #include -#include #include #include #include #include #include +#include #include #include #include @@ -45,8 +44,8 @@ #include "pegasus_server_impl.h" #include "pegasus_value_schema.h" #include "replica_admin_types.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_host_port.h" #include "server/capacity_unit_calculator.h" // IWYU pragma: keep #include "server/key_ttl_compaction_filter.h" #include "server/pegasus_read_service.h" diff --git a/src/server/pegasus_server_write.cpp b/src/server/pegasus_server_write.cpp index 1bb6c968ad..77f679ee71 100644 --- a/src/server/pegasus_server_write.cpp +++ b/src/server/pegasus_server_write.cpp @@ -17,11 +17,11 @@ * under the License. */ -#include #include #include #include #include +#include #include #include "base/pegasus_key_schema.h" @@ -32,9 +32,9 @@ #include "pegasus_server_impl.h" #include "pegasus_server_write.h" #include "pegasus_utils.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_message.h" #include "rrdb/rrdb.code.definition.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_message.h" #include "server/pegasus_write_service.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" diff --git a/src/server/pegasus_server_write.h b/src/server/pegasus_server_write.h index 2329ec586a..add75b392d 100644 --- a/src/server/pegasus_server_write.h +++ b/src/server/pegasus_server_write.h @@ -29,7 +29,7 @@ #include "pegasus_write_service.h" #include "replica/replica_base.h" #include "rrdb/rrdb_types.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/metrics.h" namespace dsn { diff --git a/src/server/pegasus_write_service.cpp b/src/server/pegasus_write_service.cpp index d688ab6e45..af2e07302a 100644 --- a/src/server/pegasus_write_service.cpp +++ b/src/server/pegasus_write_service.cpp @@ -17,7 +17,7 @@ * under the License. */ -#include +#include #include #include #include @@ -38,8 +38,8 @@ #include "rrdb/rrdb_types.h" #include "runtime/api_layer1.h" #include "runtime/message_utils.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_code.h" +#include "task/async_calls.h" +#include "task/task_code.h" #include "server/pegasus_server_impl.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" diff --git a/src/server/pegasus_write_service_impl.h b/src/server/pegasus_write_service_impl.h index 6eec4b7601..35a9c6399d 100644 --- a/src/server/pegasus_write_service_impl.h +++ b/src/server/pegasus_write_service_impl.h @@ -90,7 +90,7 @@ class pegasus_write_service::impl : public dsn::replication::replica_base int empty_put(int64_t decree) { int err = - _rocksdb_wrapper->write_batch_put(decree, absl::string_view(), absl::string_view(), 0); + _rocksdb_wrapper->write_batch_put(decree, std::string_view(), std::string_view(), 0); auto cleanup = dsn::defer([this]() { _rocksdb_wrapper->clear_up_write_batch(); }); if (err != rocksdb::Status::kOk) { return err; @@ -178,7 +178,7 @@ class pegasus_write_service::impl : public dsn::replication::replica_base resp.decree = decree; resp.server = _primary_host_port; - absl::string_view raw_key = update.key.to_string_view(); + std::string_view raw_key = update.key.to_string_view(); int64_t new_value = 0; uint32_t new_expire_ts = 0; db_get_context get_ctx; @@ -275,7 +275,7 @@ class pegasus_write_service::impl : public dsn::replication::replica_base pegasus_generate_key(check_key, update.hash_key, update.check_sort_key); db_get_context get_context; - absl::string_view check_raw_key = check_key.to_string_view(); + std::string_view check_raw_key = check_key.to_string_view(); int err = _rocksdb_wrapper->get(check_raw_key, &get_context); if (err != rocksdb::Status::kOk) { // read check value failed @@ -327,7 +327,7 @@ class pegasus_write_service::impl : public dsn::replication::replica_base } else { // check not passed, write empty record to update rocksdb's last flushed decree resp.error = _rocksdb_wrapper->write_batch_put( - decree, absl::string_view(), absl::string_view(), 0); + decree, std::string_view(), std::string_view(), 0); } auto cleanup = dsn::defer([this]() { _rocksdb_wrapper->clear_up_write_batch(); }); @@ -342,8 +342,8 @@ class pegasus_write_service::impl : public dsn::replication::replica_base if (!passed) { // check not passed, return proper error code to user - resp.error = - invalid_argument ? rocksdb::Status::kInvalidArgument : rocksdb::Status::kTryAgain; + resp.error = invalid_argument ? rocksdb::Status::kInvalidArgument + : rocksdb::Status::kTryAgain; } return rocksdb::Status::kOk; @@ -395,7 +395,7 @@ class pegasus_write_service::impl : public dsn::replication::replica_base pegasus_generate_key(check_key, update.hash_key, update.check_sort_key); db_get_context get_context; - absl::string_view check_raw_key = check_key.to_string_view(); + std::string_view check_raw_key = check_key.to_string_view(); int err = _rocksdb_wrapper->get(check_raw_key, &get_context); if (err != rocksdb::Status::kOk) { // read check value failed @@ -453,7 +453,7 @@ class pegasus_write_service::impl : public dsn::replication::replica_base } else { // check not passed, write empty record to update rocksdb's last flushed decree resp.error = _rocksdb_wrapper->write_batch_put( - decree, absl::string_view(), absl::string_view(), 0); + decree, std::string_view(), std::string_view(), 0); } auto cleanup = dsn::defer([this]() { _rocksdb_wrapper->clear_up_write_batch(); }); @@ -468,8 +468,8 @@ class pegasus_write_service::impl : public dsn::replication::replica_base if (!passed) { // check not passed, return proper error code to user - resp.error = - invalid_argument ? rocksdb::Status::kInvalidArgument : rocksdb::Status::kTryAgain; + resp.error = invalid_argument ? rocksdb::Status::kInvalidArgument + : rocksdb::Status::kTryAgain; } return rocksdb::Status::kOk; } @@ -562,7 +562,7 @@ class pegasus_write_service::impl : public dsn::replication::replica_base _rocksdb_wrapper->clear_up_write_batch(); } - static dsn::blob composite_raw_key(absl::string_view hash_key, absl::string_view sort_key) + static dsn::blob composite_raw_key(std::string_view hash_key, std::string_view sort_key) { dsn::blob raw_key; pegasus_generate_key(raw_key, hash_key, sort_key); @@ -609,7 +609,7 @@ class pegasus_write_service::impl : public dsn::replication::replica_base return false; if (check_type == ::dsn::apps::cas_check_type::CT_VALUE_MATCH_ANYWHERE) { return value.to_string_view().find(check_operand.to_string_view()) != - absl::string_view::npos; + std::string_view::npos; } else if (check_type == ::dsn::apps::cas_check_type::CT_VALUE_MATCH_PREFIX) { return dsn::utils::mequals( value.data(), check_operand.data(), check_operand.length()); diff --git a/src/server/result_writer.cpp b/src/server/result_writer.cpp index 5629106b90..4d3a9160e0 100644 --- a/src/server/result_writer.cpp +++ b/src/server/result_writer.cpp @@ -24,8 +24,8 @@ #include #include "pegasus/client.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_code.h" +#include "task/async_calls.h" +#include "task/task_code.h" #include "utils/flags.h" #include "utils/fmt_logging.h" #include "utils/threadpool_code.h" diff --git a/src/server/result_writer.h b/src/server/result_writer.h index d9f92e4da5..fc204aeb3b 100644 --- a/src/server/result_writer.h +++ b/src/server/result_writer.h @@ -21,7 +21,7 @@ #include -#include "runtime/task/task_tracker.h" +#include "task/task_tracker.h" namespace pegasus { class pegasus_client; diff --git a/src/server/rocksdb_wrapper.cpp b/src/server/rocksdb_wrapper.cpp index 59203836be..078b2b9d8a 100644 --- a/src/server/rocksdb_wrapper.cpp +++ b/src/server/rocksdb_wrapper.cpp @@ -19,7 +19,7 @@ #include "rocksdb_wrapper.h" -#include +#include #include #include #include @@ -74,9 +74,9 @@ rocksdb_wrapper::rocksdb_wrapper(pegasus_server_impl *server) _wt_opts->disableWAL = true; } -int rocksdb_wrapper::get(absl::string_view raw_key, /*out*/ db_get_context *ctx) +int rocksdb_wrapper::get(std::string_view raw_key, /*out*/ db_get_context *ctx) { - FAIL_POINT_INJECT_F("db_get", [](absl::string_view) -> int { return FAIL_DB_GET; }); + FAIL_POINT_INJECT_F("db_get", [](std::string_view) -> int { return FAIL_DB_GET; }); rocksdb::Status s = _db->Get(_rd_opts, utils::to_rocksdb_slice(raw_key), &(ctx->raw_value)); if (dsn_likely(s.ok())) { @@ -105,20 +105,20 @@ int rocksdb_wrapper::get(absl::string_view raw_key, /*out*/ db_get_context *ctx) } int rocksdb_wrapper::write_batch_put(int64_t decree, - absl::string_view raw_key, - absl::string_view value, + std::string_view raw_key, + std::string_view value, uint32_t expire_sec) { return write_batch_put_ctx(db_write_context::empty(decree), raw_key, value, expire_sec); } int rocksdb_wrapper::write_batch_put_ctx(const db_write_context &ctx, - absl::string_view raw_key, - absl::string_view value, + std::string_view raw_key, + std::string_view value, uint32_t expire_sec) { FAIL_POINT_INJECT_F("db_write_batch_put", - [](absl::string_view) -> int { return FAIL_DB_WRITE_BATCH_PUT; }); + [](std::string_view) -> int { return FAIL_DB_WRITE_BATCH_PUT; }); uint64_t new_timetag = ctx.remote_timetag; if (!ctx.is_duplicated_write()) { // local write @@ -143,7 +143,7 @@ int rocksdb_wrapper::write_batch_put_ctx(const db_write_context &ctx, if (local_timetag >= new_timetag) { // ignore this stale update with lower timetag, // and write an empty record instead - raw_key = value = absl::string_view(); + raw_key = value = std::string_view(); } } } @@ -175,7 +175,7 @@ int rocksdb_wrapper::write(int64_t decree) return FLAGS_inject_write_error_for_test; } - FAIL_POINT_INJECT_F("db_write", [](absl::string_view) -> int { return FAIL_DB_WRITE; }); + FAIL_POINT_INJECT_F("db_write", [](std::string_view) -> int { return FAIL_DB_WRITE; }); rocksdb::Status status = _write_batch->Put(_meta_cf, meta_store::LAST_FLUSHED_DECREE, std::to_string(decree)); @@ -194,10 +194,10 @@ int rocksdb_wrapper::write(int64_t decree) return status.code(); } -int rocksdb_wrapper::write_batch_delete(int64_t decree, absl::string_view raw_key) +int rocksdb_wrapper::write_batch_delete(int64_t decree, std::string_view raw_key) { FAIL_POINT_INJECT_F("db_write_batch_delete", - [](absl::string_view) -> int { return FAIL_DB_WRITE_BATCH_DELETE; }); + [](std::string_view) -> int { return FAIL_DB_WRITE_BATCH_DELETE; }); rocksdb::Status s = _write_batch->Delete(utils::to_rocksdb_slice(raw_key)); if (dsn_unlikely(!s.ok())) { diff --git a/src/server/rocksdb_wrapper.h b/src/server/rocksdb_wrapper.h index 00a682a540..c73f5cb918 100644 --- a/src/server/rocksdb_wrapper.h +++ b/src/server/rocksdb_wrapper.h @@ -29,7 +29,7 @@ #include "pegasus_value_schema.h" #include "replica/replica_base.h" -#include "absl/strings/string_view.h" +#include #include "utils/metrics.h" namespace rocksdb { @@ -54,18 +54,18 @@ class rocksdb_wrapper : public dsn::replication::replica_base /// is returned. /// \result ctx.expired=true if record expired. Still rocksdb::Status::kOk is returned. /// \result ctx.found=false if record is not found. Still rocksdb::Status::kOk is returned. - int get(absl::string_view raw_key, /*out*/ db_get_context *ctx); + int get(std::string_view raw_key, /*out*/ db_get_context *ctx); int write_batch_put(int64_t decree, - absl::string_view raw_key, - absl::string_view value, + std::string_view raw_key, + std::string_view value, uint32_t expire_sec); int write_batch_put_ctx(const db_write_context &ctx, - absl::string_view raw_key, - absl::string_view value, + std::string_view raw_key, + std::string_view value, uint32_t expire_sec); int write(int64_t decree); - int write_batch_delete(int64_t decree, absl::string_view raw_key); + int write_batch_delete(int64_t decree, std::string_view raw_key); void clear_up_write_batch(); int ingest_files(int64_t decree, const std::vector &sst_file_list, diff --git a/src/server/test/capacity_unit_calculator_test.cpp b/src/server/test/capacity_unit_calculator_test.cpp index b093cb0586..87309e6756 100644 --- a/src/server/test/capacity_unit_calculator_test.cpp +++ b/src/server/test/capacity_unit_calculator_test.cpp @@ -31,7 +31,7 @@ #include "pegasus_server_test_base.h" #include "replica_admin_types.h" #include "rrdb/rrdb_types.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_message.h" #include "server/capacity_unit_calculator.h" #include "server/hotkey_collector.h" #include "utils/autoref_ptr.h" diff --git a/src/server/test/hotkey_collector_test.cpp b/src/server/test/hotkey_collector_test.cpp index c8eb49cadb..7219c5ca04 100644 --- a/src/server/test/hotkey_collector_test.cpp +++ b/src/server/test/hotkey_collector_test.cpp @@ -17,9 +17,9 @@ #include "server/hotkey_collector.h" -#include #include #include +#include #include #include "base/pegasus_key_schema.h" @@ -27,14 +27,14 @@ #include "common/replication.codes.h" #include "gtest/gtest.h" #include "pegasus_server_test_base.h" +#include "rpc/rpc_holder.h" #include "rrdb/rrdb.code.definition.h" #include "rrdb/rrdb_types.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task_tracker.h" #include "server/hotkey_collector_state.h" #include "server/pegasus_read_service.h" #include "server/test/message_utils.h" +#include "task/async_calls.h" +#include "task/task_tracker.h" #include "utils/error_code.h" #include "utils/flags.h" #include "utils/fmt_logging.h" @@ -70,7 +70,7 @@ TEST(hotkey_collector_public_func_test, get_bucket_id_test) { int bucket_id = -1; for (int i = 0; i < 1000000; i++) { - bucket_id = get_bucket_id(absl::string_view(generate_hash_key_by_random(false)), + bucket_id = get_bucket_id(std::string_view(generate_hash_key_by_random(false)), FLAGS_hotkey_buckets_num); ASSERT_GE(bucket_id, 0); ASSERT_LT(bucket_id, FLAGS_hotkey_buckets_num); diff --git a/src/server/test/pegasus_mutation_duplicator_test.cpp b/src/server/test/pegasus_mutation_duplicator_test.cpp index aaf91a0657..1bb4be6246 100644 --- a/src/server/test/pegasus_mutation_duplicator_test.cpp +++ b/src/server/test/pegasus_mutation_duplicator_test.cpp @@ -19,12 +19,11 @@ #include "server/pegasus_mutation_duplicator.h" -#include #include #include #include -#include #include +#include #include #include #include @@ -39,11 +38,11 @@ #include "pegasus_key_schema.h" #include "pegasus_server_test_base.h" #include "replica/replica_base.h" +#include "rpc/rpc_holder.h" +#include "rpc/rpc_message.h" #include "rrdb/rrdb.code.definition.h" #include "rrdb/rrdb_types.h" #include "runtime/message_utils.h" -#include "runtime/rpc/rpc_holder.h" -#include "runtime/rpc/rpc_message.h" #include "utils/blob.h" #include "utils/error_code.h" @@ -116,12 +115,13 @@ class pegasus_mutation_duplicator_test : public pegasus_server_test_base total_shipped_size += rpc.dsn_request()->body_size() + rpc.dsn_request()->header->hdr_length; - duplicator_impl->on_duplicate_reply(get_hash(rpc), - [total_shipped_size](size_t final_size) { - ASSERT_EQ(total_shipped_size, final_size); - }, - rpc, - dsn::ERR_OK); + duplicator_impl->on_duplicate_reply( + get_hash(rpc), + [total_shipped_size](size_t final_size) { + ASSERT_EQ(total_shipped_size, final_size); + }, + rpc, + dsn::ERR_OK); // schedule next round _tracker.wait_outstanding_tasks(); @@ -190,7 +190,8 @@ class pegasus_mutation_duplicator_test : public pegasus_server_test_base // with other error rpc.response().error = PERR_INVALID_ARGUMENT; - duplicator_impl->on_duplicate_reply(get_hash(rpc), [](size_t) {}, rpc, dsn::ERR_OK); + duplicator_impl->on_duplicate_reply( + get_hash(rpc), [](size_t) {}, rpc, dsn::ERR_OK); _tracker.wait_outstanding_tasks(); ASSERT_EQ(duplicator_impl->_inflights.size(), 1); ASSERT_EQ(duplicate_rpc::mail_box().size(), 1); @@ -260,7 +261,8 @@ class pegasus_mutation_duplicator_test : public pegasus_server_test_base auto rpc_list = std::move(duplicate_rpc::mail_box()); for (const auto &rpc : rpc_list) { rpc.response().error = dsn::ERR_OK; - duplicator_impl->on_duplicate_reply(get_hash(rpc), [](size_t) {}, rpc, dsn::ERR_OK); + duplicator_impl->on_duplicate_reply( + get_hash(rpc), [](size_t) {}, rpc, dsn::ERR_OK); } _tracker.wait_outstanding_tasks(); ASSERT_EQ(duplicate_rpc::mail_box().size(), 0); diff --git a/src/server/test/pegasus_server_impl_test.cpp b/src/server/test/pegasus_server_impl_test.cpp index 1c57922c48..f2a4915750 100644 --- a/src/server/test/pegasus_server_impl_test.cpp +++ b/src/server/test/pegasus_server_impl_test.cpp @@ -104,7 +104,8 @@ class pegasus_server_impl_test : public pegasus_server_test_base std::string env_value; std::string expect_value; } tests[] = { - {"rocksdb.num_levels", "5", "5"}, {"rocksdb.write_buffer_size", "33554432", "33554432"}, + {"rocksdb.num_levels", "5", "5"}, + {"rocksdb.write_buffer_size", "33554432", "33554432"}, }; std::map all_test_envs; diff --git a/src/server/test/pegasus_server_write_test.cpp b/src/server/test/pegasus_server_write_test.cpp index 556cd9d7fe..72d21eb6a2 100644 --- a/src/server/test/pegasus_server_write_test.cpp +++ b/src/server/test/pegasus_server_write_test.cpp @@ -31,7 +31,7 @@ #include "pegasus_rpc_types.h" #include "pegasus_server_test_base.h" #include "rrdb/rrdb_types.h" -#include "runtime/rpc/rpc_holder.h" +#include "rpc/rpc_holder.h" #include "server/pegasus_server_write.h" #include "server/pegasus_write_service.h" #include "server/pegasus_write_service_impl.h" diff --git a/src/server/test/pegasus_write_service_impl_test.cpp b/src/server/test/pegasus_write_service_impl_test.cpp index a076699ef1..0ac33faa40 100644 --- a/src/server/test/pegasus_write_service_impl_test.cpp +++ b/src/server/test/pegasus_write_service_impl_test.cpp @@ -34,7 +34,7 @@ #include "server/rocksdb_wrapper.h" #include "utils/blob.h" #include "utils/fail_point.h" -#include "absl/strings/string_view.h" +#include namespace pegasus { namespace server { @@ -55,7 +55,7 @@ class pegasus_write_service_impl_test : public pegasus_server_test_base _rocksdb_wrapper = _write_impl->_rocksdb_wrapper.get(); } - int db_get(absl::string_view raw_key, db_get_context *get_ctx) + int db_get(std::string_view raw_key, db_get_context *get_ctx) { return _rocksdb_wrapper->get(raw_key, get_ctx); } @@ -79,7 +79,7 @@ class incr_test : public pegasus_write_service_impl_test { pegasus_write_service_impl_test::SetUp(); pegasus::pegasus_generate_key( - req.key, absl::string_view("hash_key"), absl::string_view("sort_key")); + req.key, std::string_view("hash_key"), std::string_view("sort_key")); } dsn::apps::incr_request req; diff --git a/src/server/test/pegasus_write_service_test.cpp b/src/server/test/pegasus_write_service_test.cpp index 6e448a4ade..e5c5b43828 100644 --- a/src/server/test/pegasus_write_service_test.cpp +++ b/src/server/test/pegasus_write_service_test.cpp @@ -32,15 +32,15 @@ #include "gtest/gtest.h" #include "message_utils.h" #include "pegasus_server_test_base.h" +#include "rpc/rpc_message.h" #include "rrdb/rrdb.code.definition.h" #include "rrdb/rrdb_types.h" #include "runtime/message_utils.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_code.h" #include "server/pegasus_server_write.h" #include "server/pegasus_write_service.h" #include "server/pegasus_write_service_impl.h" #include "server/rocksdb_wrapper.h" +#include "task/task_code.h" #include "utils/blob.h" #include "utils/fail_point.h" diff --git a/src/server/test/rocksdb_wrapper_test.cpp b/src/server/test/rocksdb_wrapper_test.cpp index e7f4b0006c..7831afc713 100644 --- a/src/server/test/rocksdb_wrapper_test.cpp +++ b/src/server/test/rocksdb_wrapper_test.cpp @@ -39,7 +39,7 @@ #include "utils/blob.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" -#include "absl/strings/string_view.h" +#include namespace pegasus { namespace server { @@ -60,12 +60,12 @@ class rocksdb_wrapper_test : public pegasus_server_test_base _rocksdb_wrapper = _server_write->_write_svc->_impl->_rocksdb_wrapper.get(); pegasus::pegasus_generate_key( - _raw_key, absl::string_view("hash_key"), absl::string_view("sort_key")); + _raw_key, std::string_view("hash_key"), std::string_view("sort_key")); } void single_set(db_write_context write_ctx, dsn::blob raw_key, - absl::string_view user_value, + std::string_view user_value, int32_t expire_ts_seconds) { ASSERT_EQ(_rocksdb_wrapper->write_batch_put_ctx( @@ -93,7 +93,7 @@ class rocksdb_wrapper_test : public pegasus_server_test_base SetUp(); } - uint64_t read_timestamp_from(absl::string_view raw_value) + uint64_t read_timestamp_from(std::string_view raw_value) { uint64_t local_timetag = pegasus_extract_timetag(_rocksdb_wrapper->_pegasus_data_version, raw_value); diff --git a/src/shell/command_helper.h b/src/shell/command_helper.h index e4e16f6218..db8e3c6f3b 100644 --- a/src/shell/command_helper.h +++ b/src/shell/command_helper.h @@ -29,6 +29,7 @@ #include #include +#include #include #include #include @@ -52,10 +53,10 @@ #include "http/http_client.h" #include "perf_counter/perf_counter_utils.h" #include "remote_cmd/remote_command.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" #include "tools/mutation_log_tool.h" #include "utils/fmt_utils.h" -#include "absl/strings/string_view.h" +#include #include "utils/errors.h" #include "utils/metrics.h" #include "utils/ports.h" @@ -64,6 +65,30 @@ #include "utils/synchronize.h" #include "utils/time_utils.h" +#define SHELL_PRINTLN_ERROR(msg, ...) \ + fmt::print(stderr, \ + fmt::emphasis::bold | fmt::fg(fmt::color::red), \ + "ERROR: {}\n", \ + fmt::format(msg, ##__VA_ARGS__)) + +#define SHELL_PRINT_WARNING_BASE(msg, ...) \ + fmt::print(stdout, \ + fmt::emphasis::bold | fmt::fg(fmt::color::yellow), \ + "WARNING: {}", \ + fmt::format(msg, ##__VA_ARGS__)) + +#define SHELL_PRINT_WARNING(msg, ...) SHELL_PRINT_WARNING_BASE(msg, ##__VA_ARGS__) + +#define SHELL_PRINTLN_WARNING(msg, ...) \ + SHELL_PRINT_WARNING_BASE("{}\n", fmt::format(msg, ##__VA_ARGS__)) + +#define SHELL_PRINT_OK_BASE(msg, ...) \ + fmt::print(stdout, fmt::emphasis::bold | fmt::fg(fmt::color::green), msg, ##__VA_ARGS__) + +#define SHELL_PRINT_OK(msg, ...) SHELL_PRINT_OK_BASE(msg, ##__VA_ARGS__) + +#define SHELL_PRINTLN_OK(msg, ...) SHELL_PRINT_OK_BASE("{}\n", fmt::format(msg, ##__VA_ARGS__)) + using namespace dsn::replication; DEFINE_TASK_CODE(LPC_SCAN_DATA, TASK_PRIORITY_COMMON, ::dsn::THREAD_POOL_DEFAULT) @@ -261,7 +286,7 @@ inline bool validate_filter(pegasus::pegasus_client::filter_type filter_type, if (value.length() < filter_pattern.length()) return false; if (filter_type == pegasus::pegasus_client::FT_MATCH_ANYWHERE) { - return absl::string_view(value).find(filter_pattern) != absl::string_view::npos; + return std::string_view(value).find(filter_pattern) != std::string_view::npos; } else if (filter_type == pegasus::pegasus_client::FT_MATCH_PREFIX) { return dsn::utils::mequals( value.data(), filter_pattern.data(), filter_pattern.length()); @@ -350,7 +375,8 @@ inline void scan_multi_data_next(scan_data_context *context) context->sema.wait(); auto callback = [context]( - int err, pegasus::pegasus_client::internal_info &&info) { + int err, + pegasus::pegasus_client::internal_info &&info) { if (err != pegasus::PERR_OK) { if (!context->split_completed.exchange(true)) { fprintf(stderr, @@ -429,28 +455,29 @@ inline void scan_data_next(scan_data_context *context) if (ts_expired) { scan_data_next(context); } else if (context->no_overwrite) { - auto callback = [context]( - int err, - pegasus::pegasus_client::check_and_set_results &&results, - pegasus::pegasus_client::internal_info &&info) { - if (err != pegasus::PERR_OK) { - if (!context->split_completed.exchange(true)) { - fprintf(stderr, + auto callback = + [context](int err, + pegasus::pegasus_client::check_and_set_results &&results, + pegasus::pegasus_client::internal_info &&info) { + if (err != pegasus::PERR_OK) { + if (!context->split_completed.exchange(true)) { + fprintf( + stderr, "ERROR: split[%d] async check and set failed: %s\n", context->split_id, context->client->get_error_string(err)); - context->error_occurred->store(true); - } - } else { - if (results.set_succeed) { - context->split_rows++; + context->error_occurred->store(true); + } + } else { + if (results.set_succeed) { + context->split_rows++; + } + scan_data_next(context); } - scan_data_next(context); - } - // should put "split_request_count--" at end of the scope, - // to prevent that split_request_count becomes 0 in the middle. - context->split_request_count--; - }; + // should put "split_request_count--" at end of the scope, + // to prevent that split_request_count becomes 0 in the middle. + context->split_request_count--; + }; pegasus::pegasus_client::check_and_set_options options; options.set_value_ttl_seconds = ttl_seconds; context->client->async_check_and_set( @@ -682,30 +709,67 @@ inline std::vector get_metrics(const std::vector &n return results; } +// Adapt the result returned by `get_metrics` into the structure that could be processed by +// `remote_command`. +template +inline dsn::error_s process_get_metrics_result(const dsn::http_result &result, + const node_desc &node, + const char *what, + Args &&...args) +{ + if (dsn_unlikely(!result.error())) { + return FMT_ERR(result.error().code(), + "ERROR: query {} metrics from node {} failed, msg={}", + fmt::format(what, std::forward(args)...), + node.hp, + result.error()); + } + + if (dsn_unlikely(result.status() != dsn::http_status_code::kOk)) { + return FMT_ERR(dsn::ERR_HTTP_ERROR, + "ERROR: query {} metrics from node {} failed, http_status={}, msg={}", + fmt::format(what, std::forward(args)...), + node.hp, + dsn::get_http_status_message(result.status()), + result.body()); + } + + return dsn::error_s::ok(); +} + #define RETURN_SHELL_IF_GET_METRICS_FAILED(result, node, what, ...) \ do { \ - if (dsn_unlikely(!result.error())) { \ - std::cout << "ERROR: send http request to query " << fmt::format(what, ##__VA_ARGS__) \ - << " metrics from node " << node.hp << " failed: " << result.error() \ - << std::endl; \ - return true; \ - } \ - if (dsn_unlikely(result.status() != dsn::http_status_code::kOk)) { \ - std::cout << "ERROR: send http request to query " << what << " metrics from node " \ - << node.hp << " failed: " << dsn::get_http_status_message(result.status()) \ - << std::endl \ - << result.body() << std::endl; \ + const auto &res = process_get_metrics_result(result, node, what, ##__VA_ARGS__); \ + if (dsn_unlikely(!res)) { \ + fmt::println(res.description()); \ return true; \ } \ } while (0) +// Adapt the result of some parsing operations on the metrics returned by `get_metrics` into the +// structure that could be processed by `remote_command`. +template +inline dsn::error_s process_parse_metrics_result(const dsn::error_s &result, + const node_desc &node, + const char *what, + Args &&...args) +{ + if (dsn_unlikely(!result)) { + return FMT_ERR(result.code(), + "ERROR: {} metrics response from node {} failed, msg={}", + fmt::format(what, std::forward(args)...), + node.hp, + result); + } + + return dsn::error_s::ok(); +} + #define RETURN_SHELL_IF_PARSE_METRICS_FAILED(expr, node, what, ...) \ do { \ - const auto &res = (expr); \ + const auto &res = process_parse_metrics_result(expr, node, what, ##__VA_ARGS__); \ if (dsn_unlikely(!res)) { \ - std::cout << "ERROR: parse " << fmt::format(what, ##__VA_ARGS__) \ - << " metrics response from node " << node.hp << " failed: " << res \ - << std::endl; \ + fmt::println(res.description()); \ return true; \ } \ } while (0) @@ -799,18 +863,26 @@ class aggregate_stats_calcs #define DEF_CALC_CREATOR(name) \ template \ - void create_##name(Args &&... args) \ + void create_##name(Args &&...args) \ { \ _##name = std::make_unique(std::forward(args)...); \ } // Create the aggregations as needed. + DEF_CALC_CREATOR(assignments) DEF_CALC_CREATOR(sums) DEF_CALC_CREATOR(increases) DEF_CALC_CREATOR(rates) #undef DEF_CALC_CREATOR +#define CALC_ASSIGNMENT_STATS(entities) \ + do { \ + if (_assignments) { \ + RETURN_NOT_OK(_assignments->assign(entities)); \ + } \ + } while (0) + #define CALC_ACCUM_STATS(entities) \ do { \ if (_sums) { \ @@ -818,24 +890,38 @@ class aggregate_stats_calcs } \ } while (0) - // Perform the chosen accum aggregations on the fetched metrics. + // Perform the chosen aggregations (both assignment and accum) on the fetched metrics. dsn::error_s aggregate_metrics(const std::string &json_string) { DESERIALIZE_METRIC_QUERY_BRIEF_SNAPSHOT(value, json_string, query_snapshot); + return aggregate_metrics(query_snapshot); + } + + dsn::error_s aggregate_metrics(const dsn::metric_query_brief_value_snapshot &query_snapshot) + { + CALC_ASSIGNMENT_STATS(query_snapshot.entities); CALC_ACCUM_STATS(query_snapshot.entities); return dsn::error_s::ok(); } - // Perform all of the chosen aggregations (both accum and delta) on the fetched metrics. + // Perform the chosen aggregations (assignement, accum, delta and rate) on the fetched metrics. dsn::error_s aggregate_metrics(const std::string &json_string_start, const std::string &json_string_end) { DESERIALIZE_METRIC_QUERY_BRIEF_2_SAMPLES( json_string_start, json_string_end, query_snapshot_start, query_snapshot_end); - // Apply ending sample to the accum aggregations. + return aggregate_metrics(query_snapshot_start, query_snapshot_end); + } + + dsn::error_s + aggregate_metrics(const dsn::metric_query_brief_value_snapshot &query_snapshot_start, + const dsn::metric_query_brief_value_snapshot &query_snapshot_end) + { + // Apply ending sample to the assignment and accum aggregations. + CALC_ASSIGNMENT_STATS(query_snapshot_end.entities); CALC_ACCUM_STATS(query_snapshot_end.entities); const std::array deltas_list = {&_increases, &_rates}; @@ -857,9 +943,12 @@ class aggregate_stats_calcs #undef CALC_ACCUM_STATS +#undef CALC_ASSIGNMENT_STATS + private: DISALLOW_COPY_AND_ASSIGN(aggregate_stats_calcs); + std::unique_ptr _assignments; std::unique_ptr _sums; std::unique_ptr _increases; std::unique_ptr _rates; @@ -904,6 +993,12 @@ class aggregate_stats_calcs } \ } while (false) +#define PARSE_OPT_STRS(container, def_val, ...) \ + do { \ + const auto param = cmd(__VA_ARGS__, (def_val)).str(); \ + ::dsn::utils::split_args(param.c_str(), container, ','); \ + } while (false) + // A helper macro to parse command argument, the result is filled in an uint32_t variable named // 'value'. #define PARSE_UINT(value) \ @@ -1441,7 +1536,7 @@ inline dsn::metric_filters row_data_filters(int32_t table_id) #define BIND_ROW(metric_name, member) \ { \ - #metric_name, &row.member \ +#metric_name, &row.member \ } inline stat_var_map create_sums(row_data &row) @@ -1529,7 +1624,7 @@ inline stat_var_map create_rates(row_data &row) // Given all tables, create all aggregations needed for the table-level stats. All selected // partitions should have their primary replicas on this node. inline std::unique_ptr create_table_aggregate_stats_calcs( - const std::map> &table_partitions, + const std::map> &pcs_by_appid, const dsn::host_port &node, const std::string &entity_type, std::vector &rows) @@ -1541,7 +1636,9 @@ inline std::unique_ptr create_table_aggregate_stats_calcs for (auto &row : rows) { const std::vector>> processors = { - {&sums, create_sums}, {&increases, create_increases}, {&rates, create_rates}, + {&sums, create_sums}, + {&increases, create_increases}, + {&rates, create_rates}, }; for (auto &processor : processors) { // Put both dimensions of table id and metric name into filters for each kind of @@ -1549,18 +1646,18 @@ inline std::unique_ptr create_table_aggregate_stats_calcs processor.first->emplace(row.app_id, processor.second(row)); } - const auto &table = table_partitions.find(row.app_id); - CHECK(table != table_partitions.end(), - "table could not be found in table_partitions: table_id={}", + const auto &iter = pcs_by_appid.find(row.app_id); + CHECK(iter != pcs_by_appid.end(), + "table could not be found in pcs_by_appid: table_id={}", row.app_id); - for (const auto &partition : table->second) { - if (partition.hp_primary != node) { + for (const auto &pc : iter->second) { + if (pc.hp_primary != node) { // Ignore once the replica of the metrics is not the primary of the partition. continue; } - partitions.insert(partition.pid); + partitions.insert(pc.pid); } } @@ -1575,25 +1672,27 @@ inline std::unique_ptr create_table_aggregate_stats_calcs // stats. All selected partitions should have their primary replicas on this node. inline std::unique_ptr create_partition_aggregate_stats_calcs(const int32_t table_id, - const std::vector &partitions, + const std::vector &pcs, const dsn::host_port &node, const std::string &entity_type, std::vector &rows) { - CHECK_EQ(rows.size(), partitions.size()); + CHECK_EQ(rows.size(), pcs.size()); partition_stat_map sums; partition_stat_map increases; partition_stat_map rates; for (size_t i = 0; i < rows.size(); ++i) { - if (partitions[i].hp_primary != node) { + if (pcs[i].hp_primary != node) { // Ignore once the replica of the metrics is not the primary of the partition. continue; } const std::vector>> processors = { - {&sums, create_sums}, {&increases, create_increases}, {&rates, create_rates}, + {&sums, create_sums}, + {&increases, create_increases}, + {&rates, create_rates}, }; for (auto &processor : processors) { // Put all dimensions of table id, partition_id, and metric name into filters for @@ -1754,13 +1853,13 @@ inline bool get_apps_and_nodes(shell_context *sc, inline bool get_app_partitions(shell_context *sc, const std::vector<::dsn::app_info> &apps, - std::map> &app_partitions) + std::map> &pcs_by_appid) { for (const ::dsn::app_info &app : apps) { int32_t app_id = 0; int32_t partition_count = 0; dsn::error_code err = sc->ddl_client->list_app( - app.app_name, app_id, partition_count, app_partitions[app.app_id]); + app.app_name, app_id, partition_count, pcs_by_appid[app.app_id]); if (err != ::dsn::ERR_OK) { LOG_ERROR("list app {} failed, error = {}", app.app_name, err); return false; @@ -1813,8 +1912,8 @@ inline bool get_app_partition_stat(shell_context *sc, } // get app_id --> partitions - std::map> app_partitions; - if (!get_app_partitions(sc, apps, app_partitions)) { + std::map> pcs_by_appid; + if (!get_app_partitions(sc, apps, pcs_by_appid)) { return false; } @@ -1838,8 +1937,8 @@ inline bool get_app_partition_stat(shell_context *sc, if (parse_app_pegasus_perf_counter_name( m.name, app_id_x, partition_index_x, counter_name)) { // only primary partition will be counted - auto find = app_partitions.find(app_id_x); - if (find != app_partitions.end() && + const auto find = pcs_by_appid.find(app_id_x); + if (find != pcs_by_appid.end() && find->second[partition_index_x].hp_primary == nodes[i].hp) { row_data &row = rows[app_id_name[app_id_x]][partition_index_x]; row.row_name = std::to_string(partition_index_x); @@ -1877,8 +1976,8 @@ get_table_stats(shell_context *sc, uint32_t sample_interval_ms, std::vector> table_partitions; - if (!get_app_partitions(sc, apps, table_partitions)) { + std::map> pcs_by_appid; + if (!get_app_partitions(sc, apps, pcs_by_appid)) { return false; } @@ -1892,19 +1991,18 @@ get_table_stats(shell_context *sc, uint32_t sample_interval_ms, std::vectoraggregate_metrics(results_start[i].body(), results_end[i].body()), nodes[i], - "row data requests"); + "aggregate row data requests"); } return true; @@ -1924,13 +2022,13 @@ inline bool get_partition_stats(shell_context *sc, int32_t table_id = 0; int32_t partition_count = 0; - std::vector partitions; - const auto &err = sc->ddl_client->list_app(table_name, table_id, partition_count, partitions); + std::vector pcs; + const auto &err = sc->ddl_client->list_app(table_name, table_id, partition_count, pcs); if (err != ::dsn::ERR_OK) { LOG_ERROR("list app {} failed, error = {}", table_name, err); return false; } - CHECK_EQ(partitions.size(), partition_count); + CHECK_EQ(pcs.size(), partition_count); const auto &query_string = row_data_filters(table_id).to_query_string(); const auto &results_start = get_metrics(nodes, query_string); @@ -1949,12 +2047,12 @@ inline bool get_partition_stats(shell_context *sc, RETURN_SHELL_IF_GET_METRICS_FAILED( results_end[i], nodes[i], "ending row data requests for table(id={})", table_id); - auto calcs = create_partition_aggregate_stats_calcs( - table_id, partitions, nodes[i].hp, "replica", rows); + auto calcs = + create_partition_aggregate_stats_calcs(table_id, pcs, nodes[i].hp, "replica", rows); RETURN_SHELL_IF_PARSE_METRICS_FAILED( calcs->aggregate_metrics(results_start[i].body(), results_end[i].body()), nodes[i], - "row data requests for table(id={})", + "aggregate row data requests for table(id={})", table_id); } @@ -2060,17 +2158,16 @@ inline bool get_storage_size_stat(shell_context *sc, app_storage_size_stat &st_s return false; } - std::map> app_partitions; - if (!get_app_partitions(sc, apps, app_partitions)) { + std::map> pcs_by_appid; + if (!get_app_partitions(sc, apps, pcs_by_appid)) { LOG_ERROR("get app partitions failed"); return false; } - for (auto &kv : app_partitions) { - auto &v = kv.second; - for (auto &c : v) { + for (auto &[_, pcs] : pcs_by_appid) { + for (auto &pc : pcs) { // use partition_flags to record if this partition's storage size is calculated, - // because `app_partitions' is a temporary variable, so we can re-use partition_flags. - c.partition_flags = 0; + // because `pcs_by_appid' is a temporary variable, so we can re-use partition_flags. + pc.partition_flags = 0; } } @@ -2091,10 +2188,10 @@ inline bool get_storage_size_stat(shell_context *sc, app_storage_size_stat &st_s CHECK(parse_ret, "name = {}", m.name); if (counter_name != "disk.storage.sst(MB)") continue; - auto find = app_partitions.find(app_id_x); - if (find == app_partitions.end()) // app id not found + auto find = pcs_by_appid.find(app_id_x); + if (find == pcs_by_appid.end()) // app id not found continue; - dsn::partition_configuration &pc = find->second[partition_index_x]; + auto &pc = find->second[partition_index_x]; if (pc.hp_primary != nodes[i].hp) // not primary replica continue; if (pc.partition_flags != 0) // already calculated diff --git a/src/shell/command_utils.cpp b/src/shell/command_utils.cpp index fb3be3bced..c5311cb985 100644 --- a/src/shell/command_utils.cpp +++ b/src/shell/command_utils.cpp @@ -22,34 +22,36 @@ #include "client/replication_ddl_client.h" #include "command_executor.h" #include "meta_admin_types.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/error_code.h" bool validate_ip(shell_context *sc, - const std::string &ip_str, + const std::string &target_hp_str, dsn::host_port &target_hp, std::string &err_info) { - target_hp = dsn::host_port::from_string(ip_str); + target_hp = dsn::host_port::from_string(target_hp_str); if (!target_hp) { - err_info = fmt::format("invalid ip:port={}, can't transform it into host_port", ip_str); + err_info = + fmt::format("invalid host:port '{}', can't transform it into host_port", target_hp_str); return false; } - std::map nodes; - auto error = sc->ddl_client->list_nodes(dsn::replication::node_status::NS_INVALID, nodes); + std::map ns_by_nodes; + const auto error = + sc->ddl_client->list_nodes(dsn::replication::node_status::NS_INVALID, ns_by_nodes); if (error != dsn::ERR_OK) { - err_info = fmt::format("list nodes failed, error={}", error.to_string()); + err_info = fmt::format("list nodes failed, error={}", error); return false; } - for (const auto &node : nodes) { - if (target_hp == node.first) { + for (const auto &[node, _] : ns_by_nodes) { + if (target_hp == node) { return true; } } - err_info = fmt::format("invalid ip:port={}, can't find it in the cluster", ip_str); + err_info = fmt::format("invalid host:port '{}', can't find it in the cluster", target_hp_str); return false; } diff --git a/src/shell/command_utils.h b/src/shell/command_utils.h index 5e1095d9a1..e076c3d32d 100644 --- a/src/shell/command_utils.h +++ b/src/shell/command_utils.h @@ -66,7 +66,7 @@ inline bool validate_cmd(const argh::parser &cmd, } bool validate_ip(shell_context *sc, - const std::string &ip_str, + const std::string &host_port_str, /*out*/ dsn::host_port &target_hp, /*out*/ std::string &err_info); diff --git a/src/shell/commands.h b/src/shell/commands.h index 24754aa84d..2e6044b064 100644 --- a/src/shell/commands.h +++ b/src/shell/commands.h @@ -29,7 +29,7 @@ #include "utils/filesystem.h" #include "utils/output_utils.h" #include "utils/string_conv.h" -#include "absl/strings/string_view.h" +#include #include "client/replication_ddl_client.h" #include "tools/mutation_log_tool.h" @@ -229,6 +229,7 @@ bool ls_backup_policy(command_executor *e, shell_context *sc, arguments args); bool modify_backup_policy(command_executor *e, shell_context *sc, arguments args); +extern const std::string disable_backup_policy_help; bool disable_backup_policy(command_executor *e, shell_context *sc, arguments args); bool enable_backup_policy(command_executor *e, shell_context *sc, arguments args); diff --git a/src/shell/commands/bulk_load.cpp b/src/shell/commands/bulk_load.cpp index e5b0265e3d..bd155d0b05 100644 --- a/src/shell/commands/bulk_load.cpp +++ b/src/shell/commands/bulk_load.cpp @@ -37,10 +37,10 @@ #include "bulk_load_types.h" #include "client/replication_ddl_client.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/task/task_spec.h" +#include "rpc/rpc_address.h" #include "shell/command_executor.h" #include "shell/commands.h" +#include "task/task_spec.h" #include "utils/error_code.h" #include "utils/errors.h" #include "utils/output_utils.h" @@ -165,8 +165,8 @@ bool control_bulk_load_helper(command_executor *e, err = dsn::error_s::make(err_resp.get_value().err); hint_msg = err_resp.get_value().hint_msg; } - std::string type_str = - type == dsn::replication::bulk_load_control_type::BLC_PAUSE ? "pause" : "restart"; + std::string type_str = type == dsn::replication::bulk_load_control_type::BLC_PAUSE ? "pause" + : "restart"; if (!err.is_ok()) { fmt::print( stderr, "{} bulk load failed, error={} [hint:\"{}\"]\n", type_str, err, hint_msg); diff --git a/src/shell/commands/cold_backup.cpp b/src/shell/commands/cold_backup.cpp index ba159c4cea..0a154d4616 100644 --- a/src/shell/commands/cold_backup.cpp +++ b/src/shell/commands/cold_backup.cpp @@ -20,6 +20,7 @@ // IWYU pragma: no_include #include #include +// IWYU pragma: no_include #include #include #include @@ -27,12 +28,17 @@ #include #include #include +#include #include +#include #include +#include #include #include "client/replication_ddl_client.h" +#include "shell/argh.h" #include "shell/command_executor.h" +#include "shell/command_helper.h" #include "shell/commands.h" #include "shell/sds/sds.h" #include "utils/error_code.h" @@ -143,64 +149,52 @@ bool add_backup_policy(command_executor *e, shell_context *sc, arguments args) bool ls_backup_policy(command_executor *e, shell_context *sc, arguments args) { - ::dsn::error_code err = sc->ddl_client->ls_backup_policy(); + argh::parser cmd(args.argc, args.argv); + const bool json = cmd[{"-j", "--json"}]; + + ::dsn::error_code err = sc->ddl_client->ls_backup_policy(json); if (err != ::dsn::ERR_OK) { std::cout << "ls backup policy failed" << std::endl; - } else { - std::cout << std::endl << "ls backup policy succeed" << std::endl; } return true; } bool query_backup_policy(command_executor *e, shell_context *sc, arguments args) { - static struct option long_options[] = {{"policy_name", required_argument, 0, 'p'}, - {"backup_info_cnt", required_argument, 0, 'b'}, - {0, 0, 0, 0}}; + const std::string query_backup_policy_help = + "<-p|--policy_name> [-b|--backup_info_cnt] [-j|--json]"; + argh::parser cmd(args.argc, args.argv, argh::parser::PREFER_PARAM_FOR_UNREG_OPTION); + RETURN_FALSE_IF_NOT(!cmd.params().empty(), + "invalid command, should be in the form of '{}'", + query_backup_policy_help); + std::vector policy_names; - int backup_info_cnt = 3; + PARSE_OPT_STRS(policy_names, "", {"-p", "--policy_name"}); - optind = 0; - while (true) { - int option_index = 0; - int c; - c = getopt_long(args.argc, args.argv, "p:b:", long_options, &option_index); - if (c == -1) - break; - switch (c) { - case 'p': { - std::vector names; - ::dsn::utils::split_args(optarg, names, ','); - for (const auto &policy_name : names) { - if (policy_name.empty()) { - fprintf(stderr, "invalid, empty policy_name, just ignore\n"); - continue; - } else { - policy_names.emplace_back(policy_name); - } - } - } break; - case 'b': - backup_info_cnt = atoi(optarg); - if (backup_info_cnt <= 0) { - fprintf(stderr, "invalid backup_info_cnt %s\n", optarg); - return false; - } - break; - default: - return false; - } - } if (policy_names.empty()) { - fprintf(stderr, "empty policy_name, please assign policy_name you want to query\n"); + SHELL_PRINTLN_ERROR( + "invalid command, policy_name should be in the form of 'val1,val2,val3' and " + "should not be empty"); return false; } - ::dsn::error_code ret = sc->ddl_client->query_backup_policy(policy_names, backup_info_cnt); + + std::set str_set(policy_names.begin(), policy_names.end()); + if (str_set.size() != policy_names.size()) { + SHELL_PRINTLN_ERROR("invalid command, policy_name has duplicate values"); + return false; + } + + uint32_t backup_info_cnt; + PARSE_OPT_UINT(backup_info_cnt, 3, {"-b", "--backup_info_cnt"}); + + const bool json = cmd[{"-j", "--json"}]; + + ::dsn::error_code ret = + sc->ddl_client->query_backup_policy(policy_names, backup_info_cnt, json); if (ret != ::dsn::ERR_OK) { fprintf(stderr, "query backup policy failed, err = %s\n", ret.to_string()); - } else { - std::cout << std::endl << "query backup policy succeed" << std::endl; } + return true; } @@ -311,37 +305,32 @@ bool modify_backup_policy(command_executor *e, shell_context *sc, arguments args return true; } +const std::string disable_backup_policy_help = "<-p|--policy_name str> [-f|--force]"; bool disable_backup_policy(command_executor *e, shell_context *sc, arguments args) { - static struct option long_options[] = {{"policy_name", required_argument, 0, 'p'}, - {0, 0, 0, 0}}; - - std::string policy_name; - optind = 0; - while (true) { - int option_index = 0; - int c; - c = getopt_long(args.argc, args.argv, "p:", long_options, &option_index); - if (c == -1) - break; - switch (c) { - case 'p': - policy_name = optarg; - break; - default: - return false; - } - } - - if (policy_name.empty()) { - fprintf(stderr, "empty policy name\n"); - return false; - } - - ::dsn::error_code ret = sc->ddl_client->disable_backup_policy(policy_name); - if (ret != dsn::ERR_OK) { - fprintf(stderr, "disable backup policy failed, with err = %s\n", ret.to_string()); - } + const argh::parser cmd(args.argc, args.argv, argh::parser::PREFER_PARAM_FOR_UNREG_OPTION); + // TODO(yingchun): make the following code as a function. + RETURN_FALSE_IF_NOT(cmd.pos_args().size() == 1 && cmd.pos_args()[0] == "disable_backup_policy", + "invalid command, should be in the form of '{}'", + disable_backup_policy_help); + RETURN_FALSE_IF_NOT(cmd.flags().empty() || + (cmd.flags().size() == 1 && + (cmd.flags().count("force") == 1 || cmd.flags().count("f") == 1)), + "invalid command, should be in the form of '{}'", + disable_backup_policy_help); + RETURN_FALSE_IF_NOT(cmd.params().size() == 1 && (cmd.params().begin()->first == "policy_name" || + cmd.params().begin()->first == "p"), + "invalid command, should be in the form of '{}'", + disable_backup_policy_help); + + const std::string policy_name = cmd({"-p", "--policy_name"}).str(); + RETURN_FALSE_IF_NOT(!policy_name.empty(), "invalid command, policy_name should not be empty"); + + const bool force = cmd[{"-f", "--force"}]; + + const auto ret = sc->ddl_client->disable_backup_policy(policy_name, force); + RETURN_FALSE_IF_NOT( + ret == dsn::ERR_OK, "disable backup policy failed, with err = {}", ret.to_string()); return true; } @@ -387,11 +376,13 @@ bool restore(command_executor *e, shell_context *sc, arguments args) {"new_app_name", required_argument, 0, 'n'}, {"timestamp", required_argument, 0, 't'}, {"backup_provider_type", required_argument, 0, 'b'}, + {"restore_path", required_argument, 0, 'r'}, {"skip_bad_partition", no_argument, 0, 's'}, {0, 0, 0, 0}}; std::string old_cluster_name, old_policy_name; std::string old_app_name, new_app_name; std::string backup_provider_type; + std::string restore_path; int32_t old_app_id = 0; int64_t timestamp = 0; bool skip_bad_partition = false; @@ -400,7 +391,7 @@ bool restore(command_executor *e, shell_context *sc, arguments args) while (true) { int option_index = 0; int c; - c = getopt_long(args.argc, args.argv, "c:p:a:i:n:t:b:s", long_options, &option_index); + c = getopt_long(args.argc, args.argv, "c:p:a:i:n:t:b:r:s", long_options, &option_index); if (c == -1) break; switch (c) { @@ -425,6 +416,9 @@ bool restore(command_executor *e, shell_context *sc, arguments args) case 'b': backup_provider_type = optarg; break; + case 'r': + restore_path = optarg; + break; case 's': skip_bad_partition = true; break; @@ -452,7 +446,8 @@ bool restore(command_executor *e, shell_context *sc, arguments args) old_app_name, old_app_id, new_app_name, - skip_bad_partition); + skip_bad_partition, + restore_path); if (err != ::dsn::ERR_OK) { fprintf(stderr, "restore app failed with err(%s)\n", err.to_string()); } diff --git a/src/shell/commands/data_operations.cpp b/src/shell/commands/data_operations.cpp index c8be633a78..3c8c31ca7a 100644 --- a/src/shell/commands/data_operations.cpp +++ b/src/shell/commands/data_operations.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -49,19 +50,18 @@ #include "pegasus/client.h" #include "pegasus_key_schema.h" #include "pegasus_utils.h" +#include "rpc/rpc_host_port.h" #include "rrdb/rrdb_types.h" -#include "runtime/rpc/rpc_host_port.h" -#include "runtime/task/async_calls.h" #include "shell/args.h" #include "shell/command_executor.h" #include "shell/command_helper.h" #include "shell/command_utils.h" #include "shell/commands.h" #include "shell/sds/sds.h" +#include "task/async_calls.h" #include "utils/blob.h" #include "utils/defer.h" #include "utils/error_code.h" -#include "utils/errors.h" #include "utils/flags.h" #include "utils/fmt_logging.h" #include "utils/metrics.h" @@ -2231,16 +2231,16 @@ inline dsn::metric_filters rdb_estimated_keys_filters(int32_t table_id) // All selected partitions should have their primary replicas on this node. std::unique_ptr create_rdb_estimated_keys_stats_calcs(const int32_t table_id, - const std::vector &partitions, + const std::vector &pcs, const dsn::host_port &node, const std::string &entity_type, std::vector &rows) { - CHECK_EQ(rows.size(), partitions.size()); + CHECK_EQ(rows.size(), pcs.size()); partition_stat_map sums; for (size_t i = 0; i < rows.size(); ++i) { - if (partitions[i].hp_primary != node) { + if (pcs[i].hp_primary != node) { // Ignore once the replica of the metrics is not the primary of the partition. continue; } @@ -2268,13 +2268,13 @@ bool get_rdb_estimated_keys_stats(shell_context *sc, int32_t table_id = 0; int32_t partition_count = 0; - std::vector partitions; - const auto &err = sc->ddl_client->list_app(table_name, table_id, partition_count, partitions); + std::vector pcs; + const auto &err = sc->ddl_client->list_app(table_name, table_id, partition_count, pcs); if (err != ::dsn::ERR_OK) { LOG_ERROR("list app {} failed, error = {}", table_name, err); return false; } - CHECK_EQ(partitions.size(), partition_count); + CHECK_EQ(pcs.size(), partition_count); const auto &results = get_metrics(nodes, rdb_estimated_keys_filters(table_id).to_query_string()); @@ -2289,11 +2289,11 @@ bool get_rdb_estimated_keys_stats(shell_context *sc, RETURN_SHELL_IF_GET_METRICS_FAILED( results[i], nodes[i], "rdb_estimated_keys for table(id={})", table_id); - auto calcs = create_rdb_estimated_keys_stats_calcs( - table_id, partitions, nodes[i].hp, "replica", rows); + auto calcs = + create_rdb_estimated_keys_stats_calcs(table_id, pcs, nodes[i].hp, "replica", rows); RETURN_SHELL_IF_PARSE_METRICS_FAILED(calcs->aggregate_metrics(results[i].body()), nodes[i], - "rdb_estimated_keys for table(id={})", + "aggregate rdb_estimated_keys for table(id={})", table_id); } @@ -2870,9 +2870,9 @@ bool calculate_hash_value(command_executor *e, shell_context *sc, arguments args if (!sc->current_app_name.empty()) { int32_t app_id; int32_t partition_count; - std::vector<::dsn::partition_configuration> partitions; + std::vector<::dsn::partition_configuration> pcs; ::dsn::error_code err = - sc->ddl_client->list_app(sc->current_app_name, app_id, partition_count, partitions); + sc->ddl_client->list_app(sc->current_app_name, app_id, partition_count, pcs); if (err != ::dsn::ERR_OK) { std::cout << "list app [" << sc->current_app_name << "] failed, error=" << err << std::endl; @@ -2883,17 +2883,11 @@ bool calculate_hash_value(command_executor *e, shell_context *sc, arguments args tp.add_row_name_and_data("app_id", app_id); tp.add_row_name_and_data("partition_count", partition_count); tp.add_row_name_and_data("partition_index", partition_index); - if (partitions.size() > partition_index) { - ::dsn::partition_configuration &pc = partitions[partition_index]; + if (pcs.size() > partition_index) { + const auto &pc = pcs[partition_index]; tp.add_row_name_and_data("primary", pc.hp_primary.to_string()); - - std::ostringstream oss; - for (int i = 0; i < pc.hp_secondaries.size(); ++i) { - if (i != 0) - oss << ","; - oss << pc.hp_secondaries[i]; - } - tp.add_row_name_and_data("secondaries", oss.str()); + tp.add_row_name_and_data("secondaries", + fmt::format("{}", fmt::join(pc.hp_secondaries, ","))); } } tp.output(std::cout); diff --git a/src/shell/commands/debugger.cpp b/src/shell/commands/debugger.cpp index 183814eb58..d854291ff3 100644 --- a/src/shell/commands/debugger.cpp +++ b/src/shell/commands/debugger.cpp @@ -52,15 +52,15 @@ #include "pegasus_key_schema.h" #include "pegasus_utils.h" #include "pegasus_value_schema.h" +#include "rpc/rpc_message.h" +#include "rpc/serialization.h" #include "rrdb/rrdb.code.definition.h" #include "rrdb/rrdb_types.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/rpc/serialization.h" -#include "runtime/task/task_code.h" #include "shell/args.h" #include "shell/command_executor.h" #include "shell/commands.h" #include "shell/sds/sds.h" +#include "task/task_code.h" #include "tools/mutation_log_tool.h" #include "utils/blob.h" #include "utils/filesystem.h" @@ -143,8 +143,10 @@ bool mlog_dump(command_executor *e, shell_context *sc, arguments args) std::function callback; if (detailed) { - callback = [&os, sc]( - int64_t decree, int64_t timestamp, dsn::message_ex **requests, int count) mutable { + callback = [&os, sc](int64_t decree, + int64_t timestamp, + dsn::message_ex **requests, + int count) mutable { for (int i = 0; i < count; ++i) { dsn::message_ex *request = requests[i]; CHECK_NOTNULL(request, ""); @@ -205,8 +207,8 @@ bool mlog_dump(command_executor *e, shell_context *sc, arguments args) } else if (msg->local_rpc_code == ::dsn::apps::RPC_RRDB_RRDB_CHECK_AND_SET) { dsn::apps::check_and_set_request update; dsn::unmarshall(request, update); - auto set_sort_key = - update.set_diff_sort_key ? update.set_sort_key : update.check_sort_key; + auto set_sort_key = update.set_diff_sort_key ? update.set_sort_key + : update.check_sort_key; std::string check_operand; if (pegasus::cas_is_check_operand_needed(update.check_type)) { check_operand = fmt::format( diff --git a/src/shell/commands/detect_hotkey.cpp b/src/shell/commands/detect_hotkey.cpp index c78f906c67..b93acd1d05 100644 --- a/src/shell/commands/detect_hotkey.cpp +++ b/src/shell/commands/detect_hotkey.cpp @@ -24,7 +24,7 @@ #include "client/replication_ddl_client.h" #include "common/gpid.h" #include "replica_admin_types.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "shell/argh.h" #include "shell/command_executor.h" #include "shell/command_utils.h" @@ -102,8 +102,8 @@ bool detect_hotkey(command_executor *e, shell_context *sc, arguments args) dsn::host_port target_hp; std::string err_info; - std::string ip_str = cmd({"-d", "--address"}).str(); - if (!validate_ip(sc, ip_str, target_hp, err_info)) { + const auto &target_hp_str = cmd({"-d", "--address"}).str(); + if (!validate_ip(sc, target_hp_str, target_hp, err_info)) { fmt::print(stderr, "{}\n", err_info); return false; } @@ -145,7 +145,7 @@ bool detect_hotkey(command_executor *e, shell_context *sc, arguments args) app_id, partition_index, hotkey_type, - ip_str); + target_hp_str); break; case dsn::replication::detect_action::STOP: fmt::print("Hotkey detection is stopped now\n"); diff --git a/src/shell/commands/duplication.cpp b/src/shell/commands/duplication.cpp index 097c1ec733..82237af1a7 100644 --- a/src/shell/commands/duplication.cpp +++ b/src/shell/commands/duplication.cpp @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -51,27 +50,23 @@ bool add_dup(command_executor *e, shell_context *sc, arguments args) // add_dup [-s|--sst] [-a|--remote_app_name str] // [-r|--remote_replica_count num] - argh::parser cmd(args.argc, args.argv); - if (cmd.pos_args().size() > 5) { - fmt::print(stderr, "too many params\n"); - return false; - } + argh::parser cmd(args.argc, args.argv, argh::parser::PREFER_PARAM_FOR_UNREG_OPTION); if (!cmd(1)) { - fmt::print(stderr, "missing param \n"); + SHELL_PRINTLN_ERROR("missing param "); return false; } std::string app_name = cmd(1).str(); if (!cmd(2)) { - fmt::print(stderr, "missing param \n"); + SHELL_PRINTLN_ERROR("missing param "); return false; } std::string remote_cluster_name = cmd(2).str(); + if (remote_cluster_name == sc->current_cluster_name) { - fmt::print(stderr, - "illegal operation: adding duplication to itself [remote: {}]\n", - remote_cluster_name); + SHELL_PRINTLN_ERROR("illegal operation: adding duplication to itself [remote: {}]", + remote_cluster_name); return true; } @@ -87,6 +82,14 @@ bool add_dup(command_executor *e, shell_context *sc, arguments args) uint32_t remote_replica_count = 0; PARSE_OPT_UINT(remote_replica_count, 0, {"-r", "--remote_replica_count"}); + fmt::println("trying to add duplication [app_name: {}, remote_cluster_name: {}, " + "is_duplicating_checkpoint: {}, remote_app_name: {}, remote_replica_count: {}]", + app_name, + remote_cluster_name, + is_duplicating_checkpoint, + remote_app_name, + remote_replica_count); + auto err_resp = sc->ddl_client->add_dup(app_name, remote_cluster_name, is_duplicating_checkpoint, @@ -99,36 +102,63 @@ bool add_dup(command_executor *e, shell_context *sc, arguments args) hint = err_resp.get_value().hint; } - if (!err) { - fmt::print(stderr, - "adding duplication failed [app: {}, remote: {}, checkpoint: {}, error: {}]\n", - app_name, - remote_cluster_name, - is_duplicating_checkpoint, - err); + if (!err && err.code() != dsn::ERR_DUP_EXIST) { + SHELL_PRINTLN_ERROR( + "adding duplication failed [app_name: {}, remote_cluster_name: {}, " + "is_duplicating_checkpoint: {}, remote_app_name: {}, remote_replica_count: {}, " + "error: {}]", + app_name, + remote_cluster_name, + is_duplicating_checkpoint, + remote_app_name, + remote_replica_count, + err); if (!hint.empty()) { - fmt::print(stderr, "detail:\n {}\n", hint); + SHELL_PRINTLN_ERROR("detail:\n {}", hint); } return true; } + if (err.code() == dsn::ERR_DUP_EXIST) { + SHELL_PRINT_WARNING("duplication has been existing"); + } else { + SHELL_PRINT_OK("adding duplication succeed"); + } + const auto &resp = err_resp.get_value(); - fmt::print("adding duplication succeed [app: {}, remote: {}, appid: {}, dupid: " - "{}], checkpoint: {}", - app_name, - remote_cluster_name, - resp.appid, - resp.dupid, - is_duplicating_checkpoint); + SHELL_PRINT_OK(" [app_name: {}, remote_cluster_name: {}, appid: {}, dupid: {}", + app_name, + remote_cluster_name, + resp.appid, + resp.dupid); + + if (err) { + SHELL_PRINT_OK(", is_duplicating_checkpoint: {}", is_duplicating_checkpoint); + } if (resp.__isset.remote_app_name) { - fmt::print(", remote_app_name: {}\n", remote_app_name); - } else { - fmt::print("\nWARNING: meta server does NOT support specifying remote_app_name, " - "remote_app_name might has been specified with {}\n", - app_name); + SHELL_PRINT_OK(", remote_app_name: {}", resp.remote_app_name); + } + + if (resp.__isset.remote_replica_count) { + SHELL_PRINT_OK(", remote_replica_count: {}", resp.remote_replica_count); + } + + SHELL_PRINTLN_OK("]"); + + if (!resp.__isset.remote_app_name) { + SHELL_PRINTLN_WARNING("WARNING: meta server does NOT support specifying remote_app_name, " + "remote_app_name might has been specified with '{}'", + app_name); + } + + if (!resp.__isset.remote_replica_count) { + SHELL_PRINTLN_WARNING( + "WARNING: meta server does NOT support specifying remote_replica_count, " + "remote_replica_count might has been specified with the replica count of '{}'", + app_name); } return true; @@ -139,7 +169,7 @@ bool string2dupid(const std::string &str, dupid_t *dup_id) { bool ok = dsn::buf2int32(str, *dup_id); if (!ok) { - fmt::print(stderr, "parsing {} as positive int failed: {}\n", str); + SHELL_PRINTLN_ERROR("parsing {} as positive int failed", str); return false; } return true; @@ -151,18 +181,18 @@ bool query_dup(command_executor *e, shell_context *sc, arguments args) argh::parser cmd(args.argc, args.argv); if (cmd.pos_args().size() > 2) { - fmt::print(stderr, "too many params\n"); + SHELL_PRINTLN_ERROR("too many params"); return false; } for (const auto &flag : cmd.flags()) { if (flag != "d" && flag != "detail") { - fmt::print(stderr, "unknown flag {}\n", flag); + SHELL_PRINTLN_ERROR("unknown flag {}", flag); return false; } } if (!cmd(1)) { - fmt::print(stderr, "missing param \n"); + SHELL_PRINTLN_ERROR("missing param "); return false; } std::string app_name = cmd(1).str(); @@ -176,20 +206,20 @@ bool query_dup(command_executor *e, shell_context *sc, arguments args) err = dsn::error_s::make(err_resp.get_value().err); } if (!err) { - fmt::print(stderr, "querying duplications of app [{}] failed, error={}\n", app_name, err); + SHELL_PRINTLN_ERROR("querying duplications of app [{}] failed, error={}", app_name, err); return true; } if (detail) { - fmt::print("duplications of app [{}] in detail:\n", app_name); - fmt::print("{}\n\n", duplication_query_response_to_string(err_resp.get_value())); + fmt::println("duplications of app [{}] in detail:", app_name); + fmt::println("{}\n", duplication_query_response_to_string(err_resp.get_value())); return true; } const auto &resp = err_resp.get_value(); - fmt::print("duplications of app [{}] are listed as below:\n", app_name); + fmt::println("duplications of app [{}] are listed as below:", app_name); dsn::utils::table_printer printer; printer.add_title("dup_id"); @@ -225,9 +255,9 @@ void handle_duplication_modify_response( hint = " [duplication not found]"; } if (err.is_ok()) { - fmt::print("{} succeed\n", operation); + SHELL_PRINTLN_OK("{} succeed", operation); } else { - fmt::print(stderr, "{} failed, error={}{}\n", operation, err.description(), hint); + SHELL_PRINTLN_ERROR("{} failed, error={}{}", operation, err.description(), hint); } } @@ -290,19 +320,19 @@ bool set_dup_fail_mode(command_executor *e, shell_context *sc, arguments args) argh::parser cmd(args.argc, args.argv); if (cmd.pos_args().size() > 4) { - fmt::print(stderr, "too many params\n"); + SHELL_PRINTLN_ERROR("too many params"); return false; } std::string app_name = cmd(1).str(); std::string dupid_str = cmd(2).str(); dupid_t dup_id; if (!dsn::buf2int32(dupid_str, dup_id)) { - fmt::print(stderr, "invalid dup_id {}\n", dupid_str); + SHELL_PRINTLN_ERROR("invalid dup_id {}", dupid_str); return false; } std::string fail_mode_str = cmd(3).str(); if (fail_mode_str != "slow" && fail_mode_str != "skip") { - fmt::print(stderr, "fail_mode must be \"slow\" or \"skip\": {}\n", fail_mode_str); + SHELL_PRINTLN_ERROR("fail_mode must be \"slow\" or \"skip\": {}", fail_mode_str); return false; } auto fmode = fail_mode_str == "slow" ? duplication_fail_mode::FAIL_SLOW diff --git a/src/shell/commands/local_partition_split.cpp b/src/shell/commands/local_partition_split.cpp index 6cc4726fa8..f880f42464 100644 --- a/src/shell/commands/local_partition_split.cpp +++ b/src/shell/commands/local_partition_split.cpp @@ -240,11 +240,10 @@ bool split_file(const LocalPartitionSplitContext &lpsc, const auto &svalue = iter->value(); // Skip empty write, see: // https://pegasus.apache.org/zh/2018/03/07/last_flushed_decree.html. - if (skey.empty() && - pegasus::value_schema_manager::instance() - .get_value_schema(pegasus_data_version) - ->extract_user_data(svalue.ToString()) - .empty()) { + if (skey.empty() && pegasus::value_schema_manager::instance() + .get_value_schema(pegasus_data_version) + ->extract_user_data(svalue.ToString()) + .empty()) { continue; } @@ -737,8 +736,7 @@ bool local_partition_split(command_executor *e, shell_context *sc, arguments arg tp.add_column("key_count"); for (const auto &ddsr : ddsrs) { for (const auto &psr : ddsr.psrs) { - for (const auto & [ new_dst_replica_dir, key_count ] : - psr.key_count_by_dst_replica_dirs) { + for (const auto &[new_dst_replica_dir, key_count] : psr.key_count_by_dst_replica_dirs) { tp.add_row(psr.src_replica_dir); tp.append_data(new_dst_replica_dir); tp.append_data(psr.success); diff --git a/src/shell/commands/node_management.cpp b/src/shell/commands/node_management.cpp index abf4c7f750..3b0a823ee4 100644 --- a/src/shell/commands/node_management.cpp +++ b/src/shell/commands/node_management.cpp @@ -17,17 +17,22 @@ * under the License. */ +#include +#include #include +#include +#include #include #include -#include #include // IWYU pragma: no_include #include #include +#include #include #include #include +#include #include #include #include @@ -35,23 +40,25 @@ #include #include "client/replication_ddl_client.h" +#include "common/json_helper.h" #include "common/replication_enums.h" #include "dsn.layer2_types.h" #include "meta_admin_types.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" +#include "shell/argh.h" #include "shell/command_executor.h" #include "shell/command_helper.h" #include "shell/command_utils.h" #include "shell/commands.h" -#include "shell/sds/sds.h" +#include "utils/blob.h" #include "utils/error_code.h" #include "utils/errors.h" #include "utils/flags.h" +#include "utils/fmt_logging.h" #include "utils/math.h" #include "utils/metrics.h" #include "utils/output_utils.h" #include "utils/ports.h" -#include "utils/strings.h" DSN_DEFINE_uint32(shell, nodes_sample_interval_ms, 1000, "The interval between sampling metrics."); DSN_DEFINE_validator(nodes_sample_interval_ms, [](uint32_t value) -> bool { return value > 0; }); @@ -227,19 +234,173 @@ dsn::metric_filters rw_requests_filters() return filters; } +dsn::metric_filters server_stat_filters() +{ + dsn::metric_filters filters; + filters.with_metric_fields = {dsn::kMetricNameField, dsn::kMetricSingleValueField}; + filters.entity_types = {"server"}; + filters.entity_metrics = {"virtual_mem_usage_mb", "resident_mem_usage_mb"}; + return filters; +} + +struct meta_server_stats +{ + meta_server_stats() = default; + + double virt_mem_mb{0.0}; + double res_mem_mb{0.0}; + + DEFINE_JSON_SERIALIZATION(virt_mem_mb, res_mem_mb) +}; + +std::pair +aggregate_meta_server_stats(const node_desc &node, + const dsn::metric_query_brief_value_snapshot &query_snapshot) +{ + aggregate_stats_calcs calcs; + meta_server_stats stats; + calcs.create_assignments( + "server", + stat_var_map({{"virtual_mem_usage_mb", &stats.virt_mem_mb}, + {"resident_mem_usage_mb", &stats.res_mem_mb}})); + + auto command_result = process_parse_metrics_result( + calcs.aggregate_metrics(query_snapshot), node, "aggregate meta server stats"); + if (!command_result) { + // Metrics failed to be aggregated. + return std::make_pair(false, command_result.description()); + } + + return std::make_pair(true, + dsn::json::json_forwarder::encode(stats).to_string()); +} + +struct replica_server_stats +{ + replica_server_stats() = default; + + double virt_mem_mb{0.0}; + double res_mem_mb{0.0}; + + DEFINE_JSON_SERIALIZATION(virt_mem_mb, res_mem_mb) +}; + +std::pair +aggregate_replica_server_stats(const node_desc &node, + const dsn::metric_query_brief_value_snapshot &query_snapshot_start, + const dsn::metric_query_brief_value_snapshot &query_snapshot_end) +{ + aggregate_stats_calcs calcs; + meta_server_stats stats; + calcs.create_assignments( + "server", + stat_var_map({{"virtual_mem_usage_mb", &stats.virt_mem_mb}, + {"resident_mem_usage_mb", &stats.res_mem_mb}})); + + auto command_result = process_parse_metrics_result( + calcs.aggregate_metrics(query_snapshot_start, query_snapshot_end), + node, + "aggregate replica server stats"); + if (!command_result) { + // Metrics failed to be aggregated. + return std::make_pair(false, command_result.description()); + } + + return std::make_pair(true, + dsn::json::json_forwarder::encode(stats).to_string()); +} + +std::vector> get_server_stats(const std::vector &nodes, + uint32_t sample_interval_ms) +{ + // Ask target node (meta or replica server) for the metrics of server stats. + const auto &query_string = server_stat_filters().to_query_string(); + const auto &results_start = get_metrics(nodes, query_string); + std::this_thread::sleep_for(std::chrono::milliseconds(sample_interval_ms)); + const auto &results_end = get_metrics(nodes, query_string); + + std::vector> command_results; + command_results.reserve(nodes.size()); + for (size_t i = 0; i < nodes.size(); ++i) { + +#define SKIP_IF_PROCESS_RESULT_FALSE() \ + if (!command_result) { \ + command_results.emplace_back(command_result, command_result.description()); \ + continue; \ + } + +#define PROCESS_GET_METRICS_RESULT(result, what, ...) \ + { \ + auto command_result = process_get_metrics_result(result, nodes[i], what, ##__VA_ARGS__); \ + SKIP_IF_PROCESS_RESULT_FALSE() \ + } + + // Skip the metrics that failed to be fetched. + PROCESS_GET_METRICS_RESULT(results_start[i], "starting server stats") + PROCESS_GET_METRICS_RESULT(results_end[i], "ending server stats") + +#undef PROCESS_GET_METRICS_RESULT + + dsn::metric_query_brief_value_snapshot query_snapshot_start; + dsn::metric_query_brief_value_snapshot query_snapshot_end; + { + // Skip the metrics that failed to be deserialized. + auto command_result = process_parse_metrics_result( + deserialize_metric_query_2_samples(results_start[i].body(), + results_end[i].body(), + query_snapshot_start, + query_snapshot_end), + nodes[i], + "deserialize server stats"); + SKIP_IF_PROCESS_RESULT_FALSE() + } + +#undef SKIP_IF_PROCESS_RESULT_FALSE + + if (query_snapshot_end.role == "meta") { + command_results.push_back(aggregate_meta_server_stats(nodes[i], query_snapshot_end)); + continue; + } + + if (query_snapshot_end.role == "replica") { + command_results.push_back( + aggregate_replica_server_stats(nodes[i], query_snapshot_start, query_snapshot_end)); + continue; + } + + command_results.emplace_back( + false, fmt::format("role {} is unsupported", query_snapshot_end.role)); + } + + return command_results; +} + +std::vector> call_nodes(shell_context *sc, + const std::vector &nodes, + const std::string &command, + const std::vector &arguments, + uint32_t sample_interval_ms) +{ + if (command == "server_stat") { + return get_server_stats(nodes, sample_interval_ms); + } + + return call_remote_command(sc, nodes, command, arguments); +} + } // anonymous namespace -bool ls_nodes(command_executor *e, shell_context *sc, arguments args) +bool ls_nodes(command_executor *, shell_context *sc, arguments args) { - static struct option long_options[] = {{"detailed", no_argument, 0, 'd'}, - {"resolve_ip", no_argument, 0, 'r'}, - {"resource_usage", no_argument, 0, 'u'}, - {"qps", no_argument, 0, 'q'}, - {"json", no_argument, 0, 'j'}, - {"status", required_argument, 0, 's'}, - {"output", required_argument, 0, 'o'}, - {"sample_interval_ms", required_argument, 0, 't'}, - {0, 0, 0, 0}}; + static struct option long_options[] = {{"detailed", no_argument, nullptr, 'd'}, + {"resolve_ip", no_argument, nullptr, 'r'}, + {"resource_usage", no_argument, nullptr, 'u'}, + {"qps", no_argument, nullptr, 'q'}, + {"json", no_argument, nullptr, 'j'}, + {"status", required_argument, nullptr, 's'}, + {"output", required_argument, nullptr, 'o'}, + {"sample_interval_ms", required_argument, nullptr, 'i'}, + {nullptr, 0, nullptr, 0}}; std::string status; std::string output_file; @@ -254,7 +415,9 @@ bool ls_nodes(command_executor *e, shell_context *sc, arguments args) optind = 0; while (true) { int option_index = 0; - int c = getopt_long(args.argc, args.argv, "druqjs:o:t:", long_options, &option_index); + // TODO(wangdan): getopt_long() is not thread-safe (clang-tidy[concurrency-mt-unsafe]), + // could use https://github.com/p-ranav/argparse instead. + int c = getopt_long(args.argc, args.argv, "druqjs:o:i:", long_options, &option_index); if (c == -1) { // -1 means all command-line options have been parsed. break; @@ -283,7 +446,7 @@ bool ls_nodes(command_executor *e, shell_context *sc, arguments args) case 'o': output_file = optarg; break; - case 't': + case 'i': RETURN_FALSE_IF_SAMPLE_INTERVAL_MS_INVALID(); break; default: @@ -340,22 +503,22 @@ bool ls_nodes(command_executor *e, shell_context *sc, arguments args) for (auto &app : apps) { int32_t app_id; int32_t partition_count; - std::vector partitions; - r = sc->ddl_client->list_app(app.app_name, app_id, partition_count, partitions); + std::vector pcs; + r = sc->ddl_client->list_app(app.app_name, app_id, partition_count, pcs); if (r != dsn::ERR_OK) { std::cout << "list app " << app.app_name << " failed, error=" << r << std::endl; return true; } - for (const dsn::partition_configuration &p : partitions) { - if (p.hp_primary) { - auto find = tmp_map.find(p.hp_primary); + for (const auto &pc : pcs) { + if (pc.hp_primary) { + auto find = tmp_map.find(pc.hp_primary); if (find != tmp_map.end()) { find->second.primary_count++; } } - for (const auto &hp : p.hp_secondaries) { - auto find = tmp_map.find(hp); + for (const auto &secondary : pc.hp_secondaries) { + auto find = tmp_map.find(secondary); if (find != tmp_map.end()) { find->second.secondary_count++; } @@ -383,7 +546,7 @@ bool ls_nodes(command_executor *e, shell_context *sc, arguments args) auto &stat = tmp_it->second; RETURN_SHELL_IF_PARSE_METRICS_FAILED( - parse_resource_usage(results[i].body(), stat), nodes[i], "resource"); + parse_resource_usage(results[i].body(), stat), nodes[i], "parse resource usage"); } } @@ -425,7 +588,7 @@ bool ls_nodes(command_executor *e, shell_context *sc, arguments args) RETURN_SHELL_IF_PARSE_METRICS_FAILED( calcs.aggregate_metrics(results_start[i].body(), results_end[i].body()), nodes[i], - "rw requests"); + "aggregate rw requests"); } } @@ -447,8 +610,9 @@ bool ls_nodes(command_executor *e, shell_context *sc, arguments args) RETURN_SHELL_IF_GET_METRICS_FAILED(results[i], nodes[i], "profiler latency"); auto &stat = tmp_it->second; - RETURN_SHELL_IF_PARSE_METRICS_FAILED( - parse_profiler_latency(results[i].body(), stat), nodes[i], "profiler latency"); + RETURN_SHELL_IF_PARSE_METRICS_FAILED(parse_profiler_latency(results[i].body(), stat), + nodes[i], + "parse profiler latency"); } } @@ -544,145 +708,140 @@ bool ls_nodes(command_executor *e, shell_context *sc, arguments args) bool server_info(command_executor *e, shell_context *sc, arguments args) { - char *argv[args.argc + 1]; - memcpy(argv, args.argv, sizeof(char *) * args.argc); - argv[args.argc] = (char *)"server-info"; - arguments new_args; - new_args.argc = args.argc + 1; - new_args.argv = argv; - return remote_command(e, sc, new_args); + return remote_command(e, sc, args); } bool server_stat(command_executor *e, shell_context *sc, arguments args) { - char *argv[args.argc + 1]; - memcpy(argv, args.argv, sizeof(char *) * args.argc); - argv[args.argc] = (char *)"server-stat"; - arguments new_args; - new_args.argc = args.argc + 1; - new_args.argv = argv; - return remote_command(e, sc, new_args); + return remote_command(e, sc, args); } -bool remote_command(command_executor *e, shell_context *sc, arguments args) +bool flush_log(command_executor *e, shell_context *sc, arguments args) { - static struct option long_options[] = {{"node_type", required_argument, 0, 't'}, - {"node_list", required_argument, 0, 'l'}, - {"resolve_ip", no_argument, 0, 'r'}, - {0, 0, 0, 0}}; + return remote_command(e, sc, args); +} - std::string type; - std::string nodes; - optind = 0; - bool resolve_ip = false; - while (true) { - int option_index = 0; - int c; - c = getopt_long(args.argc, args.argv, "t:l:r", long_options, &option_index); - if (c == -1) - break; - switch (c) { - case 't': - type = optarg; - break; - case 'l': - nodes = optarg; - break; - case 'r': - resolve_ip = true; +bool remote_command(command_executor *e, shell_context *sc, arguments args) +{ + // Command format: [remote_command] [arguments...] + // [-t all|meta-server|replica-server] + // [-r|--resolve_ip] + // [-l host:port,host:port...] + // [-i|--sample_interval_ms num] + argh::parser cmd(args.argc, args.argv, argh::parser::PREFER_PARAM_FOR_UNREG_OPTION); + + std::string command; + std::vector pos_args; + int pos = 0; + do { + // Try to parse the positional args. + const auto &pos_arg = cmd(pos++); + if (!pos_arg) { break; - default: - return false; } - } - if (!type.empty() && !nodes.empty()) { - fprintf(stderr, "can not specify both node_type and node_list\n"); - return false; - } + // Ignore the args that are useless to the command. + static const std::set kIgnoreArgs({"remote_command"}); + if (kIgnoreArgs.count(pos_arg.str()) == 1) { + continue; + } - if (type.empty() && nodes.empty()) { - type = "all"; - } + // Collect the positional args following by the command. + if (!command.empty()) { + pos_args.emplace_back(pos_arg.str()); + continue; + } - if (!type.empty() && type != "all" && type != "meta-server" && type != "replica-server") { - fprintf(stderr, "invalid type, should be: all | meta-server | replica-server\n"); + // Initialize the command. + const std::map kCmdsMapping( + {{"server_info", "server-info"}, {"flush_log", "flush-log"}}); + const auto &it = kCmdsMapping.find(pos_arg.str()); + if (it != kCmdsMapping.end()) { + // Use the mapped command. + command = it->second; + } else { + command = pos_arg.str(); + } + } while (true); + + if (command.empty()) { + SHELL_PRINTLN_ERROR("missing "); return false; } + const auto resolve_ip = cmd[{"-r", "--resolve_ip"}]; + auto node_type = cmd({"-t"}).str(); + std::vector nodes_str; + PARSE_OPT_STRS(nodes_str, "", {"-l"}); - if (optind == args.argc) { - fprintf(stderr, "command not specified\n"); + if (!node_type.empty() && !nodes_str.empty()) { + SHELL_PRINTLN_ERROR("can not specify both node_type and nodes_str"); return false; } - std::string cmd = args.argv[optind]; - std::vector arguments; - for (int i = optind + 1; i < args.argc; i++) { - arguments.push_back(args.argv[i]); + if (node_type.empty() && nodes_str.empty()) { + node_type = "all"; } - std::vector node_list; - if (!type.empty()) { - if (!fill_nodes(sc, type, node_list)) { - fprintf(stderr, "prepare nodes failed, type = %s\n", type.c_str()); - return true; - } - } else { - std::vector tokens; - dsn::utils::split_args(nodes.c_str(), tokens, ','); - if (tokens.empty()) { - fprintf(stderr, "can't parse node from node_list\n"); - return true; - } + static const std::set kValidNodeTypes({"all", "meta-server", "replica-server"}); + if (!node_type.empty() && kValidNodeTypes.count(node_type) == 0) { + SHELL_PRINTLN_ERROR("invalid node_type, should be in [{}]", + fmt::join(kValidNodeTypes, ", ")); + return false; + } - for (std::string &token : tokens) { - const auto node = dsn::host_port::from_string(token); - if (!node) { - fprintf(stderr, "parse %s as a ip:port node failed\n", token.c_str()); - return true; + std::vector nodes; + do { + if (node_type.empty()) { + for (const auto &node_str : nodes_str) { + const auto node = dsn::host_port::from_string(node_str); + if (!node) { + SHELL_PRINTLN_ERROR("parse '{}' as host:port failed", node_str); + return false; + } + nodes.emplace_back("user-specified", node); } - node_list.emplace_back("user-specified", node); + break; } - } - fprintf(stderr, "COMMAND: %s", cmd.c_str()); - for (auto &s : arguments) { - fprintf(stderr, " %s", s.c_str()); - } - fprintf(stderr, "\n\n"); + if (!fill_nodes(sc, node_type, nodes)) { + SHELL_PRINTLN_ERROR("prepare nodes failed, node_type = {}", node_type); + return false; + } + } while (false); - std::vector> results = - call_remote_command(sc, node_list, cmd, arguments); + nlohmann::json info; + info["command"] = fmt::format("{} {}", command, fmt::join(pos_args, " ")); + + uint32_t sample_interval_ms = 0; + PARSE_OPT_UINT( + sample_interval_ms, FLAGS_nodes_sample_interval_ms, {"-i", "--sample_interval_ms"}); + + const auto &results = call_nodes(sc, nodes, command, pos_args, sample_interval_ms); + CHECK_EQ(results.size(), nodes.size()); int succeed = 0; int failed = 0; - // TODO (yingchun) output is hard to read, need do some refactor - for (int i = 0; i < node_list.size(); ++i) { - const auto &node = node_list[i]; - const auto hostname = replication_ddl_client::node_name(node.hp, resolve_ip); - fprintf(stderr, "CALL [%s] [%s] ", node.desc.c_str(), hostname.c_str()); + for (int i = 0; i < nodes.size(); ++i) { + nlohmann::json node_info; + node_info["role"] = nodes[i].desc; + node_info["acked"] = results[i].first; + try { + // Treat the message as a JSON object by default. + node_info["message"] = nlohmann::json::parse(results[i].second); + } catch (nlohmann::json::exception &exp) { + // Treat it as a string if failed to parse as a JSON object. + node_info["message"] = results[i].second; + } if (results[i].first) { - fprintf(stderr, "succeed:\n%s\n", results[i].second.c_str()); succeed++; } else { - fprintf(stderr, "failed:\n%s\n", results[i].second.c_str()); failed++; } + info["details"].emplace(replication_ddl_client::node_name(nodes[i].hp, resolve_ip), + node_info); } - - fprintf(stderr, "\nSucceed count: %d\n", succeed); - fprintf(stderr, "Failed count: %d\n", failed); - + info["succeed_count"] = succeed; + info["failed_count"] = failed; + fmt::println(stdout, "{}", info.dump(2)); return true; } - -bool flush_log(command_executor *e, shell_context *sc, arguments args) -{ - char *argv[args.argc + 1]; - memcpy(argv, args.argv, sizeof(char *) * args.argc); - argv[args.argc] = (char *)"flush-log"; - arguments new_args; - new_args.argc = args.argc + 1; - new_args.argv = argv; - return remote_command(e, sc, new_args); -} diff --git a/src/shell/commands/rebalance.cpp b/src/shell/commands/rebalance.cpp index 42a5ac5e2d..9b6f19d814 100644 --- a/src/shell/commands/rebalance.cpp +++ b/src/shell/commands/rebalance.cpp @@ -32,7 +32,7 @@ #include "common/gpid.h" #include "meta/load_balance_policy.h" #include "meta_admin_types.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "shell/command_executor.h" #include "shell/command_utils.h" #include "shell/commands.h" diff --git a/src/shell/commands/recovery.cpp b/src/shell/commands/recovery.cpp index b7c8ce1063..85789f57fa 100644 --- a/src/shell/commands/recovery.cpp +++ b/src/shell/commands/recovery.cpp @@ -33,7 +33,7 @@ #include "dsn.layer2_types.h" #include "meta/load_balance_policy.h" #include "meta_admin_types.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "shell/command_executor.h" #include "shell/commands.h" #include "utils/error_code.h" @@ -118,7 +118,7 @@ bool recover(command_executor *e, shell_context *sc, arguments args) for (std::string &token : tokens) { const auto node = dsn::host_port::from_string(token); if (!node) { - fprintf(stderr, "parse %s as a ip:port node failed\n", token.c_str()); + fprintf(stderr, "parse %s as a host:port node failed\n", token.c_str()); return true; } node_list.push_back(node); @@ -140,7 +140,7 @@ bool recover(command_executor *e, shell_context *sc, arguments args) const auto node = dsn::host_port::from_string(str); if (!node) { fprintf(stderr, - "parse %s at file %s line %d as ip:port failed\n", + "parse %s at file %s line %d as host:port failed\n", str.c_str(), node_list_file.c_str(), lineno); @@ -165,8 +165,9 @@ bool recover(command_executor *e, shell_context *sc, arguments args) dsn::host_port diagnose_recommend(const ddd_partition_info &pinfo) { - if (pinfo.config.hp_last_drops.size() < 2) + if (pinfo.config.hp_last_drops.size() < 2) { return dsn::host_port(); + } std::vector last_two_nodes(pinfo.config.hp_last_drops.end() - 2, pinfo.config.hp_last_drops.end()); @@ -290,11 +291,13 @@ bool ddd_diagnose(command_executor *e, shell_context *sc, arguments args) << "last_committed(" << pinfo.config.last_committed_decree << ")" << std::endl; out << " ----" << std::endl; dsn::host_port latest_dropped, secondary_latest_dropped; - if (pinfo.config.hp_last_drops.size() > 0) + if (pinfo.config.hp_last_drops.size() > 0) { latest_dropped = pinfo.config.hp_last_drops[pinfo.config.hp_last_drops.size() - 1]; - if (pinfo.config.hp_last_drops.size() > 1) + } + if (pinfo.config.hp_last_drops.size() > 1) { secondary_latest_dropped = pinfo.config.hp_last_drops[pinfo.config.hp_last_drops.size() - 2]; + } int j = 0; for (const ddd_node_info &n : pinfo.dropped) { dsn::host_port hp_node; diff --git a/src/shell/commands/table_management.cpp b/src/shell/commands/table_management.cpp index dd7995480b..07dc658217 100644 --- a/src/shell/commands/table_management.cpp +++ b/src/shell/commands/table_management.cpp @@ -38,7 +38,7 @@ #include "dsn.layer2_types.h" #include "meta_admin_types.h" #include "pegasus_utils.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "shell/command_executor.h" #include "shell/command_helper.h" #include "shell/command_utils.h" @@ -285,15 +285,15 @@ bool app_disk(command_executor *e, shell_context *sc, arguments args) int32_t app_id = 0; int32_t partition_count = 0; int32_t max_replica_count = 0; - std::vector partitions; + std::vector pcs; - dsn::error_code err = sc->ddl_client->list_app(app_name, app_id, partition_count, partitions); + dsn::error_code err = sc->ddl_client->list_app(app_name, app_id, partition_count, pcs); if (err != ::dsn::ERR_OK) { std::cout << "ERROR: list app " << app_name << " failed, error=" << err << std::endl; return true; } - if (!partitions.empty()) { - max_replica_count = partitions[0].max_replica_count; + if (!pcs.empty()) { + max_replica_count = pcs[0].max_replica_count; } std::vector nodes; @@ -312,7 +312,7 @@ bool app_disk(command_executor *e, shell_context *sc, arguments args) RETURN_SHELL_IF_PARSE_METRICS_FAILED( parse_sst_stat(results[i].body(), count_map[nodes[i].hp], disk_map[nodes[i].hp]), nodes[i], - "sst"); + "parse sst stats"); } ::dsn::utils::table_printer tp_general("result"); @@ -333,27 +333,15 @@ bool app_disk(command_executor *e, shell_context *sc, arguments args) int primary_replicas_count = 0; double disk_used_for_all_replicas = 0; int all_replicas_count = 0; - for (int i = 0; i < partitions.size(); i++) { - const dsn::partition_configuration &p = partitions[i]; - int replica_count = 0; - if (p.hp_primary) { - replica_count++; - } - replica_count += p.hp_secondaries.size(); - std::string replica_count_str; - { - std::stringstream oss; - oss << replica_count << "/" << p.max_replica_count; - replica_count_str = oss.str(); - } + for (const auto &pc : pcs) { std::string primary_str("-"); - if (p.hp_primary) { + if (pc.hp_primary) { bool disk_found = false; double disk_value = 0; - auto f1 = disk_map.find(p.hp_primary); + auto f1 = disk_map.find(pc.hp_primary); if (f1 != disk_map.end()) { auto &sub_map = f1->second; - auto f2 = sub_map.find(p.pid.get_partition_index()); + auto f2 = sub_map.find(pc.pid.get_partition_index()); if (f2 != sub_map.end()) { disk_found = true; disk_value = f2->second; @@ -365,17 +353,17 @@ bool app_disk(command_executor *e, shell_context *sc, arguments args) } bool count_found = false; double count_value = 0; - auto f3 = count_map.find(p.hp_primary); + auto f3 = count_map.find(pc.hp_primary); if (f3 != count_map.end()) { auto &sub_map = f3->second; - auto f4 = sub_map.find(p.pid.get_partition_index()); + auto f4 = sub_map.find(pc.pid.get_partition_index()); if (f4 != sub_map.end()) { count_found = true; count_value = f4->second; } } std::stringstream oss; - oss << replication_ddl_client::node_name(p.hp_primary, resolve_ip) << "("; + oss << replication_ddl_client::node_name(pc.hp_primary, resolve_ip) << "("; if (disk_found) oss << disk_value; else @@ -392,15 +380,15 @@ bool app_disk(command_executor *e, shell_context *sc, arguments args) { std::stringstream oss; oss << "["; - for (int j = 0; j < p.hp_secondaries.size(); j++) { + for (int j = 0; j < pc.hp_secondaries.size(); j++) { if (j != 0) oss << ","; bool found = false; double value = 0; - auto f1 = disk_map.find(p.hp_secondaries[j]); + auto f1 = disk_map.find(pc.hp_secondaries[j]); if (f1 != disk_map.end()) { auto &sub_map = f1->second; - auto f2 = sub_map.find(p.pid.get_partition_index()); + auto f2 = sub_map.find(pc.pid.get_partition_index()); if (f2 != sub_map.end()) { found = true; value = f2->second; @@ -410,17 +398,17 @@ bool app_disk(command_executor *e, shell_context *sc, arguments args) } bool count_found = false; double count_value = 0; - auto f3 = count_map.find(p.hp_secondaries[j]); + auto f3 = count_map.find(pc.hp_secondaries[j]); if (f3 != count_map.end()) { auto &sub_map = f3->second; - auto f3 = sub_map.find(p.pid.get_partition_index()); + auto f3 = sub_map.find(pc.pid.get_partition_index()); if (f3 != sub_map.end()) { count_found = true; count_value = f3->second; } } - oss << replication_ddl_client::node_name(p.hp_secondaries[j], resolve_ip) << "("; + oss << replication_ddl_client::node_name(pc.hp_secondaries[j], resolve_ip) << "("; if (found) oss << value; else @@ -437,9 +425,10 @@ bool app_disk(command_executor *e, shell_context *sc, arguments args) } if (detailed) { - tp_details.add_row(std::to_string(p.pid.get_partition_index())); - tp_details.append_data(p.ballot); - tp_details.append_data(replica_count_str); + tp_details.add_row(std::to_string(pc.pid.get_partition_index())); + tp_details.append_data(pc.ballot); + tp_details.append_data(fmt::format( + "{}/{}", pc.hp_secondaries.size() + (pc.hp_primary ? 1 : 0), pc.max_replica_count)); tp_details.append_data(primary_str); tp_details.append_data(secondary_str); } @@ -463,15 +452,15 @@ bool app_disk(command_executor *e, shell_context *sc, arguments args) return true; } -bool app_stat(command_executor *e, shell_context *sc, arguments args) +bool app_stat(command_executor *, shell_context *sc, arguments args) { - static struct option long_options[] = {{"app_name", required_argument, 0, 'a'}, - {"only_qps", no_argument, 0, 'q'}, - {"only_usage", no_argument, 0, 'u'}, - {"json", no_argument, 0, 'j'}, - {"output", required_argument, 0, 'o'}, - {"sample_interval_ms", required_argument, 0, 't'}, - {0, 0, 0, 0}}; + static struct option long_options[] = {{"app_name", required_argument, nullptr, 'a'}, + {"only_qps", no_argument, nullptr, 'q'}, + {"only_usage", no_argument, nullptr, 'u'}, + {"json", no_argument, nullptr, 'j'}, + {"output", required_argument, nullptr, 'o'}, + {"sample_interval_ms", required_argument, nullptr, 'i'}, + {nullptr, 0, nullptr, 0}}; std::string app_name; std::string out_file; @@ -483,7 +472,7 @@ bool app_stat(command_executor *e, shell_context *sc, arguments args) optind = 0; while (true) { int option_index = 0; - int c = getopt_long(args.argc, args.argv, "a:qujo:t:", long_options, &option_index); + int c = getopt_long(args.argc, args.argv, "a:qujo:i:", long_options, &option_index); if (c == -1) { // -1 means all command-line options have been parsed. break; @@ -505,7 +494,7 @@ bool app_stat(command_executor *e, shell_context *sc, arguments args) case 'o': out_file = optarg; break; - case 't': + case 'i': RETURN_FALSE_IF_SAMPLE_INTERVAL_MS_INVALID(); break; default: diff --git a/src/shell/main.cpp b/src/shell/main.cpp index 3b279824c3..41dd95d144 100644 --- a/src/shell/main.cpp +++ b/src/shell/main.cpp @@ -60,10 +60,16 @@ bool help_info(command_executor *e, shell_context *sc, arguments args) static command_executor commands[] = { { - "help", "print help info", "", help_info, + "help", + "print help info", + "", + help_info, }, { - "version", "get the shell version", "", version, + "version", + "get the shell version", + "", + version, }, { "cluster_info", @@ -95,7 +101,7 @@ static command_executor commands[] = { "get the node status for this cluster", "[-d|--detailed] [-j|--json] [-r|--resolve_ip] [-u|--resource_usage] " "[-o|--output file_name] [-s|--status all|alive|unalive] [-q|--qps] " - "[-t|--sample_interval_ms num]", + "[-i|--sample_interval_ms num]", ls_nodes, }, { @@ -106,13 +112,22 @@ static command_executor commands[] = { create_app, }, { - "drop", "drop an app", " [-r|--reserve_seconds num]", drop_app, + "drop", + "drop an app", + " [-r|--reserve_seconds num]", + drop_app, }, { - "recall", "recall an app", " [new_app_name]", recall_app, + "recall", + "recall an app", + " [new_app_name]", + recall_app, }, { - "rename", "rename an app", " ", rename_app, + "rename", + "rename an app", + " ", + rename_app, }, { "set_meta_level", @@ -121,7 +136,10 @@ static command_executor commands[] = { set_meta_level, }, { - "get_meta_level", "get the meta function level", "", get_meta_level, + "get_meta_level", + "get the meta function level", + "", + get_meta_level, }, { "balance", @@ -145,7 +163,10 @@ static command_executor commands[] = { use_app_as_current, }, { - "cc", "change to the specified cluster", "[cluster_name]", cc_command, + "cc", + "change to the specified cluster", + "[cluster_name]", + cc_command, }, { "escape_all", @@ -166,7 +187,10 @@ static command_executor commands[] = { calculate_hash_value, }, { - "set", "set value", " [ttl_in_seconds]", data_operations, + "set", + "set value", + " [ttl_in_seconds]", + data_operations, }, { "multi_set", @@ -175,7 +199,10 @@ static command_executor commands[] = { data_operations, }, { - "get", "get value", " ", data_operations, + "get", + "get value", + " ", + data_operations, }, { "multi_get", @@ -200,7 +227,10 @@ static command_executor commands[] = { data_operations, }, { - "del", "delete a key", " ", data_operations, + "del", + "delete a key", + " ", + data_operations, }, { "multi_del", @@ -254,13 +284,22 @@ static command_executor commands[] = { data_operations, }, { - "exist", "check value exist", " ", data_operations, + "exist", + "check value exist", + " ", + data_operations, }, { - "count", "get sort key count for a single hash key", "", data_operations, + "count", + "get sort key count for a single hash key", + "", + data_operations, }, { - "ttl", "query ttl for a specific key", " ", data_operations, + "ttl", + "query ttl for a specific key", + " ", + data_operations, }, { "hash_scan", @@ -333,37 +372,41 @@ static command_executor commands[] = { { "remote_command", "send remote command to servers", - "[-t all|meta-server|replica-server] [-r|--resolve_ip] [-l ip:port,ip:port...]" - " [arguments...]", + "[-t all|meta-server|replica-server] [-r|--resolve_ip] [-l host:port,host:port...] " + "[-i|--sample_interval_ms num] [arguments...]", remote_command, }, { "server_info", "get info of servers", - "[-t all|meta-server|replica-server] [-l ip:port,ip:port...] [-r|--resolve_ip]", + "[-t all|meta-server|replica-server] [-l host:port,host:port...] [-r|--resolve_ip]", server_info, }, { "server_stat", "get stat of servers", - "[-t all|meta-server|replica-server] [-l ip:port,ip:port...] [-r|--resolve_ip]", + "[-t all|meta-server|replica-server] [-l host:port,host:port...] [-r|--resolve_ip] " + "[-i|--sample_interval_ms num]", server_stat, }, { "app_stat", "get stat of apps", "[-a|--app_name str] [-q|--only_qps] [-u|--only_usage] [-j|--json] " - "[-o|--output file_name] [-t|--sample_interval_ms num]", + "[-o|--output file_name] [-i|--sample_interval_ms num]", app_stat, }, { "flush_log", "flush log of servers", - "[-t all|meta-server|replica-server] [-l ip:port,ip:port...][-r|--resolve_ip]", + "[-t all|meta-server|replica-server] [-l host:port,host:port...][-r|--resolve_ip]", flush_log, }, { - "local_get", "get value from local db", " ", local_get, + "local_get", + "get value from local db", + " ", + local_get, }, { "rdb_key_str2hex", @@ -413,11 +456,16 @@ static command_executor commands[] = { "<-c|--backup_history_cnt num>", add_backup_policy, }, - {"ls_backup_policy", "list the names of the subsistent backup policies", "", ls_backup_policy}, + { + "ls_backup_policy", + "list the names of the subsistent backup policies", + "[-j|--json]", + ls_backup_policy, + }, { "query_backup_policy", "query subsistent backup policy and last backup infos", - "<-p|--policy_name p1,p2...> [-b|--backup_info_cnt num]", + "<-p|--policy_name p1,p2...> [-b|--backup_info_cnt num] [-j|--json]", query_backup_policy, }, { @@ -431,7 +479,7 @@ static command_executor commands[] = { { "disable_backup_policy", "stop policy continue backup", - "<-p|--policy_name str>", + disable_backup_policy_help.c_str(), disable_backup_policy, }, { @@ -445,7 +493,8 @@ static command_executor commands[] = { "restore app from backup media", "<-c|--old_cluster_name str> <-p|--old_policy_name str> <-a|--old_app_name str> " "<-i|--old_app_id id> <-t|--timestamp/backup_id timestamp> " - "<-b|--backup_provider_type str> [-n|--new_app_name str] [-s|--skip_bad_partition]", + "<-b|--backup_provider_type str> [-n|--new_app_name str] [-s|--skip_bad_partition] " + "[-r|--restore_path str]", restore, }, { @@ -455,16 +504,28 @@ static command_executor commands[] = { query_restore_status, }, { - "get_app_envs", "get current app envs", "[-j|--json]", get_app_envs, + "get_app_envs", + "get current app envs", + "[-j|--json]", + get_app_envs, }, { - "set_app_envs", "set current app envs", " [key value...]", set_app_envs, + "set_app_envs", + "set current app envs", + " [key value...]", + set_app_envs, }, { - "del_app_envs", "delete current app envs", " [key...]", del_app_envs, + "del_app_envs", + "delete current app envs", + " [key...]", + del_app_envs, }, { - "clear_app_envs", "clear current app envs", "[-a|--all] [-p|--prefix str]", clear_app_envs, + "clear_app_envs", + "clear current app envs", + "[-a|--all] [-p|--prefix str]", + clear_app_envs, }, { "ddd_diagnose", @@ -500,10 +561,16 @@ static command_executor commands[] = { query_bulk_load_status, }, { - "pause_bulk_load", "pause app bulk load", "<-a --app_name str>", pause_bulk_load, + "pause_bulk_load", + "pause app bulk load", + "<-a --app_name str>", + pause_bulk_load, }, { - "restart_bulk_load", "restart app bulk load", "<-a --app_name str>", restart_bulk_load, + "restart_bulk_load", + "restart app bulk load", + "<-a --app_name str>", + restart_bulk_load, }, { "cancel_bulk_load", @@ -512,7 +579,10 @@ static command_executor commands[] = { cancel_bulk_load, }, { - "clear_bulk_load", "clear app bulk load result", "<-a --app_name str>", clear_bulk_load, + "clear_bulk_load", + "clear app bulk load result", + "<-a --app_name str>", + clear_bulk_load, }, { "detect_hotkey", @@ -568,10 +638,16 @@ static command_executor commands[] = { local_partition_split, }, { - "exit", "exit shell", "", exit_shell, + "exit", + "exit shell", + "", + exit_shell, }, { - nullptr, nullptr, nullptr, nullptr, + nullptr, + nullptr, + nullptr, + nullptr, }}; void print_help(command_executor *e, size_t name_width, size_t option_width) diff --git a/src/runtime/task/CMakeLists.txt b/src/task/CMakeLists.txt similarity index 83% rename from src/runtime/task/CMakeLists.txt rename to src/task/CMakeLists.txt index ad09763cf6..e0eff8836b 100644 --- a/src/runtime/task/CMakeLists.txt +++ b/src/task/CMakeLists.txt @@ -22,16 +22,11 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. -set(MY_PROJ_NAME dsn.task) - -# Search mode for source files under CURRENT project directory? -# "GLOB_RECURSE" for recursive search -# "GLOB" for non-recursive search +set(MY_PROJ_NAME dsn_task) set(MY_SRC_SEARCH_MODE "GLOB") - -set(MY_PROJ_LIBS "") - -# Extra files that will be installed -set(MY_BINPLACES "") - +set(MY_PROJ_SRC "") +set(MY_PROJ_LIBS + dsn_runtime + dsn_utils) dsn_add_object() +add_subdirectory(tests) diff --git a/src/runtime/task/async_calls.h b/src/task/async_calls.h similarity index 98% rename from src/runtime/task/async_calls.h rename to src/task/async_calls.h index e991e32276..8a44217577 100644 --- a/src/runtime/task/async_calls.h +++ b/src/task/async_calls.h @@ -26,14 +26,14 @@ #pragma once +#include "aio/file_io.h" #include "runtime/api_task.h" #include "runtime/api_layer1.h" #include "runtime/app_model.h" +#include "rpc/serialization.h" +#include "task/task_tracker.h" #include "utils/api_utilities.h" #include "utils/function_traits.h" -#include "aio/file_io.h" -#include "runtime/task/task_tracker.h" -#include "runtime/rpc/serialization.h" namespace dsn { @@ -131,7 +131,7 @@ create_rpc_response_task(dsn::message_ex *req, req, tracker, [cb_fwd = std::move(callback)]( - error_code err, dsn::message_ex * req, dsn::message_ex * resp) mutable { + error_code err, dsn::message_ex *req, dsn::message_ex *resp) mutable { typename is_typed_rpc_callback::response_t response = {}; if (err == ERR_OK) { unmarshall(resp, response); diff --git a/src/runtime/task/future_types.h b/src/task/future_types.h similarity index 98% rename from src/runtime/task/future_types.h rename to src/task/future_types.h index 1a88c8d426..1f6251c711 100644 --- a/src/runtime/task/future_types.h +++ b/src/task/future_types.h @@ -34,4 +34,4 @@ namespace dsn { typedef std::function err_callback; typedef future_task error_code_future; typedef dsn::ref_ptr error_code_future_ptr; -} +} // namespace dsn diff --git a/src/runtime/task/hpc_task_queue.cpp b/src/task/hpc_task_queue.cpp similarity index 94% rename from src/runtime/task/hpc_task_queue.cpp rename to src/task/hpc_task_queue.cpp index 8af5103ca6..5d0c435591 100644 --- a/src/runtime/task/hpc_task_queue.cpp +++ b/src/task/hpc_task_queue.cpp @@ -26,13 +26,13 @@ #include "hpc_task_queue.h" -#include +#include "concurrentqueue/lightweightsemaphore.h" #include "boost/iterator/function_output_iterator.hpp" #include "concurrentqueue/concurrentqueue.h" -#include "runtime/task/task.h" -#include "runtime/task/task_queue.h" -#include "runtime/task/task_spec.h" +#include "task.h" +#include "task_queue.h" +#include "task_spec.h" namespace dsn { class task_worker_pool; @@ -80,5 +80,5 @@ task *hpc_concurrent_task_queue::dequeue(int &batch_size) } while (count != 0); return head; } -} -} +} // namespace tools +} // namespace dsn diff --git a/src/runtime/task/hpc_task_queue.h b/src/task/hpc_task_queue.h similarity index 94% rename from src/runtime/task/hpc_task_queue.h rename to src/task/hpc_task_queue.h index 8a53697eb4..e30bce713f 100644 --- a/src/runtime/task/hpc_task_queue.h +++ b/src/task/hpc_task_queue.h @@ -26,10 +26,10 @@ #pragma once -#include +#include "concurrentqueue/concurrentqueue.h" #include "concurrentqueue/lightweightsemaphore.h" -#include "runtime/task/task_code.h" +#include "task_code.h" #include "task_queue.h" namespace dsn { @@ -52,5 +52,5 @@ class hpc_concurrent_task_queue : public task_queue task *dequeue(/*inout*/ int &batch_size) override; }; -} -} +} // namespace tools +} // namespace dsn diff --git a/src/runtime/task/simple_task_queue.cpp b/src/task/simple_task_queue.cpp similarity index 96% rename from src/runtime/task/simple_task_queue.cpp rename to src/task/simple_task_queue.cpp index 879a9700c6..d582203781 100644 --- a/src/runtime/task/simple_task_queue.cpp +++ b/src/task/simple_task_queue.cpp @@ -26,7 +26,6 @@ #include "simple_task_queue.h" -#include #include #include #include @@ -38,12 +37,15 @@ #include "boost/asio/error.hpp" #include "boost/asio/impl/io_context.hpp" #include "boost/asio/impl/io_context.ipp" +#include "boost/asio/io_service.hpp" #include "boost/date_time/posix_time/posix_time_duration.hpp" #include "boost/system/error_code.hpp" -#include "runtime/task/task.h" -#include "runtime/task/task_spec.h" -#include "runtime/task/task_worker.h" #include "runtime/tool_api.h" +#include "task.h" +#include "task/task_queue.h" +#include "task/timer_service.h" +#include "task_spec.h" +#include "task_worker.h" #include "utils/fmt_logging.h" #include "utils/threadpool_spec.h" diff --git a/src/runtime/task/simple_task_queue.h b/src/task/simple_task_queue.h similarity index 95% rename from src/runtime/task/simple_task_queue.h rename to src/task/simple_task_queue.h index 62aac25f72..bd7f0efa5c 100644 --- a/src/runtime/task/simple_task_queue.h +++ b/src/task/simple_task_queue.h @@ -30,9 +30,9 @@ #include #include "boost/asio/io_service.hpp" -#include "runtime/task/task_code.h" -#include "runtime/task/task_queue.h" -#include "runtime/task/timer_service.h" +#include "task_code.h" +#include "task_queue.h" +#include "timer_service.h" #include "utils/priority_queue.h" namespace dsn { diff --git a/src/runtime/task/task.cpp b/src/task/task.cpp similarity index 99% rename from src/runtime/task/task.cpp rename to src/task/task.cpp index 7d83948f3b..164f1cec6a 100644 --- a/src/runtime/task/task.cpp +++ b/src/task/task.cpp @@ -33,10 +33,11 @@ #include "runtime/global_config.h" #include "runtime/node_scoper.h" #include "runtime/service_engine.h" -#include "runtime/task/task_spec.h" -#include "runtime/task/task_tracker.h" -#include "runtime/task/task_worker.h" +#include "task/task_code.h" #include "task_engine.h" +#include "task_spec.h" +#include "task_tracker.h" +#include "task_worker.h" #include "utils/fmt_logging.h" #include "utils/process_utils.h" #include "utils/rand.h" diff --git a/src/runtime/task/task.h b/src/task/task.h similarity index 98% rename from src/runtime/task/task.h rename to src/task/task.h index 9fbe022dd8..49a5ba05dc 100644 --- a/src/runtime/task/task.h +++ b/src/task/task.h @@ -26,8 +26,8 @@ #pragma once -#include #include +#include #include #include #include @@ -36,10 +36,10 @@ #include #include +#include "rpc/rpc_message.h" #include "runtime/api_layer1.h" #include "runtime/api_task.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_code.h" +#include "task_code.h" #include "task_spec.h" #include "task_tracker.h" #include "utils/autoref_ptr.h" @@ -249,7 +249,7 @@ class task : public ref_counter, public extensible_object static void set_tls_dsn_context( service_node *node, // cannot be null task_worker *worker // null for io or timer threads if they are not worker threads - ); + ); protected: void enqueue(task_worker_pool *pool); @@ -392,13 +392,13 @@ class future_task : public task } virtual void exec() override { absl::apply(_cb, std::move(_values)); } - void enqueue_with(const First &t, const Remaining &... r, int delay_ms = 0) + void enqueue_with(const First &t, const Remaining &...r, int delay_ms = 0) { _values = std::make_tuple(t, r...); set_delay(delay_ms); enqueue(); } - void enqueue_with(First &&t, Remaining &&... r, int delay_ms = 0) + void enqueue_with(First &&t, Remaining &&...r, int delay_ms = 0) { _values = std::make_tuple(std::move(t), std::forward(r)...); set_delay(delay_ms); diff --git a/src/runtime/task/task_code.cpp b/src/task/task_code.cpp similarity index 99% rename from src/runtime/task/task_code.cpp rename to src/task/task_code.cpp index 9abf2193fa..1f36264443 100644 --- a/src/runtime/task/task_code.cpp +++ b/src/task/task_code.cpp @@ -67,7 +67,7 @@ void task_code_mgr::register_commands() return ss.str(); })); } -} +} // namespace utils /*static*/ int task_code::max() { return task_code_mgr::instance().max_value(); } @@ -123,4 +123,4 @@ const char *task_code::to_string() const { return task_code_mgr::instance().get_name(_internal_code); } -} +} // namespace dsn diff --git a/src/runtime/task/task_code.h b/src/task/task_code.h similarity index 99% rename from src/runtime/task/task_code.h rename to src/task/task_code.h index 2dd03bf701..ae7c258bf2 100644 --- a/src/runtime/task/task_code.h +++ b/src/task/task_code.h @@ -43,7 +43,8 @@ class TProtocol; } // namespace thrift } // namespace apache -typedef enum dsn_task_type_t { +typedef enum dsn_task_type_t +{ TASK_TYPE_RPC_REQUEST, ///< task handling rpc request TASK_TYPE_RPC_RESPONSE, ///< task handling rpc response or timeout TASK_TYPE_COMPUTE, ///< async calls or timers @@ -64,7 +65,8 @@ ENUM_REG(TASK_TYPE_AIO) ENUM_REG(TASK_TYPE_CONTINUATION) ENUM_END(dsn_task_type_t) -typedef enum dsn_task_priority_t { +typedef enum dsn_task_priority_t +{ TASK_PRIORITY_LOW, TASK_PRIORITY_COMMON, TASK_PRIORITY_HIGH, diff --git a/src/runtime/task/task_engine.cpp b/src/task/task_engine.cpp similarity index 94% rename from src/runtime/task/task_engine.cpp rename to src/task/task_engine.cpp index 179b045626..69b0b37d2a 100644 --- a/src/runtime/task/task_engine.cpp +++ b/src/task/task_engine.cpp @@ -24,7 +24,7 @@ * THE SOFTWARE. */ -#include "runtime/task/task_engine.h" +#include "task_engine.h" // IWYU pragma: no_include #include @@ -34,11 +34,12 @@ #include "nlohmann/json.hpp" #include "runtime/global_config.h" #include "runtime/service_engine.h" -#include "runtime/task/task.h" -#include "runtime/task/task_queue.h" -#include "runtime/task/task_spec.h" -#include "runtime/task/task_worker.h" -#include "runtime/task/timer_service.h" +#include "task.h" +#include "task/task_code.h" +#include "task_queue.h" +#include "task_spec.h" +#include "task_worker.h" +#include "timer_service.h" #include "utils/command_manager.h" #include "utils/factory_store.h" #include "utils/fmt_logging.h" @@ -139,10 +140,10 @@ void task_worker_pool::add_timer(task *t) CHECK_GT_MSG( t->delay_milliseconds(), 0, "task delayed should be dispatched to timer service first"); - unsigned int idx = (_spec.partitioned - ? static_cast(t->hash()) % - static_cast(_per_queue_timer_svcs.size()) - : 0); + unsigned int idx = + (_spec.partitioned ? static_cast(t->hash()) % + static_cast(_per_queue_timer_svcs.size()) + : 0); _per_queue_timer_svcs[idx]->add_timer(t); } @@ -157,10 +158,9 @@ void task_worker_pool::enqueue(task *t) "worker pool {} must be started before enqueue task {}", spec().name, t->spec().name); - unsigned int idx = - (_spec.partitioned - ? static_cast(t->hash()) % static_cast(_queues.size()) - : 0); + unsigned int idx = (_spec.partitioned ? static_cast(t->hash()) % + static_cast(_queues.size()) + : 0); return _queues[idx]->enqueue_internal(t); } diff --git a/src/runtime/task/task_engine.h b/src/task/task_engine.h similarity index 99% rename from src/runtime/task/task_engine.h rename to src/task/task_engine.h index 6a8fe2a0ad..5a737a5224 100644 --- a/src/runtime/task/task_engine.h +++ b/src/task/task_engine.h @@ -32,7 +32,7 @@ #include #include "nlohmann/json_fwd.hpp" -#include "runtime/task/task_code.h" +#include "task_code.h" #include "utils/command_manager.h" #include "utils/threadpool_spec.h" diff --git a/src/runtime/task/task_engine.sim.cpp b/src/task/task_engine.sim.cpp similarity index 98% rename from src/runtime/task/task_engine.sim.cpp rename to src/task/task_engine.sim.cpp index 1424585221..9ed7115a25 100644 --- a/src/runtime/task/task_engine.sim.cpp +++ b/src/task/task_engine.sim.cpp @@ -28,8 +28,8 @@ #include #include "runtime/scheduler.h" -#include "runtime/task/task.h" -#include "runtime/task/task_queue.h" +#include "task.h" +#include "task_queue.h" #include "task_engine.sim.h" #include "utils/fmt_logging.h" #include "utils/process_utils.h" @@ -247,5 +247,5 @@ void sim_lock_nr_provider::unlock() _current_holder = -1; _sema.signal(1); } -} -} // end namespace +} // namespace tools +} // namespace dsn diff --git a/src/runtime/task/task_engine.sim.h b/src/task/task_engine.sim.h similarity index 99% rename from src/runtime/task/task_engine.sim.h rename to src/task/task_engine.sim.h index 85ded58eab..e2abe08f74 100644 --- a/src/runtime/task/task_engine.sim.h +++ b/src/task/task_engine.sim.h @@ -142,5 +142,5 @@ class sim_rwlock_nr_provider : public rwlock_nr_provider private: sim_lock_nr_provider _l; }; -} -} // end namespace +} // namespace tools +} // namespace dsn diff --git a/src/runtime/task/task_queue.cpp b/src/task/task_queue.cpp similarity index 95% rename from src/runtime/task/task_queue.cpp rename to src/task/task_queue.cpp index 8b0de42b34..0fcea9e60b 100644 --- a/src/runtime/task/task_queue.cpp +++ b/src/task/task_queue.cpp @@ -26,14 +26,15 @@ #include "task_queue.h" -#include "absl/strings/string_view.h" +#include + #include "fmt/core.h" -#include "runtime/rpc/network.h" -#include "runtime/rpc/rpc_engine.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task.h" -#include "runtime/task/task_spec.h" +#include "rpc/network.h" +#include "rpc/rpc_engine.h" +#include "rpc/rpc_message.h" +#include "task.h" #include "task_engine.h" +#include "task_spec.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/exp_delay.h" diff --git a/src/runtime/task/task_queue.h b/src/task/task_queue.h similarity index 100% rename from src/runtime/task/task_queue.h rename to src/task/task_queue.h diff --git a/src/runtime/task/task_spec.cpp b/src/task/task_spec.cpp similarity index 98% rename from src/runtime/task/task_spec.cpp rename to src/task/task_spec.cpp index 044fbe5c1b..ff38b1fbdc 100644 --- a/src/runtime/task/task_spec.cpp +++ b/src/task/task_spec.cpp @@ -31,7 +31,8 @@ #include #include -#include "runtime/rpc/rpc_message.h" +#include "rpc/rpc_message.h" +#include "task/task_code.h" #include "utils/flags.h" #include "utils/fmt_logging.h" #include "utils/threadpool_spec.h" @@ -231,8 +232,8 @@ bool task_spec::init() } if (spec->rpc_call_channel == RPC_CHANNEL_UDP && !FLAGS_enable_udp) { - LOG_ERROR("task rpc_call_channel RPC_CHANNEL_UCP need udp service, make sure " - "[network].enable_udp"); + LOG_ERROR("task rpc_call_channel RPC_CHANNEL_UDP need udp service, make sure " + "[network].enable_udp is enabled"); return false; } } @@ -288,4 +289,4 @@ bool threadpool_spec::init(/*out*/ std::vector &specs) return true; } -} // end namespace +} // namespace dsn diff --git a/src/runtime/task/task_spec.h b/src/task/task_spec.h similarity index 98% rename from src/runtime/task/task_spec.h rename to src/task/task_spec.h index 3b31537e04..ed709d829d 100644 --- a/src/runtime/task/task_spec.h +++ b/src/task/task_spec.h @@ -31,7 +31,7 @@ #include #include -#include "runtime/task/task_code.h" +#include "task_code.h" #include "utils/config_api.h" #include "utils/config_helper.h" #include "utils/customizable_id.h" @@ -61,7 +61,8 @@ ENUM_REG(TASK_STATE_FINISHED) ENUM_REG(TASK_STATE_CANCELLED) ENUM_END(task_state) -typedef enum grpc_mode_t { +typedef enum grpc_mode_t +{ GRPC_TO_LEADER, // the rpc is sent to the leader (if exist) GRPC_TO_ALL, // the rpc is sent to all GRPC_TO_ANY, // the rpc is sent to one of the group member @@ -76,7 +77,8 @@ ENUM_REG(GRPC_TO_ALL) ENUM_REG(GRPC_TO_ANY) ENUM_END(grpc_mode_t) -typedef enum throttling_mode_t { +typedef enum throttling_mode_t +{ TM_NONE, // no throttling applied TM_REJECT, // reject the incoming request TM_DELAY, // delay network receive ops to reducing incoming rate @@ -91,7 +93,8 @@ ENUM_REG(TM_REJECT) ENUM_REG(TM_DELAY) ENUM_END(throttling_mode_t) -typedef enum dsn_msg_serialize_format { +typedef enum dsn_msg_serialize_format +{ DSF_INVALID = 0, DSF_THRIFT_BINARY = 1, DSF_THRIFT_COMPACT = 2, @@ -317,4 +320,4 @@ CONFIG_FLD(bool, "greater than its timeout value") CONFIG_END -} // end namespace +} // namespace dsn diff --git a/src/runtime/task/task_tracker.cpp b/src/task/task_tracker.cpp similarity index 99% rename from src/runtime/task/task_tracker.cpp rename to src/task/task_tracker.cpp index fbb0dde4fa..23242d6376 100644 --- a/src/runtime/task/task_tracker.cpp +++ b/src/task/task_tracker.cpp @@ -166,4 +166,4 @@ int task_tracker::cancel_but_not_wait_outstanding_tasks() } return not_finished; } -} +} // namespace dsn diff --git a/src/runtime/task/task_tracker.h b/src/task/task_tracker.h similarity index 99% rename from src/runtime/task/task_tracker.h rename to src/task/task_tracker.h index 11a9ffc6fb..dd510f7ffc 100644 --- a/src/runtime/task/task_tracker.h +++ b/src/task/task_tracker.h @@ -226,4 +226,4 @@ inline void trackable_task::owner_delete_commit() _deleting_owner.store(OWNER_DELETE_FINISHED, std::memory_order_relaxed); } -} +} // namespace dsn diff --git a/src/runtime/task/task_worker.cpp b/src/task/task_worker.cpp similarity index 97% rename from src/runtime/task/task_worker.cpp rename to src/task/task_worker.cpp index 83b8aa4b96..01fec9887e 100644 --- a/src/runtime/task/task_worker.cpp +++ b/src/task/task_worker.cpp @@ -39,10 +39,10 @@ #include #include "runtime/service_engine.h" -#include "runtime/task/task.h" -#include "runtime/task/task_engine.h" -#include "runtime/task/task_queue.h" -#include "runtime/task/task_worker.h" +#include "task.h" +#include "task_engine.h" +#include "task_queue.h" +#include "task_worker.h" #include "utils/fmt_logging.h" #include "utils/join_point.h" #include "utils/ports.h" @@ -259,4 +259,4 @@ void task_worker::loop() const threadpool_spec &task_worker::pool_spec() const { return pool()->spec(); } -} // end namespace +} // namespace dsn diff --git a/src/runtime/task/task_worker.h b/src/task/task_worker.h similarity index 99% rename from src/runtime/task/task_worker.h rename to src/task/task_worker.h index 426491508b..bf62852491 100644 --- a/src/runtime/task/task_worker.h +++ b/src/task/task_worker.h @@ -108,4 +108,4 @@ class task_worker : public extensible_object /*@}*/ }; /*@}*/ -} // end namespace +} // namespace dsn diff --git a/src/task/tests/CMakeLists.txt b/src/task/tests/CMakeLists.txt new file mode 100644 index 0000000000..82440e1dfa --- /dev/null +++ b/src/task/tests/CMakeLists.txt @@ -0,0 +1,32 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +set(MY_PROJ_NAME dsn_task_tests) +set(MY_PROJ_SRC "") +set(MY_SRC_SEARCH_MODE "GLOB") +set(MY_PROJ_LIBS + dsn_task + dsn_runtime + dsn_meta_server + dsn_replication_common + dsn_utils + gtest) +set(MY_BINPLACES + config-test.ini + config-test-sim.ini + run.sh) +dsn_add_test() diff --git a/src/runtime/test/async_call.cpp b/src/task/tests/async_call_test.cpp similarity index 87% rename from src/runtime/test/async_call.cpp rename to src/task/tests/async_call_test.cpp index 38398fd184..ddc6172c02 100644 --- a/src/runtime/test/async_call.cpp +++ b/src/task/tests/async_call_test.cpp @@ -35,15 +35,16 @@ #include #include "gtest/gtest.h" +#include "rpc/rpc_address.h" +#include "rpc/rpc_message.h" #include "runtime/api_task.h" -#include "runtime/rpc/rpc_address.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_tracker.h" #include "runtime/test_utils.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_tracker.h" #include "utils/autoref_ptr.h" +#include "utils/enum_helper.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" #include "utils/thread_access_checker.h" @@ -81,7 +82,7 @@ class tracker_class void callback_function3() { ++global_value; } }; -TEST(async_call, task_call) +TEST(async_call_test, task_call) { /* normal lpc*/ tracker_class *tc = new tracker_class(); @@ -95,24 +96,27 @@ TEST(async_call, task_call) /* task tracking */ tc = new tracker_class(); std::vector test_tasks; - t = tasking::enqueue(LPC_TEST_CLIENTLET, - &tc->_tracker, - [=] { tc->callback_function1(); }, - 0, - std::chrono::seconds(30)); + t = tasking::enqueue( + LPC_TEST_CLIENTLET, + &tc->_tracker, + [=] { tc->callback_function1(); }, + 0, + std::chrono::seconds(30)); test_tasks.push_back(t); - t = tasking::enqueue(LPC_TEST_CLIENTLET, - &tc->_tracker, - [tc] { tc->callback_function1(); }, - 0, - std::chrono::seconds(30)); + t = tasking::enqueue( + LPC_TEST_CLIENTLET, + &tc->_tracker, + [tc] { tc->callback_function1(); }, + 0, + std::chrono::seconds(30)); test_tasks.push_back(t); - t = tasking::enqueue_timer(LPC_TEST_CLIENTLET, - &tc->_tracker, - [tc] { tc->callback_function1(); }, - std::chrono::seconds(20), - 0, - std::chrono::seconds(30)); + t = tasking::enqueue_timer( + LPC_TEST_CLIENTLET, + &tc->_tracker, + [tc] { tc->callback_function1(); }, + std::chrono::seconds(20), + 0, + std::chrono::seconds(30)); test_tasks.push_back(t); delete tc; @@ -120,7 +124,7 @@ TEST(async_call, task_call) EXPECT_FALSE(test_tasks[i]->cancel(true)); } -TEST(async_call, rpc_call) +TEST(async_call_test, rpc_call) { const auto addr = rpc_address::from_host_port("localhost", 20101); const auto addr2 = rpc_address::from_host_port("localhost", TEST_PORT_END); @@ -207,7 +211,7 @@ bool spin_wait(const std::function &pred, int wait_times) } return pred(); } -TEST(async_call, task_destructor) +TEST(async_call_test, task_destructor) { { task_ptr t(new simple_task(LPC_TEST_CLIENTLET, nullptr)); diff --git a/src/task/tests/config-test-sim.ini b/src/task/tests/config-test-sim.ini new file mode 100644 index 0000000000..a0cd215f44 --- /dev/null +++ b/src/task/tests/config-test-sim.ini @@ -0,0 +1,38 @@ +; Licensed to the Apache Software Foundation (ASF) under one +; or more contributor license agreements. See the NOTICE file +; distributed with this work for additional information +; regarding copyright ownership. The ASF licenses this file +; to you under the Apache License, Version 2.0 (the +; "License"); you may not use this file except in compliance +; with the License. You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, +; software distributed under the License is distributed on an +; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +; KIND, either express or implied. See the License for the +; specific language governing permissions and limitations +; under the License. + +[apps..default] +type = test +run = true +count = 1 +pools = THREAD_POOL_DEFAULT, THREAD_POOL_TEST_SERVER + +[apps.client] +arguments = localhost 20101 +ports = +delay_seconds = 1 + +[apps.server_group] +arguments = +ports = 20201 +count = 3 + +[core] +tool = simulator +pause_on_start = false +logging_start_level = LOG_LEVEL_DEBUG +logging_factory_name = dsn::tools::screen_logger diff --git a/src/task/tests/config-test.ini b/src/task/tests/config-test.ini new file mode 100644 index 0000000000..a86fc8c3f8 --- /dev/null +++ b/src/task/tests/config-test.ini @@ -0,0 +1,52 @@ +; Licensed to the Apache Software Foundation (ASF) under one +; or more contributor license agreements. See the NOTICE file +; distributed with this work for additional information +; regarding copyright ownership. The ASF licenses this file +; to you under the Apache License, Version 2.0 (the +; "License"); you may not use this file except in compliance +; with the License. You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, +; software distributed under the License is distributed on an +; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +; KIND, either express or implied. See the License for the +; specific language governing permissions and limitations +; under the License. + +[apps..default] +type = test +run = true +count = 1 +pools = THREAD_POOL_DEFAULT, THREAD_POOL_TEST_SERVER, THREAD_POOL_FOR_TEST_1, THREAD_POOL_FOR_TEST_2 + +[apps.client] +arguments = localhost 20101 +ports = +delay_seconds = 1 + +[apps.server_group] +arguments = +ports = 20201 +count = 3 + +[core] +tool = nativerun +pause_on_start = false +logging_start_level = LOG_LEVEL_DEBUG +logging_factory_name = dsn::tools::screen_logger + +[threadpool.THREAD_POOL_FOR_TEST_1] +worker_count = 2 +worker_priority = THREAD_xPRIORITY_HIGHEST +worker_share_core = false +worker_affinity_mask = 1 +partitioned = false + +[threadpool.THREAD_POOL_FOR_TEST_2] +worker_count = 2 +worker_priority = THREAD_xPRIORITY_NORMAL +worker_share_core = true +worker_affinity_mask = 1 +partitioned = true diff --git a/src/runtime/test/lpc.cpp b/src/task/tests/lpc_test.cpp similarity index 93% rename from src/runtime/test/lpc.cpp rename to src/task/tests/lpc_test.cpp index 0f6d7e2bac..d293b2710c 100644 --- a/src/runtime/test/lpc.cpp +++ b/src/task/tests/lpc_test.cpp @@ -29,9 +29,9 @@ #include "gtest/gtest.h" #include "runtime/api_task.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_worker.h" +#include "task/task.h" +#include "task/task_code.h" +#include "task/task_worker.h" #include "runtime/test_utils.h" #include "utils/autoref_ptr.h" @@ -43,7 +43,7 @@ void on_lpc_test(void *p) result = ::dsn::task::get_current_worker()->name(); } -TEST(core, lpc) +TEST(lpc_test, basic) { std::string result = "heheh"; dsn::task_ptr t(new dsn::raw_task(LPC_TEST_HASH, std::bind(&on_lpc_test, (void *)&result), 1)); diff --git a/src/task/tests/main.cpp b/src/task/tests/main.cpp new file mode 100644 index 0000000000..fd8aaa6405 --- /dev/null +++ b/src/task/tests/main.cpp @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include +#include +#include + +#include +#include "runtime/app_model.h" +#include "runtime/service_app.h" +#include "runtime/test_utils.h" +#include "utils/flags.h" +#include "utils/strings.h" + +DSN_DEFINE_string(core, tool, "simulator", ""); + +int g_test_count = 0; +int g_test_ret = 0; + +GTEST_API_ int main(int argc, char **argv) +{ + testing::InitGoogleTest(&argc, argv); + + // register all possible services + dsn::service_app::register_factory("test"); + + // specify what services and tools will run in config file, then run + dsn_run(argc, argv, false); + + // run in-rDSN tests + while (g_test_count == 0) { + std::this_thread::sleep_for(std::chrono::seconds(1)); + } + + if (g_test_ret != 0) { +#ifndef ENABLE_GCOV + dsn_exit(g_test_ret); +#endif + return g_test_ret; + } + + if (!dsn::utils::equals("simulator", FLAGS_tool)) { + // run out-rDSN tests in other threads + std::cout << "=========================================================== " << std::endl; + std::cout << "================== run in non-rDSN threads ================ " << std::endl; + std::cout << "=========================================================== " << std::endl; + std::thread t([]() { + dsn_mimic_app("client", 1); + exec_tests(); + }); + t.join(); + if (g_test_ret != 0) { +#ifndef ENABLE_GCOV + dsn_exit(g_test_ret); +#endif + return g_test_ret; + } + } + +// exit without any destruction +#ifndef ENABLE_GCOV + dsn_exit(0); +#endif + return 0; +} diff --git a/src/task/tests/run.sh b/src/task/tests/run.sh new file mode 100755 index 0000000000..02d23173b6 --- /dev/null +++ b/src/task/tests/run.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +if [ -z "${REPORT_DIR}" ]; then + REPORT_DIR="." +fi + +test_cases=(config-test.ini config-test-sim.ini) +for test_case in ${test_cases[*]}; do + output_xml="${REPORT_DIR}/dsn_task_tests_${test_case/.ini/.xml}" + echo "============ run dsn_task_tests ${test_case} ============" + rm -f core* pegasus.log.* + GTEST_OUTPUT="xml:${output_xml}" ./dsn_task_tests ${test_case} + + if [ $? -ne 0 ]; then + echo "run dsn_task_tests $test_case failed" + echo "---- ls ----" + ls -l + if [ "$(find . -name 'pegasus.log.*' | wc -l)" -ne 0 ]; then + echo "---- tail -n 100 pegasus.log.* ----" + tail -n 100 "$(find . -name 'pegasus.log.*')" + fi + if [ -f core ]; then + echo "---- gdb ./dsn_task_tests core ----" + gdb ./dsn_task_tests core -ex "thread apply all bt" -ex "set pagination 0" -batch + fi + exit 1 + fi + echo "============ done dsn_task_tests ${test_case} ============" +done + +echo "============ done dsn_task_tests ============" diff --git a/src/runtime/test/task_engine.cpp b/src/task/tests/task_engine_test.cpp similarity index 91% rename from src/runtime/test/task_engine.cpp rename to src/task/tests/task_engine_test.cpp index be02295b95..5e038ff652 100644 --- a/src/runtime/test/task_engine.cpp +++ b/src/task/tests/task_engine_test.cpp @@ -24,7 +24,7 @@ * THE SOFTWARE. */ -#include "runtime/task/task_engine.h" +#include "task/task_engine.h" #include #include @@ -34,7 +34,7 @@ #include "gtest/gtest.h" #include "runtime/global_config.h" #include "runtime/service_engine.h" -#include "runtime/task/task.h" +#include "task/task.h" #include "runtime/test_utils.h" #include "utils/enum_helper.h" #include "utils/threadpool_code.h" @@ -48,10 +48,13 @@ using namespace ::dsn; DEFINE_THREAD_POOL_CODE(THREAD_POOL_FOR_TEST_1) DEFINE_THREAD_POOL_CODE(THREAD_POOL_FOR_TEST_2) -TEST(core, task_engine) +TEST(task_engine_test, basic) { - if (dsn::service_engine::instance().spec().tool == "simulator") - return; + if (dsn::service_engine::instance().spec().tool == "simulator") { + GTEST_SKIP() << "Skip the test in simulator mode, set 'tool = nativerun' in '[core]' " + "section in config file to enable it."; + } + ASSERT_EQ("nativerun", dsn::service_engine::instance().spec().tool); service_node *node = task::get_current_node2(); ASSERT_NE(nullptr, node); ASSERT_STREQ("client", node->full_name()); diff --git a/src/runtime/test/task_test.cpp b/src/task/tests/task_test.cpp similarity index 96% rename from src/runtime/test/task_test.cpp rename to src/task/tests/task_test.cpp index 6052bdec7d..c3c1b2d000 100644 --- a/src/runtime/test/task_test.cpp +++ b/src/task/tests/task_test.cpp @@ -15,15 +15,15 @@ // specific language governing permissions and limitations // under the License. -#include "runtime/task/task.h" +#include "task/task.h" #include #include "aio/aio_task.h" #include "aio/file_io.h" #include "gtest/gtest.h" -#include "runtime/task/task_code.h" -#include "runtime/task/task_spec.h" +#include "task/task_code.h" +#include "task/task_spec.h" #include "utils/flags.h" #include "utils/threadpool_code.h" diff --git a/src/runtime/task/timer_service.h b/src/task/timer_service.h similarity index 99% rename from src/runtime/task/timer_service.h rename to src/task/timer_service.h index 86f4b775c9..20118898e3 100644 --- a/src/runtime/task/timer_service.h +++ b/src/task/timer_service.h @@ -67,4 +67,4 @@ class timer_service service_node *_node; }; /*@}*/ -} // end namespace +} // namespace dsn diff --git a/src/test/function_test/base_api/test_basic.cpp b/src/test/function_test/base_api/test_basic.cpp index 674a348548..64ec71dcfc 100644 --- a/src/test/function_test/base_api/test_basic.cpp +++ b/src/test/function_test/base_api/test_basic.cpp @@ -309,7 +309,8 @@ TEST_F(basic, multi_get) std::map new_values; ASSERT_EQ(PERR_OK, client_->multi_get("basic_test_multi_get", "", "", options, new_values)); std::map expect_kvs({ - {"1", "1"}, {"1-abcdefg", "1-abcdefg"}, + {"1", "1"}, + {"1-abcdefg", "1-abcdefg"}, }); ASSERT_EQ(expect_kvs, new_values); } diff --git a/src/test/function_test/base_api/test_batch_get.cpp b/src/test/function_test/base_api/test_batch_get.cpp index f196fc7bde..ff0b15a2e2 100644 --- a/src/test/function_test/base_api/test_batch_get.cpp +++ b/src/test/function_test/base_api/test_batch_get.cpp @@ -1,21 +1,21 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ // IWYU pragma: no_include #include diff --git a/src/test/function_test/base_api/test_copy.cpp b/src/test/function_test/base_api/test_copy.cpp index fb3b33293b..bf04a1dce4 100644 --- a/src/test/function_test/base_api/test_copy.cpp +++ b/src/test/function_test/base_api/test_copy.cpp @@ -32,7 +32,7 @@ #include "gtest/gtest.h" #include "include/pegasus/client.h" #include "pegasus/error.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" #include "shell/command_helper.h" #include "test/function_test/utils/test_util.h" #include "test/function_test/utils/utils.h" diff --git a/src/test/function_test/base_api/test_scan.cpp b/src/test/function_test/base_api/test_scan.cpp index bda35b25ec..0b6cd2ff1d 100644 --- a/src/test/function_test/base_api/test_scan.cpp +++ b/src/test/function_test/base_api/test_scan.cpp @@ -177,8 +177,8 @@ TEST_F(scan_test, OVERALL_COUNT_ONLY) data_count += kv_count; i++; } - ASSERT_EQ(PERR_SCAN_COMPLETE, ret) << "Error occurred when scan. error=" - << client_->get_error_string(ret); + ASSERT_EQ(PERR_SCAN_COMPLETE, ret) + << "Error occurred when scan. error=" << client_->get_error_string(ret); delete scanner; } LOG_INFO("scan count {}", i); @@ -206,8 +206,8 @@ TEST_F(scan_test, ALL_SORT_KEY) check_and_put(data, expected_hash_key_, sort_key, value); } delete scanner; - ASSERT_EQ(PERR_SCAN_COMPLETE, ret) << "Error occurred when scan. error=" - << client_->get_error_string(ret); + ASSERT_EQ(PERR_SCAN_COMPLETE, ret) + << "Error occurred when scan. error=" << client_->get_error_string(ret); ASSERT_NO_FATAL_FAILURE(compare(expect_kvs_[expected_hash_key_], data, expected_hash_key_)); } @@ -271,8 +271,8 @@ TEST_F(scan_test, BOUND_EXCLUSIVE) check_and_put(data, expected_hash_key_, sort_key, value); } delete scanner; - ASSERT_EQ(PERR_SCAN_COMPLETE, ret) << "Error occurred when scan. error=" - << client_->get_error_string(ret); + ASSERT_EQ(PERR_SCAN_COMPLETE, ret) + << "Error occurred when scan. error=" << client_->get_error_string(ret); ++it1; ASSERT_NO_FATAL_FAILURE( compare(data, std::map(it1, it2), expected_hash_key_)); @@ -363,8 +363,8 @@ TEST_F(scan_test, OVERALL) while (PERR_OK == (ret = (scanner->next(hash_key, sort_key, value)))) { check_and_put(data, hash_key, sort_key, value); } - ASSERT_EQ(PERR_SCAN_COMPLETE, ret) << "Error occurred when scan. error=" - << client_->get_error_string(ret); + ASSERT_EQ(PERR_SCAN_COMPLETE, ret) + << "Error occurred when scan. error=" << client_->get_error_string(ret); delete scanner; } ASSERT_NO_FATAL_FAILURE(compare(expect_kvs_, data)); @@ -406,8 +406,8 @@ TEST_F(scan_test, REQUEST_EXPIRE_TS) } else if (err == pegasus::PERR_SCAN_COMPLETE) { split_completed.store(true); } else { - ASSERT_TRUE(false) << "Error occurred when scan. error=" - << client_->get_error_string(err); + ASSERT_TRUE(false) + << "Error occurred when scan. error=" << client_->get_error_string(err); } op_completed.notify(); }); diff --git a/src/test/function_test/bulk_load/test_bulk_load.cpp b/src/test/function_test/bulk_load/test_bulk_load.cpp index 8e191264f7..825e7b8bf4 100644 --- a/src/test/function_test/bulk_load/test_bulk_load.cpp +++ b/src/test/function_test/bulk_load/test_bulk_load.cpp @@ -147,8 +147,9 @@ class bulk_load_test : public test_util // Find the generated files. std::vector src_files; - ASSERT_TRUE(dsn::utils::filesystem::get_subfiles( - partition_path, src_files, /* recursive */ false)); + ASSERT_TRUE(dsn::utils::filesystem::get_subfiles(partition_path, + src_files, + /* recursive */ false)); ASSERT_FALSE(src_files.empty()); bulk_load_metadata blm; diff --git a/src/test/function_test/detect_hotspot/test_detect_hotspot.cpp b/src/test/function_test/detect_hotspot/test_detect_hotspot.cpp index 56fbc75391..3c549d4cf6 100644 --- a/src/test/function_test/detect_hotspot/test_detect_hotspot.cpp +++ b/src/test/function_test/detect_hotspot/test_detect_hotspot.cpp @@ -26,6 +26,7 @@ #include "client/replication_ddl_client.h" #include "common/gpid.h" +#include "dsn.layer2_types.h" #include "gtest/gtest.h" #include "include/pegasus/client.h" #include "include/pegasus/error.h" @@ -96,11 +97,9 @@ class detect_hotspot_test : public test_util bool find_hotkey = false; dsn::replication::detect_hotkey_response resp; - for (int partition_index = 0; partition_index < partitions_.size(); partition_index++) { - req.pid = dsn::gpid(table_id_, partition_index); - ASSERT_EQ( - dsn::ERR_OK, - ddl_client_->detect_hotkey(partitions_[partition_index].hp_primary, req, resp)); + for (const auto &pc : pcs_) { + req.pid = pc.pid; + ASSERT_EQ(dsn::ERR_OK, ddl_client_->detect_hotkey(pc.hp_primary, req, resp)); if (!resp.hotkey_result.empty()) { find_hotkey = true; break; @@ -118,19 +117,15 @@ class detect_hotspot_test : public test_util sleep(15); req.action = dsn::replication::detect_action::STOP; - for (int partition_index = 0; partition_index < partitions_.size(); partition_index++) { - ASSERT_EQ( - dsn::ERR_OK, - ddl_client_->detect_hotkey(partitions_[partition_index].hp_primary, req, resp)); + for (const auto &pc : pcs_) { + ASSERT_EQ(dsn::ERR_OK, ddl_client_->detect_hotkey(pc.hp_primary, req, resp)); ASSERT_EQ(dsn::ERR_OK, resp.err); } req.action = dsn::replication::detect_action::QUERY; - for (int partition_index = 0; partition_index < partitions_.size(); partition_index++) { - req.pid = dsn::gpid(table_id_, partition_index); - ASSERT_EQ( - dsn::ERR_OK, - ddl_client_->detect_hotkey(partitions_[partition_index].hp_primary, req, resp)); + for (const auto &pc : pcs_) { + req.pid = pc.pid; + ASSERT_EQ(dsn::ERR_OK, ddl_client_->detect_hotkey(pc.hp_primary, req, resp)); ASSERT_EQ("Can't get hotkey now, now state: hotkey_collector_state::STOPPED", resp.err_hint); } @@ -162,12 +157,12 @@ class detect_hotspot_test : public test_util dsn::replication::detect_hotkey_response resp; ASSERT_EQ(dsn::ERR_OK, - ddl_client_->detect_hotkey(partitions_[target_partition].hp_primary, req, resp)); + ddl_client_->detect_hotkey(pcs_[target_partition].hp_primary, req, resp)); ASSERT_EQ(dsn::ERR_OK, resp.err); req.action = dsn::replication::detect_action::QUERY; ASSERT_EQ(dsn::ERR_OK, - ddl_client_->detect_hotkey(partitions_[target_partition].hp_primary, req, resp)); + ddl_client_->detect_hotkey(pcs_[target_partition].hp_primary, req, resp)); ASSERT_EQ("Can't get hotkey now, now state: hotkey_collector_state::COARSE_DETECTING", resp.err_hint); @@ -178,7 +173,7 @@ class detect_hotspot_test : public test_util req.action = dsn::replication::detect_action::QUERY; ASSERT_EQ(dsn::ERR_OK, - ddl_client_->detect_hotkey(partitions_[target_partition].hp_primary, req, resp)); + ddl_client_->detect_hotkey(pcs_[target_partition].hp_primary, req, resp)); ASSERT_EQ("Can't get hotkey now, now state: hotkey_collector_state::STOPPED", resp.err_hint); } diff --git a/src/test/function_test/recovery/test_recovery.cpp b/src/test/function_test/recovery/test_recovery.cpp index 04d59933f4..33d1b0adf1 100644 --- a/src/test/function_test/recovery/test_recovery.cpp +++ b/src/test/function_test/recovery/test_recovery.cpp @@ -32,7 +32,7 @@ #include "gtest/gtest.h" #include "include/pegasus/client.h" #include "pegasus/error.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "test/function_test/utils/test_util.h" #include "utils/error_code.h" #include "utils/rand.h" diff --git a/src/test/function_test/restore/test_restore.cpp b/src/test/function_test/restore/test_restore.cpp index 42e13161a4..141e7c8ac0 100644 --- a/src/test/function_test/restore/test_restore.cpp +++ b/src/test/function_test/restore/test_restore.cpp @@ -78,7 +78,8 @@ class restore_test : public test_util table_name_, table_id_, kNewTableName, - false)); + false, + "")); NO_FATALS(wait_table_healthy(kNewTableName)); } @@ -100,10 +101,11 @@ class restore_test : public test_util { std::string pegasus_root_dir = global_env::instance()._pegasus_root; CHECK_EQ(0, ::chdir(pegasus_root_dir.c_str())); - std::string cmd = "cd " + backup_path_ + "; " - "ls -c > restore_app_from_backup_test_tmp; " - "tail -n 1 restore_app_from_backup_test_tmp; " - "rm restore_app_from_backup_test_tmp"; + std::string cmd = "cd " + backup_path_ + + "; " + "ls -c > restore_app_from_backup_test_tmp; " + "tail -n 1 restore_app_from_backup_test_tmp; " + "rm restore_app_from_backup_test_tmp"; std::stringstream ss; int ret = dsn::utils::pipe_execute(cmd.c_str(), ss); std::cout << cmd << " output: " << ss.str() << std::endl; diff --git a/src/test/function_test/utils/global_env.cpp b/src/test/function_test/utils/global_env.cpp index a703fefd2f..12395bea77 100644 --- a/src/test/function_test/utils/global_env.cpp +++ b/src/test/function_test/utils/global_env.cpp @@ -27,7 +27,7 @@ #include "gtest/gtest.h" // IWYU pragma: no_include "gtest/gtest-message.h" // IWYU pragma: no_include "gtest/gtest-test-part.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" #include "test/function_test/utils/utils.h" #include "utils/fmt_logging.h" diff --git a/src/test/function_test/utils/test_util.cpp b/src/test/function_test/utils/test_util.cpp index c3ca95b8cd..7a41f18822 100644 --- a/src/test/function_test/utils/test_util.cpp +++ b/src/test/function_test/utils/test_util.cpp @@ -1,20 +1,20 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ #include "test_util.h" @@ -55,9 +55,9 @@ class rpc_address; } // namespace dsn using dsn::partition_configuration; +using dsn::rpc_address; using dsn::replication::replica_helper; using dsn::replication::replication_ddl_client; -using dsn::rpc_address; using nlohmann::json; using std::map; using std::string; @@ -108,11 +108,10 @@ void test_util::SetUp() ASSERT_TRUE(client_ != nullptr); int32_t partition_count; - ASSERT_EQ(dsn::ERR_OK, - ddl_client_->list_app(table_name_, table_id_, partition_count, partitions_)); + ASSERT_EQ(dsn::ERR_OK, ddl_client_->list_app(table_name_, table_id_, partition_count, pcs_)); ASSERT_NE(0, table_id_); ASSERT_EQ(partition_count_, partition_count); - ASSERT_EQ(partition_count_, partitions_.size()); + ASSERT_EQ(partition_count_, pcs_.size()); } void test_util::run_cmd_from_project_root(const string &cmd) diff --git a/src/test/function_test/utils/test_util.h b/src/test/function_test/utils/test_util.h index 8e3a1663ce..3fb7c16f6f 100644 --- a/src/test/function_test/utils/test_util.h +++ b/src/test/function_test/utils/test_util.h @@ -1,21 +1,21 @@ /* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ #pragma once @@ -28,7 +28,7 @@ #include #include "dsn.layer2_types.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" // TODO(yingchun): it's too tricky, but I don't know how does it happen, we can fix it later. #define TRICKY_CODE_TO_AVOID_LINK_ERROR \ @@ -115,7 +115,7 @@ class test_util : public ::testing::Test std::string table_name_; int32_t table_id_; int32_t partition_count_ = 8; - std::vector partitions_; + std::vector pcs_; pegasus_client *client_ = nullptr; std::vector meta_list_; std::shared_ptr ddl_client_; diff --git a/src/test/function_test/utils/utils.h b/src/test/function_test/utils/utils.h index 526bd3ab2e..1c1df69c22 100644 --- a/src/test/function_test/utils/utils.h +++ b/src/test/function_test/utils/utils.h @@ -176,9 +176,9 @@ inline void compare(const std::map &expect, { for (auto it1 = actual.begin(), it2 = expect.begin();; ++it1, ++it2) { if (it1 == actual.end()) { - ASSERT_EQ(expect.end(), it2) << "Only in expect: hash_key=" << hash_key - << ", sort_key=" << it2->first - << ", value=" << it2->second; + ASSERT_EQ(expect.end(), it2) + << "Only in expect: hash_key=" << hash_key << ", sort_key=" << it2->first + << ", value=" << it2->second; break; } ASSERT_NE(expect.end(), it2) << "Only in actual: hash_key=" << hash_key @@ -200,8 +200,8 @@ inline void compare(const T &expect, const U &actual) break; } ASSERT_NE(expect.end(), it2) << "Only in actual: hash_key=" << it1->first; - ASSERT_EQ(it1->first, it2->first) << "Diff: actual_hash_key=" << it1->first - << ", expected_hash_key=" << it2->first; + ASSERT_EQ(it1->first, it2->first) + << "Diff: actual_hash_key=" << it1->first << ", expected_hash_key=" << it2->first; ASSERT_NO_FATAL_FAILURE(compare(it1->second, it2->second, it1->first)); } } diff --git a/src/test/kill_test/job.cpp b/src/test/kill_test/job.cpp index b73789d943..ef0a18c55a 100644 --- a/src/test/kill_test/job.cpp +++ b/src/test/kill_test/job.cpp @@ -37,5 +37,5 @@ std::string job::get_addr_by_index(int index) } void job::set_name(const std::string &_name) { name = _name; } -} -} // end namespace +} // namespace test +} // namespace pegasus diff --git a/src/test/kill_test/job.h b/src/test/kill_test/job.h index 2c05f55edf..a1d7574d61 100644 --- a/src/test/kill_test/job.h +++ b/src/test/kill_test/job.h @@ -61,5 +61,5 @@ struct job std::string get_addr_by_index(int index); void set_name(const std::string &_name); }; -} -} // end namespace +} // namespace test +} // namespace pegasus diff --git a/src/test/kill_test/kill_testor.cpp b/src/test/kill_test/kill_testor.cpp index fbafa349b2..50570bf1b4 100644 --- a/src/test/kill_test/kill_testor.cpp +++ b/src/test/kill_test/kill_testor.cpp @@ -17,6 +17,8 @@ * under the License. */ +#include +#include #include #include #include @@ -102,38 +104,30 @@ dsn::error_code kill_testor::get_partition_info(bool debug_unhealthy, healthy_partition_cnt = 0, unhealthy_partition_cnt = 0; int32_t app_id; int32_t partition_count; - partitions.clear(); - dsn::error_code err = - ddl_client->list_app(FLAGS_verify_app_name, app_id, partition_count, partitions); + pcs.clear(); + dsn::error_code err = ddl_client->list_app(FLAGS_verify_app_name, app_id, partition_count, pcs); if (err == ::dsn::ERR_OK) { LOG_DEBUG("access meta and query partition status success"); - for (int i = 0; i < partitions.size(); i++) { - const dsn::partition_configuration &p = partitions[i]; + for (const auto &pc : pcs) { int replica_count = 0; - if (p.hp_primary) { + if (pc.hp_primary) { replica_count++; } - replica_count += p.hp_secondaries.size(); - if (replica_count == p.max_replica_count) { + replica_count += pc.hp_secondaries.size(); + if (replica_count == pc.max_replica_count) { healthy_partition_cnt++; } else { - std::stringstream info; - info << "gpid=" << p.pid.get_app_id() << "." << p.pid.get_partition_index() << ", "; - info << "primay=" << p.hp_primary << ", "; - info << "secondaries=["; - for (int idx = 0; idx < p.hp_secondaries.size(); idx++) { - if (idx != 0) - info << "," << p.hp_secondaries[idx]; - else - info << p.hp_secondaries[idx]; - } - info << "], "; - info << "last_committed_decree=" << p.last_committed_decree; + const auto &info = + fmt::format("gpid={}, primary={}, secondaries=[{}], last_committed_decree={}", + pc.pid, + pc.hp_primary, + fmt::join(pc.hp_secondaries, ", "), + pc.last_committed_decree); if (debug_unhealthy) { - LOG_INFO("found unhealthy partition, {}", info.str()); + LOG_INFO("found unhealthy partition, {}", info); } else { - LOG_DEBUG("found unhealthy partition, {}", info.str()); + LOG_DEBUG("found unhealthy partition, {}", info); } } } diff --git a/src/test/kill_test/kill_testor.h b/src/test/kill_test/kill_testor.h index f66f31540f..23b309d316 100644 --- a/src/test/kill_test/kill_testor.h +++ b/src/test/kill_test/kill_testor.h @@ -24,7 +24,7 @@ #include #include "dsn.layer2_types.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/error_code.h" namespace dsn { @@ -66,7 +66,7 @@ class kill_testor shared_ptr ddl_client; vector meta_list; - std::vector partitions; + std::vector pcs; }; } // namespace test } // namespace pegasus diff --git a/src/test/kill_test/killer_handler.h b/src/test/kill_test/killer_handler.h index 4662663fd9..c895910fa9 100644 --- a/src/test/kill_test/killer_handler.h +++ b/src/test/kill_test/killer_handler.h @@ -73,5 +73,5 @@ class killer_handler return new T(); } }; -} -} // end namespace +} // namespace test +} // namespace pegasus diff --git a/src/test/kill_test/killer_handler_shell.cpp b/src/test/kill_test/killer_handler_shell.cpp index 764baba9a8..ecccb6f397 100644 --- a/src/test/kill_test/killer_handler_shell.cpp +++ b/src/test/kill_test/killer_handler_shell.cpp @@ -191,5 +191,5 @@ bool killer_handler_shell::check(const std::string &job, int index, const std::s // not implement, just return true return true; } -} -} // end namespace +} // namespace test +} // namespace pegasus diff --git a/src/test/kill_test/killer_handler_shell.h b/src/test/kill_test/killer_handler_shell.h index c3c797fb52..45c1fb5357 100644 --- a/src/test/kill_test/killer_handler_shell.h +++ b/src/test/kill_test/killer_handler_shell.h @@ -59,5 +59,5 @@ class killer_handler_shell : public killer_handler // check whether the command execute success. bool check(const std::string &job, int index, const std::string &type); }; -} -} // end namespace +} // namespace test +} // namespace pegasus diff --git a/src/test/kill_test/partition_kill_testor.cpp b/src/test/kill_test/partition_kill_testor.cpp index d6f0054755..9ab0da1b53 100644 --- a/src/test/kill_test/partition_kill_testor.cpp +++ b/src/test/kill_test/partition_kill_testor.cpp @@ -29,7 +29,7 @@ #include "dsn.layer2_types.h" #include "partition_kill_testor.h" #include "remote_cmd/remote_command.h" -#include "runtime/task/task.h" +#include "task/task.h" #include "test/kill_test/kill_testor.h" #include "utils/autoref_ptr.h" #include "utils/error_code.h" @@ -59,14 +59,14 @@ void partition_kill_testor::Run() void partition_kill_testor::run() { - if (partitions.size() == 0) { + if (pcs.empty()) { LOG_INFO("partitions empty"); return; } - int random_num = generate_one_number(0, partitions.size() - 1); + int random_num = generate_one_number(0, pcs.size() - 1); std::vector random_indexs; - generate_random(random_indexs, random_num, 0, partitions.size() - 1); + generate_random(random_indexs, random_num, 0, pcs.size() - 1); std::vector tasks(random_num); std::vector> results(random_num); @@ -74,10 +74,10 @@ void partition_kill_testor::run() std::vector arguments(2); for (int i = 0; i < random_indexs.size(); ++i) { int index = random_indexs[i]; - const auto &p = partitions[index]; + const auto &pc = pcs[index]; - arguments[0] = to_string(p.pid.get_app_id()); - arguments[1] = to_string(p.pid.get_partition_index()); + arguments[0] = to_string(pc.pid.get_app_id()); + arguments[1] = to_string(pc.pid.get_partition_index()); auto callback = [&results, i](::dsn::error_code err, const std::string &resp) { if (err == ::dsn::ERR_OK) { @@ -88,7 +88,7 @@ void partition_kill_testor::run() results[i].second = err.to_string(); } }; - tasks[i] = dsn::dist::cmd::async_call_remote(p.primary, + tasks[i] = dsn::dist::cmd::async_call_remote(pc.primary, "replica.kill_partition", arguments, callback, diff --git a/src/test/pressure_test/main.cpp b/src/test/pressure_test/main.cpp index 876df3789d..914a61f4e3 100644 --- a/src/test/pressure_test/main.cpp +++ b/src/test/pressure_test/main.cpp @@ -29,9 +29,9 @@ #include "pegasus/client.h" #include "pegasus/error.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/async_calls.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/flags.h" #include "utils/fmt_logging.h" diff --git a/src/tools/mutation_log_tool.cpp b/src/tools/mutation_log_tool.cpp index 0e88d06f4a..ef367e7461 100644 --- a/src/tools/mutation_log_tool.cpp +++ b/src/tools/mutation_log_tool.cpp @@ -39,8 +39,8 @@ #include "replica/mutation_log.h" #include "replica/replica.h" #include "replica/replica_stub.h" -#include "runtime/rpc/rpc_message.h" -#include "runtime/task/task_spec.h" +#include "rpc/rpc_message.h" +#include "task/task_spec.h" #include "utils/autoref_ptr.h" #include "utils/blob.h" #include "utils/defer.h" diff --git a/src/utils/TokenBucket.h b/src/utils/TokenBucket.h index 9a5a2327d8..c0f9e8e2f9 100644 --- a/src/utils/TokenBucket.h +++ b/src/utils/TokenBucket.h @@ -272,8 +272,8 @@ class BasicDynamicTokenBucket * * Thread-safe (but returned value may immediately be outdated). */ - double available(double rate, double burstSize, double nowInSeconds = defaultClockNow()) const - noexcept + double + available(double rate, double burstSize, double nowInSeconds = defaultClockNow()) const noexcept { assert(rate > 0); assert(burstSize > 0); diff --git a/src/utils/binary_reader.cpp b/src/utils/binary_reader.cpp index 227a98ed31..64e1e3a788 100644 --- a/src/utils/binary_reader.cpp +++ b/src/utils/binary_reader.cpp @@ -102,10 +102,10 @@ int binary_reader::inner_read(blob &blob, int len) blob = _blob.range(static_cast(_ptr - _blob.data()), len); // optimization: zero-copy - if (!blob.buffer_ptr()) { + if (!blob.buffer()) { std::shared_ptr buffer(::dsn::utils::make_shared_array(len)); memcpy(buffer.get(), blob.data(), blob.length()); - blob = ::dsn::blob(buffer, 0, blob.length()); + blob = ::dsn::blob(buffer, blob.length()); } _ptr += len; diff --git a/src/utils/binary_reader.h b/src/utils/binary_reader.h index 65d0749fb2..415da55935 100644 --- a/src/utils/binary_reader.h +++ b/src/utils/binary_reader.h @@ -39,13 +39,13 @@ class binary_reader { public: // given bb on ctor - binary_reader(const blob &blob); - binary_reader(blob &&blob); + explicit binary_reader(const blob &blob); + explicit binary_reader(blob &&blob); // or delayed init - binary_reader() {} + binary_reader() = default; - virtual ~binary_reader() {} + virtual ~binary_reader() = default; void init(const blob &bb); void init(blob &&bb); diff --git a/src/utils/binary_writer.cpp b/src/utils/binary_writer.cpp index 1796f425bb..9234ce9140 100644 --- a/src/utils/binary_writer.cpp +++ b/src/utils/binary_writer.cpp @@ -38,8 +38,8 @@ binary_writer::binary_writer(int reserveBufferSize) { _total_size = 0; _buffers.reserve(1); - _reserved_size_per_buffer = - (reserveBufferSize == 0) ? _reserved_size_per_buffer_static : reserveBufferSize; + _reserved_size_per_buffer = (reserveBufferSize == 0) ? _reserved_size_per_buffer_static + : reserveBufferSize; _current_buffer = nullptr; _current_offset = 0; _current_buffer_length = 0; @@ -200,4 +200,4 @@ bool binary_writer::backup(int count) _total_size -= count; return true; } -} +} // namespace dsn diff --git a/src/utils/binary_writer.h b/src/utils/binary_writer.h index eb33adf8ae..2640567f05 100644 --- a/src/utils/binary_writer.h +++ b/src/utils/binary_writer.h @@ -129,4 +129,4 @@ inline void binary_writer::write(const blob &val) if (len > 0) write((const char *)val.data(), len); } -} +} // namespace dsn diff --git a/src/utils/blob.h b/src/utils/blob.h index fea1a7a0e0..07675e9431 100644 --- a/src/utils/blob.h +++ b/src/utils/blob.h @@ -29,10 +29,11 @@ #include #include -#include "absl/strings/string_view.h" +#include #include #include +#include "utils/fmt_logging.h" #include "utils/fmt_utils.h" #include "utils.h" @@ -51,15 +52,7 @@ class blob { } - blob(std::shared_ptr buffer, int offset, unsigned int length) - : _holder(std::move(buffer)), - _buffer(_holder.get()), - _data(_holder.get() + offset), - _length(length) - { - } - - /// NOTE: Use absl::string_view whenever possible. + /// NOTE: Use std::string_view whenever possible. /// blob is designed for shared buffer, never use it as constant view. /// Maybe we could deprecate this function in the future. blob(const char *buffer, int offset, unsigned int length) @@ -67,21 +60,70 @@ class blob { } + blob(const blob &rhs) noexcept = default; + + blob &operator=(const blob &rhs) noexcept + { + if (this == &rhs) { + return *this; + } + + _holder = rhs._holder; + _buffer = rhs._buffer; + _data = rhs._data; + _length = rhs._length; + + return *this; + } + + blob(blob &&rhs) noexcept + : _holder(std::move(rhs._holder)), + _buffer(rhs._buffer), + _data(rhs._data), + _length(rhs._length) + { + rhs._buffer = nullptr; + rhs._data = nullptr; + rhs._length = 0; + } + + blob &operator=(blob &&rhs) noexcept + { + if (this == &rhs) { + return *this; + } + + _holder = std::move(rhs._holder); + _buffer = rhs._buffer; + _data = rhs._data; + _length = rhs._length; + + rhs._buffer = nullptr; + rhs._data = nullptr; + rhs._length = 0; + + return *this; + } + /// Create shared buffer from allocated raw bytes. /// NOTE: this operation is not efficient since it involves a memory copy. - static blob create_from_bytes(const char *s, size_t len) + [[nodiscard]] static blob create_from_bytes(const char *s, size_t len) { + DCHECK(s != nullptr || len == 0, + "null source pointer with non-zero length would lead to " + "undefined behaviour"); + std::shared_ptr s_arr(new char[len], std::default_delete()); memcpy(s_arr.get(), s, len); - return blob(std::move(s_arr), 0, static_cast(len)); + return {std::move(s_arr), static_cast(len)}; } /// Create shared buffer without copying data. - static blob create_from_bytes(std::string &&bytes) + [[nodiscard]] static blob create_from_bytes(std::string &&bytes) { - auto s = new std::string(std::move(bytes)); + auto *s = new std::string(std::move(bytes)); std::shared_ptr buf(const_cast(s->data()), [s](char *) { delete s; }); - return blob(std::move(buf), 0, static_cast(s->length())); + return {std::move(buf), static_cast(s->length())}; } void assign(const std::shared_ptr &buffer, int offset, unsigned int length) @@ -95,12 +137,12 @@ class blob void assign(std::shared_ptr &&buffer, int offset, unsigned int length) { _holder = std::move(buffer); - _buffer = (_holder.get()); - _data = (_holder.get() + offset); + _buffer = _holder.get(); + _data = _holder.get() + offset; _length = length; } - /// Deprecated. Use absl::string_view whenever possible. + /// Deprecated. Use std::string_view whenever possible. void assign(const char *buffer, int offset, unsigned int length) { _holder = nullptr; @@ -109,21 +151,22 @@ class blob _length = length; } - const char *data() const noexcept { return _data; } + [[nodiscard]] const char *data() const noexcept { return _data; } - unsigned int length() const noexcept { return _length; } - unsigned int size() const noexcept { return _length; } - bool empty() const noexcept { return _length == 0; } + [[nodiscard]] unsigned int length() const noexcept { return _length; } + [[nodiscard]] unsigned int size() const noexcept { return _length; } + [[nodiscard]] bool empty() const noexcept { return _length == 0; } - std::shared_ptr buffer() const { return _holder; } + [[nodiscard]] std::shared_ptr buffer() const { return _holder; } - const char *buffer_ptr() const { return _holder.get(); } + [[nodiscard]] const char *buffer_ptr() const { return _holder.get(); } - // offset can be negative for buffer dereference - blob range(int offset) const + // `offset` can be negative for buffer dereference. + [[nodiscard]] blob range(int offset) const { - // offset cannot exceed the current length value - assert(offset <= static_cast(_length)); + DCHECK_LE_MSG(offset, + static_cast(_length), + "the required offset cannot exceed the current length"); blob temp = *this; temp._data += offset; @@ -131,33 +174,38 @@ class blob return temp; } - blob range(int offset, unsigned int len) const + [[nodiscard]] blob range(int offset, unsigned int len) const { - // offset cannot exceed the current length value - assert(offset <= static_cast(_length)); + DCHECK_LE_MSG(offset, + static_cast(_length), + "the required offset cannot exceed the current length"); blob temp = *this; temp._data += offset; temp._length -= offset; - // buffer length must exceed the required length - assert(temp._length >= len); + DCHECK_LE_MSG( + len, temp._length, "the required length cannot exceed remaining buffer length"); + temp._length = len; return temp; } - bool operator==(const blob &r) const + // Could NOT be declared with "= delete", since many thrift-generated classes would + // access this in their own `operator==`. + bool operator==(const blob &) const { - // not implemented - assert(false); + CHECK(false, "not implemented"); return false; } - std::string to_string() const + [[nodiscard]] std::string to_string() const { - if (_length == 0) + if (_length == 0) { return {}; - return std::string(_data, _length); + } + + return {_data, _length}; } friend std::ostream &operator<<(std::ostream &os, const blob &bb) @@ -165,7 +213,7 @@ class blob return os << bb.to_string(); } - absl::string_view to_string_view() const { return absl::string_view(_data, _length); } + [[nodiscard]] std::string_view to_string_view() const { return {_data, _length}; } uint32_t read(::apache::thrift::protocol::TProtocol *iprot); uint32_t write(::apache::thrift::protocol::TProtocol *oprot) const; diff --git a/src/utils/builtin_metrics.cpp b/src/utils/builtin_metrics.cpp index 3ffb8f1474..6c8b8ee5a8 100644 --- a/src/utils/builtin_metrics.cpp +++ b/src/utils/builtin_metrics.cpp @@ -17,7 +17,7 @@ #include "utils/builtin_metrics.h" -#include +#include #include #include diff --git a/src/utils/chrono_literals.h b/src/utils/chrono_literals.h index 250e73ed52..9c28f69b11 100644 --- a/src/utils/chrono_literals.h +++ b/src/utils/chrono_literals.h @@ -73,6 +73,6 @@ constexpr std::chrono::nanoseconds operator"" _ns(unsigned long long v) return std::chrono::nanoseconds{v}; } -} // inline namespace chrono_literals -} // inline namespace literals +} // namespace chrono_literals +} // namespace literals } // namespace dsn diff --git a/src/utils/command_manager.cpp b/src/utils/command_manager.cpp index bcf37bb873..b5d56e2afb 100644 --- a/src/utils/command_manager.cpp +++ b/src/utils/command_manager.cpp @@ -29,13 +29,17 @@ #include // IWYU pragma: no_include #include +#include #include #include #include // IWYU pragma: keep #include +#include #include #include +#include "gutil/map_util.h" + namespace dsn { std::unique_ptr @@ -44,19 +48,19 @@ command_manager::register_command(const std::vector &commands, const std::string &args, command_handler handler) { - auto *c = new command_instance(); - c->commands = commands; - c->help = help; - c->args = args; - c->handler = std::move(handler); + auto ch = std::make_shared(); + ch->commands = commands; + ch->help = help; + ch->args = args; + ch->handler = std::move(handler); utils::auto_write_lock l(_lock); for (const auto &cmd : commands) { CHECK(!cmd.empty(), "should not register empty command"); - CHECK(_handlers.emplace(cmd, c).second, "command '{}' already registered", cmd); + gutil::InsertOrDie(&_handler_by_cmd, cmd, ch); } - return std::make_unique(reinterpret_cast(c)); + return std::make_unique(reinterpret_cast(ch.get())); } std::unique_ptr command_manager::register_bool_command( @@ -94,96 +98,103 @@ command_manager::register_multiple_commands(const std::vector &comm handler); } -void command_manager::deregister_command(uintptr_t handle) +void command_manager::deregister_command(uintptr_t cmd_id) { - auto c = reinterpret_cast(handle); - CHECK_NOTNULL(c, "cannot deregister a null handle"); + const auto ch = reinterpret_cast(cmd_id); + CHECK_NOTNULL(ch, "cannot deregister a null command id"); utils::auto_write_lock l(_lock); - for (const std::string &cmd : c->commands) { - _handlers.erase(cmd); + for (const auto &cmd : ch->commands) { + _handler_by_cmd.erase(cmd); } } +void command_manager::add_global_cmd(std::unique_ptr cmd) +{ + utils::auto_write_lock l(_lock); + _cmds.push_back(std::move(cmd)); +} + bool command_manager::run_command(const std::string &cmd, const std::vector &args, /*out*/ std::string &output) { - command_instance *h = nullptr; + std::shared_ptr ch; { utils::auto_read_lock l(_lock); - auto it = _handlers.find(cmd); - if (it != _handlers.end()) - h = it->second; + ch = gutil::FindPtrOrNull(_handler_by_cmd, cmd); } - if (h == nullptr) { - output = std::string("unknown command '") + cmd + "'"; + if (!ch) { + output = fmt::format("unknown command '{}'", cmd); return false; - } else { - output = h->handler(args); - return true; } + + output = ch->handler(args); + return true; } std::string command_manager::set_bool(bool &value, const std::string &name, const std::vector &args) { + nlohmann::json msg; + msg["error"] = "ok"; // Query. if (args.empty()) { - return value ? "true" : "false"; + msg[name] = value ? "true" : "false"; + return msg.dump(2); } // Invalid arguments size. if (args.size() > 1) { - return fmt::format("ERR: invalid arguments, only one boolean argument is acceptable"); + msg["error"] = "ERR: invalid arguments, only one boolean argument is acceptable"; + return msg.dump(2); } // Invalid argument. bool new_value; if (!dsn::buf2bool(args[0], new_value, /* ignore_case */ true)) { - return fmt::format("ERR: invalid arguments, '{}' is not a boolean", args[0]); + msg["error"] = fmt::format("ERR: invalid arguments, '{}' is not a boolean", args[0]); + return msg.dump(2); } // Set to a new value. value = new_value; LOG_INFO("set {} to {} by remote command", name, new_value); - return "OK"; + return msg.dump(2); } command_manager::command_manager() { - _cmds.emplace_back( - register_multiple_commands({"help", "h", "H", "Help"}, - "Display help information", - "[command]", - [this](const std::vector &args) { - std::stringstream ss; - if (args.empty()) { - std::unordered_set cmds; - utils::auto_read_lock l(_lock); - for (const auto &c : this->_handlers) { - // Multiple commands with the same handler are print - // only once. - if (cmds.insert(c.second.get()).second) { - ss << c.second->help << std::endl; - } - } - } else { - utils::auto_read_lock l(_lock); - auto it = _handlers.find(args[0]); - if (it == _handlers.end()) { - ss << "cannot find command '" << args[0] << "'"; - } else { - ss.width(6); - ss << std::left << it->second->help << std::endl - << it->second->args << std::endl; - } - } - - return ss.str(); - })); + _cmds.emplace_back(register_multiple_commands( + {"help", "h", "H", "Help"}, + "Display help information", + "[command]", + [this](const std::vector &args) { + std::stringstream ss; + if (args.empty()) { + std::unordered_set chs; + utils::auto_read_lock l(_lock); + for (const auto &[_, ch] : _handler_by_cmd) { + // Multiple commands with the same handler are print only once. + if (gutil::InsertIfNotPresent(&chs, ch.get())) { + ss << ch->help << std::endl; + } + } + } else { + utils::auto_read_lock l(_lock); + const auto ch = gutil::FindPtrOrNull(_handler_by_cmd, args[0]); + if (!ch) { + ss << "cannot find command '" << args[0] << "'"; + } else { + ss.width(6); + ss << std::left << ch->help << std::endl << ch->args << std::endl; + } + } + + return ss.str(); + })); _cmds.emplace_back(register_multiple_commands( {"repeat", "r", "R", "Repeat"}, @@ -236,10 +247,10 @@ command_manager::command_manager() command_manager::~command_manager() { _cmds.clear(); - CHECK(_handlers.empty(), + CHECK(_handler_by_cmd.empty(), "All commands must be deregistered before command_manager is destroyed, however '{}' is " "still registered", - _handlers.begin()->first); + _handler_by_cmd.begin()->first); } } // namespace dsn diff --git a/src/utils/command_manager.h b/src/utils/command_manager.h index a73966845c..b971522d01 100644 --- a/src/utils/command_manager.h +++ b/src/utils/command_manager.h @@ -27,6 +27,9 @@ #pragma once #include +#include +#include +#include // IWYU pragma: no_include #include #include @@ -35,7 +38,6 @@ #include #include -#include "utils/autoref_ptr.h" #include "utils/fmt_logging.h" #include "utils/ports.h" #include "utils/singleton.h" @@ -44,7 +46,6 @@ #include "utils/synchronize.h" namespace dsn { - class command_deregister; class command_manager : public ::dsn::utils::singleton @@ -64,13 +65,14 @@ class command_manager : public ::dsn::utils::singleton // 'validator' is used to validate the new value. // The value is reset to 'default_value' if passing "DEFAULT" argument. template - WARN_UNUSED_RESULT std::unique_ptr - register_int_command(T &value, - T default_value, - const std::string &command, - const std::string &help, - std::function validator = - [](int64_t new_value) -> bool { return new_value >= 0; }) + WARN_UNUSED_RESULT std::unique_ptr register_int_command( + T &value, + T default_value, + const std::string &command, + const std::string &help, + std::function validator = [](int64_t new_value) -> bool { + return new_value >= 0; + }) { return register_single_command( command, @@ -81,7 +83,7 @@ class command_manager : public ::dsn::utils::singleton }); } - // Register a single 'command' with the 'help' description, its arguments are describe in + // Register a single 'command' with the 'help' description, its arguments are described in // 'args'. std::unique_ptr register_single_command(const std::string &command, @@ -89,7 +91,7 @@ class command_manager : public ::dsn::utils::singleton const std::string &args, command_handler handler) WARN_UNUSED_RESULT; - // Register multiple 'commands' with the 'help' description, their arguments are describe in + // Register multiple 'commands' with the 'help' description, their arguments are described in // 'args'. std::unique_ptr register_multiple_commands(const std::vector &commands, @@ -97,6 +99,9 @@ class command_manager : public ::dsn::utils::singleton const std::string &args, command_handler handler) WARN_UNUSED_RESULT; + // Register a global command which is not associated with any objects. + void add_global_cmd(std::unique_ptr cmd); + bool run_command(const std::string &cmd, const std::vector &args, /*out*/ std::string &output); @@ -108,7 +113,7 @@ class command_manager : public ::dsn::utils::singleton command_manager(); ~command_manager(); - struct command_instance : public ref_counter + struct commands_handler { std::vector commands; std::string help; @@ -122,7 +127,7 @@ class command_manager : public ::dsn::utils::singleton const std::string &args, command_handler handler) WARN_UNUSED_RESULT; - void deregister_command(uintptr_t handle); + void deregister_command(uintptr_t cmd_id); static std::string set_bool(bool &value, const std::string &name, const std::vector &args); @@ -134,39 +139,47 @@ class command_manager : public ::dsn::utils::singleton const std::vector &args, const std::function &validator) { + nlohmann::json msg; + msg["error"] = "ok"; // Query. if (args.empty()) { - return std::to_string(value); + msg[name] = fmt::format("{}", std::to_string(value)); + return msg.dump(2); } // Invalid arguments size. if (args.size() > 1) { - return fmt::format("ERR: invalid arguments, only one integer argument is acceptable"); + msg["error"] = + fmt::format("ERR: invalid arguments '{}', only one argument is acceptable", + fmt::join(args, " ")); + return msg.dump(2); } // Reset to the default value. if (dsn::utils::iequals(args[0], "DEFAULT")) { value = default_value; - return "OK"; + msg[name] = default_value; + return msg.dump(2); } // Invalid argument. T new_value = 0; if (!internal::buf2signed(args[0], new_value) || !validator(static_cast(new_value))) { - return {"ERR: invalid arguments"}; + msg["error"] = + fmt::format("ERR: invalid argument '{}', the value is not acceptable", args[0]); + return msg.dump(2); } // Set to a new value. value = new_value; LOG_INFO("set {} to {} by remote command", name, new_value); - return "OK"; + return msg.dump(2); } - typedef ref_ptr command_instance_ptr; utils::rw_lock_nr _lock; - std::map _handlers; + std::map> _handler_by_cmd; std::vector> _cmds; }; diff --git a/src/utils/config_helper.h b/src/utils/config_helper.h index aeda3048c9..1010b68ab6 100644 --- a/src/utils/config_helper.h +++ b/src/utils/config_helper.h @@ -49,9 +49,12 @@ section, #fld, default_value ? default_value->fld : default_fld_value, dsptr); #define CONFIG_FLD_STRING(fld, default_fld_value, dsptr) \ + CONFIG_FLD_STRING_BY_KEY(fld, #fld, default_fld_value, dsptr) + +#define CONFIG_FLD_STRING_BY_KEY(fld, key, default_fld_value, dsptr) \ val.fld = dsn_config_get_value_string( \ section, \ - #fld, \ + key, \ (val.fld.length() > 0 && val.fld != std::string(default_fld_value)) \ ? val.fld.c_str() \ : (default_value ? default_value->fld.c_str() : default_fld_value), \ diff --git a/src/utils/configuration.cpp b/src/utils/configuration.cpp index d9d4c1ac64..449d1d4c16 100644 --- a/src/utils/configuration.cpp +++ b/src/utils/configuration.cpp @@ -438,4 +438,4 @@ bool configuration::has_key(const char *section, const char *key) } return false; } -} +} // namespace dsn diff --git a/src/utils/crc.cpp b/src/utils/crc.cpp index b0d608eba8..c710870404 100644 --- a/src/utils/crc.cpp +++ b/src/utils/crc.cpp @@ -439,8 +439,8 @@ uint64_t crc64::_crc_table[sizeof(crc64::_crc_table) / sizeof(crc64::_crc_table[ #undef crc64_POLY #undef BIT64 #undef BIT32 -} -} +} // namespace utils +} // namespace dsn namespace dsn { namespace utils { @@ -477,5 +477,5 @@ uint64_t crc64_concat(uint32_t xy_init, return ::dsn::utils::crc64::concatenate( 0, x_init, x_final, (uint64_t)x_size, y_init, y_final, (uint64_t)y_size); } -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/crc.h b/src/utils/crc.h index e2e4ae100b..c18c2137a5 100644 --- a/src/utils/crc.h +++ b/src/utils/crc.h @@ -71,5 +71,5 @@ uint64_t crc64_concat(uint32_t xy_init, uint64_t y_init, uint64_t y_final, size_t y_size); -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/customizable_id.h b/src/utils/customizable_id.h index 46acfcbfe3..e0efae8863 100644 --- a/src/utils/customizable_id.h +++ b/src/utils/customizable_id.h @@ -207,5 +207,5 @@ int customized_id_mgr::register_id(const char *name) _names2.push_back(std::string(name)); return code; } -} -} // end namespace dsn::utils +} // namespace utils +} // namespace dsn diff --git a/src/utils/distributed_lock_service.h b/src/utils/distributed_lock_service.h index 0a8749021c..271d0b90fc 100644 --- a/src/utils/distributed_lock_service.h +++ b/src/utils/distributed_lock_service.h @@ -26,24 +26,24 @@ #pragma once -#include "runtime/api_task.h" +#include +#include +#include + +#include "common/gpid.h" #include "runtime/api_layer1.h" #include "runtime/app_model.h" -#include "utils/api_utilities.h" -#include "utils/error_code.h" -#include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" -#include "common/gpid.h" -#include "runtime/rpc/serialization.h" -#include "runtime/rpc/rpc_stream.h" +#include "runtime/api_task.h" +#include "rpc/serialization.h" +#include "rpc/rpc_stream.h" #include "runtime/serverlet.h" #include "runtime/service_app.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" +#include "task/task_code.h" +#include "task/future_types.h" +#include "utils/api_utilities.h" #include "utils/error_code.h" -#include "runtime/task/future_types.h" -#include -#include -#include +#include "utils/threadpool_code.h" namespace dsn { namespace dist { @@ -116,17 +116,17 @@ class distributed_lock_service const lock_options &opt) = 0; /* - * cancel the lock operation that is on pending - * cb_code: the task code specifies where to execute the callback - * lock_id should be valid, and cb should not be empty - * - * possible ec: - * ERR_INVALID_PARAMETERS - * ERR_OK, the pending lock is cancelled successfully - * ERR_OBJECT_NOT_FOUND, the caller is not found in pending list, check - * returned owner to see whether it already succeedes - * - */ + * cancel the lock operation that is on pending + * cb_code: the task code specifies where to execute the callback + * lock_id should be valid, and cb should not be empty + * + * possible ec: + * ERR_INVALID_PARAMETERS + * ERR_OK, the pending lock is cancelled successfully + * ERR_OBJECT_NOT_FOUND, the caller is not found in pending list, check + * returned owner to see whether it already succeedes + * + */ virtual task_ptr cancel_pending_lock(const std::string &lock_id, const std::string &myself_id, task_code cb_code, @@ -173,5 +173,5 @@ class distributed_lock_service /*out*/ std::string &owner, /*out*/ uint64_t &version) = 0; }; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/utils/endians.h b/src/utils/endians.h index f41a4b7759..1d0739b4ea 100644 --- a/src/utils/endians.h +++ b/src/utils/endians.h @@ -23,7 +23,7 @@ #include "api_utilities.h" #include "fmt_logging.h" #include "ports.h" -#include "absl/strings/string_view.h" +#include namespace dsn { @@ -108,7 +108,7 @@ class data_output class data_input { public: - explicit data_input(absl::string_view s) : _p(s.data()), _size(s.size()) {} + explicit data_input(std::string_view s) : _p(s.data()), _size(s.size()) {} uint8_t read_u8() { return read_unsigned(); } @@ -118,7 +118,7 @@ class data_input uint64_t read_u64() { return read_unsigned(); } - absl::string_view read_str() { return {_p, _size}; } + std::string_view read_str() { return {_p, _size}; } void skip(size_t sz) { diff --git a/src/utils/error_code.cpp b/src/utils/error_code.cpp index 4b26fcc57d..e5930d4598 100644 --- a/src/utils/error_code.cpp +++ b/src/utils/error_code.cpp @@ -65,4 +65,4 @@ const char *error_code::to_string() const { return dsn::utils::customized_id_mgr::instance().get_name(_internal_code); } -} +} // namespace dsn diff --git a/src/utils/error_code.h b/src/utils/error_code.h index 04df97947a..dfdc680304 100644 --- a/src/utils/error_code.h +++ b/src/utils/error_code.h @@ -182,6 +182,11 @@ DEFINE_ERR_CODE(ERR_RDB_CORRUPTION) DEFINE_ERR_CODE(ERR_DISK_IO_ERROR) DEFINE_ERR_CODE(ERR_CURL_FAILED) + +DEFINE_ERR_CODE(ERR_DUP_EXIST) + +DEFINE_ERR_CODE(ERR_HTTP_ERROR) + } // namespace dsn USER_DEFINED_STRUCTURE_FORMATTER(::dsn::error_code); diff --git a/src/utils/errors.h b/src/utils/errors.h index c611e1beff..734ee9375b 100644 --- a/src/utils/errors.h +++ b/src/utils/errors.h @@ -33,7 +33,7 @@ #include "utils/fmt_logging.h" #include "utils/fmt_utils.h" #include "utils/ports.h" -#include "absl/strings/string_view.h" +#include namespace dsn { @@ -72,7 +72,7 @@ class error_s error_s(error_s &&rhs) noexcept = default; error_s &operator=(error_s &&) noexcept = default; - static error_s make(error_code code, absl::string_view reason) { return error_s(code, reason); } + static error_s make(error_code code, std::string_view reason) { return error_s(code, reason); } static error_s make(error_code code) { @@ -136,7 +136,7 @@ class error_s return os << s.description(); } - friend bool operator==(const error_s lhs, const error_s &rhs) + friend bool operator==(const error_s &lhs, const error_s &rhs) { if (lhs._info && rhs._info) { return lhs._info->code == rhs._info->code && lhs._info->msg == rhs._info->msg; @@ -145,14 +145,14 @@ class error_s } private: - error_s(error_code code, absl::string_view msg) noexcept : _info(new error_info(code, msg)) {} + error_s(error_code code, std::string_view msg) noexcept : _info(new error_info(code, msg)) {} struct error_info { error_code code; std::string msg; // TODO(wutao1): use raw char* to improve performance? - error_info(error_code c, absl::string_view s) : code(c), msg(s) {} + error_info(error_code c, std::string_view s) : code(c), msg(s) {} }; void copy(const error_s &rhs) diff --git a/src/utils/exp_delay.h b/src/utils/exp_delay.h index 09b5389348..b6d3af346d 100644 --- a/src/utils/exp_delay.h +++ b/src/utils/exp_delay.h @@ -119,4 +119,4 @@ class shared_exp_delay private: int _delay[DELAY_COUNT]; }; -} +} // namespace dsn diff --git a/src/utils/factory_store.h b/src/utils/factory_store.h index 72d203e0c7..2d1a2bef1a 100644 --- a/src/utils/factory_store.h +++ b/src/utils/factory_store.h @@ -157,5 +157,5 @@ class factory_store } }; }; -} -} // end namespace dsn::utils +} // namespace utils +} // namespace dsn diff --git a/src/utils/fail_point.cpp b/src/utils/fail_point.cpp index 45c809c020..49790832c8 100644 --- a/src/utils/fail_point.cpp +++ b/src/utils/fail_point.cpp @@ -34,7 +34,7 @@ #include #include -#include "absl/strings/string_view.h" +#include #include "fail_point_impl.h" #include "utils/fail_point.h" #include "utils/fmt_logging.h" @@ -45,7 +45,7 @@ namespace fail { static fail_point_registry REGISTRY; -/*extern*/ const std::string *eval(absl::string_view name) +/*extern*/ const std::string *eval(std::string_view name) { fail_point *p = REGISTRY.try_get(name); if (!p) { @@ -71,7 +71,7 @@ inline const char *task_type_to_string(fail_point::task_type t) } } -/*extern*/ void cfg(absl::string_view name, absl::string_view action) +/*extern*/ void cfg(std::string_view name, std::string_view action) { fail_point &p = REGISTRY.create_if_not_exists(name); p.set_action(action); @@ -93,14 +93,14 @@ inline const char *task_type_to_string(fail_point::task_type t) _S_FAIL_POINT_ENABLED = false; } -void fail_point::set_action(absl::string_view action) +void fail_point::set_action(std::string_view action) { if (!parse_from_string(action)) { LOG_FATAL("unrecognized command: {}", action); } } -bool fail_point::parse_from_string(absl::string_view action) +bool fail_point::parse_from_string(std::string_view action) { _max_cnt = -1; _freq = 100; diff --git a/src/utils/fail_point.h b/src/utils/fail_point.h index 663dcbd5a3..2c0e4111bf 100644 --- a/src/utils/fail_point.h +++ b/src/utils/fail_point.h @@ -36,7 +36,7 @@ #include #include "utils/ports.h" -#include "absl/strings/string_view.h" +#include // The only entry to define a fail point with `return` function: lambda function must be // return non-void type. When a fail point is defined, it's referenced via the name. @@ -75,14 +75,14 @@ namespace dsn { namespace fail { -extern const std::string *eval(absl::string_view name); +extern const std::string *eval(std::string_view name); /// Set new actions to a fail point at runtime. /// The format of an action is `[p%][cnt*]task[(arg)]`. `p%` is the expected probability that /// the action is triggered, and `cnt*` is the max times the action can be triggered. /// For example, `20%3*print(still alive!)` means the fail point has 20% chance to print a /// message "still alive!". And the message will be printed at most 3 times. -extern void cfg(absl::string_view name, absl::string_view action); +extern void cfg(std::string_view name, std::string_view action); extern void setup(); diff --git a/src/utils/fail_point_impl.h b/src/utils/fail_point_impl.h index 75ad2ef399..449d8fd534 100644 --- a/src/utils/fail_point_impl.h +++ b/src/utils/fail_point_impl.h @@ -61,11 +61,11 @@ struct fail_point Void, }; - void set_action(absl::string_view action); + void set_action(std::string_view action); const std::string *eval(); - explicit fail_point(absl::string_view name) : _name(name) {} + explicit fail_point(std::string_view name) : _name(name) {} /// for test only fail_point(task_type t, std::string arg, int freq, int max_cnt) @@ -76,7 +76,7 @@ struct fail_point /// for test only fail_point() = default; - bool parse_from_string(absl::string_view action); + bool parse_from_string(std::string_view action); friend inline bool operator==(const fail_point &p1, const fail_point &p2) { @@ -103,7 +103,7 @@ USER_DEFINED_ENUM_FORMATTER(fail_point::task_type) struct fail_point_registry { - fail_point &create_if_not_exists(absl::string_view name) + fail_point &create_if_not_exists(std::string_view name) { std::lock_guard guard(_mu); @@ -111,7 +111,7 @@ struct fail_point_registry return it->second; } - fail_point *try_get(absl::string_view name) + fail_point *try_get(std::string_view name) { std::lock_guard guard(_mu); diff --git a/src/utils/filesystem.cpp b/src/utils/filesystem.cpp index ec414767cc..75ad72f5a1 100644 --- a/src/utils/filesystem.cpp +++ b/src/utils/filesystem.cpp @@ -25,10 +25,12 @@ */ #include +#include #include #include #include #include +#include #include #include #include @@ -41,7 +43,8 @@ #include #include -#include "absl/strings/string_view.h" +#include +#include "errors.h" #include "utils/defer.h" #include "utils/env.h" #include "utils/fail_point.h" @@ -492,6 +495,12 @@ bool create_file(const std::string &path) return true; } +bool is_absolute_path(const std::string &path) +{ + boost::filesystem::path p(path); + return p.is_absolute(); +} + bool get_absolute_path(const std::string &path1, std::string &path2) { bool succ; @@ -639,9 +648,9 @@ error_code get_process_image_path(int pid, std::string &path) bool get_disk_space_info(const std::string &path, disk_space_info &info) { - FAIL_POINT_INJECT_F("filesystem_get_disk_space_info", [&info](absl::string_view str) { + FAIL_POINT_INJECT_F("filesystem_get_disk_space_info", [&info](std::string_view str) { info.capacity = 100 * 1024 * 1024; - if (str.find("insufficient") != absl::string_view::npos) { + if (str.find("insufficient") != std::string_view::npos) { info.available = 512 * 1024; } else { info.available = 50 * 1024 * 1024; @@ -841,11 +850,11 @@ bool verify_file_size(const std::string &fname, FileDataType type, const int64_t bool create_directory(const std::string &path, std::string &absolute_path, std::string &err_msg) { - FAIL_POINT_INJECT_F("filesystem_create_directory", [path](absl::string_view str) { + FAIL_POINT_INJECT_F("filesystem_create_directory", [path](std::string_view str) { // when str contains 'false', and path contains broken_disk_dir, mock create fail(return // false) std::string broken_disk_dir = "disk1"; - return str.find("false") == absl::string_view::npos || + return str.find("false") == std::string_view::npos || path.find(broken_disk_dir) == std::string::npos; }); @@ -862,11 +871,11 @@ bool create_directory(const std::string &path, std::string &absolute_path, std:: bool check_dir_rw(const std::string &path, std::string &err_msg) { - FAIL_POINT_INJECT_F("filesystem_check_dir_rw", [path](absl::string_view str) { + FAIL_POINT_INJECT_F("filesystem_check_dir_rw", [path](std::string_view str) { // when str contains 'false', and path contains broken_disk_dir, mock check fail(return // false) std::string broken_disk_dir = "disk1"; - return str.find("false") == absl::string_view::npos || + return str.find("false") == std::string_view::npos || path.find(broken_disk_dir) == std::string::npos; }); @@ -901,6 +910,36 @@ bool check_dir_rw(const std::string &path, std::string &err_msg) return true; } +error_s glob(const std::string &path_pattern, std::vector &path_list) +{ + glob_t result; + auto cleanup = dsn::defer([&] { ::globfree(&result); }); + + errno = 0; + int ret = ::glob(path_pattern.c_str(), GLOB_TILDE | GLOB_ERR, NULL, &result); + switch (ret) { + case 0: + break; + + case GLOB_NOMATCH: + return error_s::ok(); + + case GLOB_NOSPACE: + return error_s::make(ERR_FS_INTERNAL, "glob out of memory"); + + default: + std::string error(errno == 0 ? "unknown error" : safe_strerror(errno)); + return error_s::make(ERR_FS_INTERNAL, + fmt::format("glob failed for '{}': {}", path_pattern, error)); + } + + for (size_t i = 0; i < result.gl_pathc; ++i) { + path_list.emplace_back(result.gl_pathv[i]); + } + + return error_s::ok(); +} + } // namespace filesystem } // namespace utils } // namespace dsn diff --git a/src/utils/filesystem.h b/src/utils/filesystem.h index 29524c9c3c..3c370044ce 100644 --- a/src/utils/filesystem.h +++ b/src/utils/filesystem.h @@ -32,6 +32,7 @@ #include #include +#include "utils/errors.h" #include "utils/error_code.h" #ifndef _XOPEN_SOURCE @@ -69,6 +70,8 @@ namespace filesystem { void get_normalized_path(const std::string &path, std::string &npath); +bool is_absolute_path(const std::string &path); + bool get_absolute_path(const std::string &path1, std::string &path2); std::string remove_file_name(const std::string &path); @@ -165,6 +168,13 @@ bool create_directory(const std::string &path, // call `create_directory` before to make `path` exist bool check_dir_rw(const std::string &path, /*out*/ std::string &err_msg); +// Finds paths on the filesystem matching a pattern. +// +// The found pathnames are added to the 'paths' vector. If no pathnames are +// found matching the pattern, no paths are added to the vector and an OK +// status is returned. +error_s glob(const std::string &path_pattern, std::vector &path_list); + } // namespace filesystem } // namespace utils } // namespace dsn diff --git a/src/utils/function_traits.h b/src/utils/function_traits.h index 1cc3c25103..f060a22fab 100644 --- a/src/utils/function_traits.h +++ b/src/utils/function_traits.h @@ -119,4 +119,4 @@ template struct function_traits : public function_traits { }; -} +} // namespace dsn diff --git a/src/utils/gpid.cpp b/src/utils/gpid.cpp index 1d3bb3e27c..18c5433efd 100644 --- a/src/utils/gpid.cpp +++ b/src/utils/gpid.cpp @@ -43,4 +43,4 @@ const char *gpid::to_string() const snprintf(b, bf.get_chunk_size(), "%d.%d", _value.u.app_id, _value.u.partition_index); return b; } -} +} // namespace dsn diff --git a/src/utils/je_ctl.cpp b/src/utils/je_ctl.cpp index 38f5bb1be0..3cfacf4abc 100644 --- a/src/utils/je_ctl.cpp +++ b/src/utils/je_ctl.cpp @@ -66,7 +66,10 @@ void je_dump_malloc_stats(const char *opts, size_t buf_sz, std::string &stats) const char *je_stats_type_to_opts(je_stats_type type) { static const char *opts_map[] = { - "gmdablxe", "mdablxe", "gblxe", "", + "gmdablxe", + "mdablxe", + "gblxe", + "", }; RETURN_ARRAY_ELEM_BY_ENUM_TYPE(type, opts_map); @@ -75,7 +78,10 @@ const char *je_stats_type_to_opts(je_stats_type type) size_t je_stats_type_to_default_buf_sz(je_stats_type type) { static const size_t buf_sz_map[] = { - 2 * 1024, 4 * 1024, 8 * 1024 * 1024, 8 * 1024 * 1024, + 2 * 1024, + 4 * 1024, + 8 * 1024 * 1024, + 8 * 1024 * 1024, }; RETURN_ARRAY_ELEM_BY_ENUM_TYPE(type, buf_sz_map); diff --git a/src/utils/latency_tracer.cpp b/src/utils/latency_tracer.cpp index e8408dd938..f5c97f511b 100644 --- a/src/utils/latency_tracer.cpp +++ b/src/utils/latency_tracer.cpp @@ -17,7 +17,7 @@ #include "utils/latency_tracer.h" -#include +#include #include #include #include diff --git a/src/utils/latency_tracer.h b/src/utils/latency_tracer.h index e1136d475a..6fbf2a85a4 100644 --- a/src/utils/latency_tracer.h +++ b/src/utils/latency_tracer.h @@ -23,7 +23,7 @@ #include #include "common/replication.codes.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/flags.h" #include "utils/ports.h" #include "utils/synchronize.h" diff --git a/src/utils/lockp.std.h b/src/utils/lockp.std.h index b61b6f093e..64cc8095c1 100644 --- a/src/utils/lockp.std.h +++ b/src/utils/lockp.std.h @@ -93,5 +93,5 @@ class std_semaphore_provider : public semaphore_provider private: dsn::utils::semaphore _sema; }; -} -} // end namespace dsn::tools +} // namespace tools +} // namespace dsn diff --git a/src/utils/logging.cpp b/src/utils/logging.cpp index 294305b3fa..66aeeb8aef 100644 --- a/src/utils/logging.cpp +++ b/src/utils/logging.cpp @@ -28,6 +28,7 @@ #include #include #include +#include #include "runtime/tool_api.h" #include "simple_logger.h" @@ -61,7 +62,7 @@ std::function log_prefixed_message_func = []() -> std::string { r void set_log_prefixed_message_func(std::function func) { - log_prefixed_message_func = func; + log_prefixed_message_func = std::move(func); } } // namespace dsn @@ -72,8 +73,9 @@ static void log_on_sys_exit(::dsn::sys_exit_type) } void dsn_log_init(const std::string &logging_factory_name, - const std::string &dir_log, - std::function dsn_log_prefixed_message_func) + const std::string &log_dir, + const std::string &role_name, + const std::function &dsn_log_prefixed_message_func) { log_start_level = enum_from_string(FLAGS_logging_start_level, LOG_LEVEL_INVALID); @@ -86,7 +88,7 @@ void dsn_log_init(const std::string &logging_factory_name, } dsn::logging_provider *logger = dsn::utils::factory_store::create( - logging_factory_name.c_str(), dsn::PROVIDER_TYPE_MAIN, dir_log.c_str()); + logging_factory_name.c_str(), dsn::PROVIDER_TYPE_MAIN, log_dir.c_str(), role_name.c_str()); dsn::logging_provider::set_logger(logger); if (dsn_log_prefixed_message_func != nullptr) { @@ -117,7 +119,7 @@ logging_provider *logging_provider::instance() logging_provider *logging_provider::create_default_instance() { - return new tools::screen_logger(true); + return new tools::screen_logger(nullptr, nullptr); } void logging_provider::set_logger(logging_provider *logger) { _logger.reset(logger); } diff --git a/src/utils/logging_provider.h b/src/utils/logging_provider.h index 5061cee49f..264d721ce8 100644 --- a/src/utils/logging_provider.h +++ b/src/utils/logging_provider.h @@ -40,12 +40,12 @@ class logging_provider { public: template - static logging_provider *create(const char *log_dir) + static logging_provider *create(const char *log_dir, const char *role_name) { - return new T(log_dir); + return new T(log_dir, role_name); } - typedef logging_provider *(*factory)(const char *); + typedef logging_provider *(*factory)(const char *, const char *); public: virtual ~logging_provider() = default; @@ -64,14 +64,14 @@ class logging_provider virtual void flush() = 0; - void deregister_commands() { _cmds.clear(); } - protected: static std::unique_ptr _logger; static logging_provider *create_default_instance(); - std::vector> _cmds; + logging_provider(log_level_t stderr_start_level) : _stderr_start_level(stderr_start_level) {} + + const log_level_t _stderr_start_level; }; void set_log_prefixed_message_func(std::function func); @@ -87,5 +87,6 @@ bool register_component_provider(const char *name, } // namespace dsn extern void dsn_log_init(const std::string &logging_factory_name, - const std::string &dir_log, - std::function dsn_log_prefixed_message_func); + const std::string &log_dir, + const std::string &role_name, + const std::function &dsn_log_prefixed_message_func); diff --git a/src/utils/metrics.cpp b/src/utils/metrics.cpp index 1677584bed..099ee9fd1a 100644 --- a/src/utils/metrics.cpp +++ b/src/utils/metrics.cpp @@ -17,7 +17,6 @@ #include "utils/metrics.h" -#include #include #include #include @@ -25,15 +24,16 @@ #include #include #include +#include #include "http/http_method.h" #include "http/http_status_code.h" +#include "rpc/rpc_engine.h" +#include "rpc/rpc_host_port.h" #include "runtime/api_layer1.h" -#include "runtime/rpc/rpc_engine.h" -#include "runtime/rpc/rpc_host_port.h" #include "runtime/service_app.h" #include "runtime/service_engine.h" -#include "runtime/task/task.h" +#include "task/task.h" #include "utils/flags.h" #include "utils/rand.h" #include "utils/shared_io_service.h" diff --git a/src/utils/metrics.h b/src/utils/metrics.h index e8a9cdc6a3..b399d9313a 100644 --- a/src/utils/metrics.h +++ b/src/utils/metrics.h @@ -40,11 +40,13 @@ #include #include -#include "absl/strings/string_view.h" +#include #include "common/json_helper.h" +#include "gutil/map_util.h" #include "http/http_server.h" #include "utils/alloc.h" #include "utils/autoref_ptr.h" +#include "utils/blob.h" #include "utils/casts.h" #include "utils/enum_helper.h" #include "utils/error_code.h" @@ -377,7 +379,7 @@ class metric_entity : public ref_counter // `args` are the parameters that are used to construct the object of MetricType. template - ref_ptr find_or_create(const metric_prototype *prototype, Args &&... args); + ref_ptr find_or_create(const metric_prototype *prototype, Args &&...args); void take_snapshot(metric_json_writer &writer, const metric_filters &filters) const; @@ -852,22 +854,22 @@ class metric_prototype public: struct ctor_args { - const absl::string_view entity_type; + const std::string_view entity_type; const metric_type type; - const absl::string_view name; + const std::string_view name; const metric_unit unit; - const absl::string_view desc; + const std::string_view desc; }; - absl::string_view entity_type() const { return _args.entity_type; } + std::string_view entity_type() const { return _args.entity_type; } metric_type type() const { return _args.type; } - absl::string_view name() const { return _args.name; } + std::string_view name() const { return _args.name; } metric_unit unit() const { return _args.unit; } - absl::string_view description() const { return _args.desc; } + std::string_view description() const { return _args.desc; } protected: explicit metric_prototype(const ctor_args &args); @@ -890,7 +892,7 @@ class metric_prototype_with : public metric_prototype // Construct a metric object based on the instance of metric_entity. template - ref_ptr instantiate(const metric_entity_ptr &entity, Args &&... args) const + ref_ptr instantiate(const metric_entity_ptr &entity, Args &&...args) const { return entity->find_or_create(this, std::forward(args)...); } @@ -900,8 +902,7 @@ class metric_prototype_with : public metric_prototype }; template -ref_ptr metric_entity::find_or_create(const metric_prototype *prototype, - Args &&... args) +ref_ptr metric_entity::find_or_create(const metric_prototype *prototype, Args &&...args) { CHECK_STREQ_MSG(prototype->entity_type().data(), _prototype->name(), @@ -1662,7 +1663,7 @@ class auto_count struct metric_brief_##field##_snapshot \ { \ std::string name; \ - double field; \ + double field = 0.0; \ \ DEFINE_JSON_SERIALIZATION(name, field) \ } @@ -1700,31 +1701,84 @@ DEF_ALL_METRIC_BRIEF_SNAPSHOTS(value); DEF_ALL_METRIC_BRIEF_SNAPSHOTS(p99); -#define DESERIALIZE_METRIC_QUERY_BRIEF_SNAPSHOT(field, json_string, query_snapshot) \ - dsn::metric_query_brief_##field##_snapshot query_snapshot; \ +// Deserialize the json string into the snapshot. +template +inline error_s deserialize_metric_snapshot(const std::string &json_string, + TMetricSnapshot &snapshot) +{ + dsn::blob bb(json_string.data(), 0, json_string.size()); + if (dsn_unlikely(!dsn::json::json_forwarder::decode(bb, snapshot))) { + return FMT_ERR(dsn::ERR_INVALID_DATA, "invalid json string: {}", json_string); + } + + return error_s::ok(); +} + +#define DESERIALIZE_METRIC_SNAPSHOT(json_string, query_snapshot) \ do { \ - dsn::blob bb(json_string.data(), 0, json_string.size()); \ - if (dsn_unlikely( \ - !dsn::json::json_forwarder::decode( \ - bb, query_snapshot))) { \ - return FMT_ERR(dsn::ERR_INVALID_DATA, "invalid json string: {}", json_string); \ + const auto &res = deserialize_metric_snapshot(json_string, query_snapshot); \ + if (dsn_unlikely(!res)) { \ + return res; \ } \ } while (0) +// Deserialize the json string into the snapshot specially for metric query which is declared +// internally. +#define DESERIALIZE_METRIC_QUERY_BRIEF_SNAPSHOT(field, json_string, query_snapshot) \ + dsn::metric_query_brief_##field##_snapshot query_snapshot; \ + DESERIALIZE_METRIC_SNAPSHOT(json_string, query_snapshot) + +// Deserialize both json string samples into respective snapshots. +template +inline error_s deserialize_metric_2_samples(const std::string &json_string_start, + const std::string &json_string_end, + TMetricSnapshot &snapshot_start, + TMetricSnapshot &snapshot_end) +{ + DESERIALIZE_METRIC_SNAPSHOT(json_string_start, snapshot_start); + DESERIALIZE_METRIC_SNAPSHOT(json_string_end, snapshot_end); + return error_s::ok(); +} + +// Deserialize both json string samples into respective snapshots specially for metric queries. +template +inline error_s deserialize_metric_query_2_samples(const std::string &json_string_start, + const std::string &json_string_end, + TMetricQuerySnapshot &snapshot_start, + TMetricQuerySnapshot &snapshot_end) +{ + const auto &res = deserialize_metric_2_samples( + json_string_start, json_string_end, snapshot_start, snapshot_end); + if (!res) { + return res; + } + + if (snapshot_end.timestamp_ns <= snapshot_start.timestamp_ns) { + return FMT_ERR(dsn::ERR_INVALID_DATA, + "duration for metric samples should be > 0: timestamp_ns_start={}, " + "timestamp_ns_end={}", + snapshot_start.timestamp_ns, + snapshot_end.timestamp_ns); + } + + return error_s::ok(); +} + +// Deserialize both json string samples into respective snapshots specially for metric queries +// which are declared internally. +// // Currently only Gauge and Counter are considered to have "increase" and "rate", which means // samples are needed. Thus brief `value` field is enough. #define DESERIALIZE_METRIC_QUERY_BRIEF_2_SAMPLES( \ json_string_start, json_string_end, query_snapshot_start, query_snapshot_end) \ - DESERIALIZE_METRIC_QUERY_BRIEF_SNAPSHOT(value, json_string_start, query_snapshot_start); \ - DESERIALIZE_METRIC_QUERY_BRIEF_SNAPSHOT(value, json_string_end, query_snapshot_end); \ + dsn::metric_query_brief_value_snapshot query_snapshot_start; \ + dsn::metric_query_brief_value_snapshot query_snapshot_end; \ \ do { \ - if (query_snapshot_end.timestamp_ns <= query_snapshot_start.timestamp_ns) { \ - return FMT_ERR(dsn::ERR_INVALID_DATA, \ - "duration for metric samples should be > 0: timestamp_ns_start={}, " \ - "timestamp_ns_end={}", \ - query_snapshot_start.timestamp_ns, \ - query_snapshot_end.timestamp_ns); \ + const auto &res = deserialize_metric_query_2_samples( \ + json_string_start, json_string_end, query_snapshot_start, query_snapshot_end); \ + if (dsn_unlikely(!res)) { \ + return res; \ } \ } while (0) @@ -1747,16 +1801,16 @@ inline error_s parse_metric_attribute(const metric_entity::attr_map &attrs, const std::string &name, TAttrValue &value) { - const auto &iter = attrs.find(name); - if (dsn_unlikely(iter == attrs.end())) { + const auto *value_ptr = gutil::FindOrNull(attrs, name); + if (dsn_unlikely(value_ptr == nullptr)) { return FMT_ERR(dsn::ERR_INVALID_DATA, "{} field was not found", name); } - if (dsn_unlikely(!dsn::buf2numeric(iter->second, value))) { - return FMT_ERR(dsn::ERR_INVALID_DATA, "invalid {}: {}", name, iter->second); + if (dsn_unlikely(!dsn::buf2numeric(*value_ptr, value))) { + return FMT_ERR(dsn::ERR_INVALID_DATA, "invalid {}: {}", name, *value_ptr); } - return dsn::error_s::ok(); + return error_s::ok(); } inline error_s parse_metric_table_id(const metric_entity::attr_map &attrs, int32_t &table_id) diff --git a/src/utils/optional.h b/src/utils/optional.h index 0f2f97d9ad..630e323907 100644 --- a/src/utils/optional.h +++ b/src/utils/optional.h @@ -62,7 +62,7 @@ class optional that.reset(); } template - /*implicit*/ optional(Args &&... args) : _is_some(true) + /*implicit*/ optional(Args &&...args) : _is_some(true) { new (_data_placeholder) T{std::forward(args)...}; } @@ -90,7 +90,7 @@ class optional } } template - void reset(Args &&... args) + void reset(Args &&...args) { if (_is_some) { reinterpret_cast(_data_placeholder)->~T(); @@ -101,4 +101,4 @@ class optional } ~optional() { reset(); } }; -} +} // namespace dsn diff --git a/src/utils/preloadable.h b/src/utils/preloadable.h index afca5b065d..c547337121 100644 --- a/src/utils/preloadable.h +++ b/src/utils/preloadable.h @@ -35,5 +35,5 @@ class preloadable template T preloadable::s_instance; -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/priority_queue.h b/src/utils/priority_queue.h index 1c46d11bc1..b149f3a242 100644 --- a/src/utils/priority_queue.h +++ b/src/utils/priority_queue.h @@ -138,5 +138,5 @@ class blocking_priority_queue : public priority_queue private: semaphore _sema; }; -} -} // end namespace +} // namespace utils +} // namespace dsn diff --git a/src/utils/process_utils.cpp b/src/utils/process_utils.cpp index 2859560796..eaa42b69a8 100644 --- a/src/utils/process_utils.cpp +++ b/src/utils/process_utils.cpp @@ -32,8 +32,8 @@ #include "utils/process_utils.h" #include "utils/time_utils.h" -using std::ios_base; using std::ifstream; +using std::ios_base; using std::string; namespace dsn { @@ -114,5 +114,5 @@ const char *process_start_date_time_mills() { return record_process_start_time::s_instance.date_time_mills; } -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/process_utils.h b/src/utils/process_utils.h index 74d65adcb2..611e3d0698 100644 --- a/src/utils/process_utils.h +++ b/src/utils/process_utils.h @@ -74,5 +74,5 @@ inline int get_current_tid() /// uint64_t process_start_millis(); const char *process_start_date_time_mills(); -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/safe_strerror_posix.cpp b/src/utils/safe_strerror_posix.cpp index 45ea67dd4e..a95496af50 100644 --- a/src/utils/safe_strerror_posix.cpp +++ b/src/utils/safe_strerror_posix.cpp @@ -113,5 +113,5 @@ std::string safe_strerror(int err) safe_strerror_r(err, buf, sizeof(buf)); return std::string(buf); } -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/safe_strerror_posix.h b/src/utils/safe_strerror_posix.h index 872b163104..05c0dbf56a 100644 --- a/src/utils/safe_strerror_posix.h +++ b/src/utils/safe_strerror_posix.h @@ -32,5 +32,5 @@ void safe_strerror_r(int err, char *buf, size_t len); // more robust in the case of heap corruption errors, since it doesn't need to // allocate a string. std::string safe_strerror(int err); -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/simple_logger.cpp b/src/utils/simple_logger.cpp index d54e03025f..881b97ecd8 100644 --- a/src/utils/simple_logger.cpp +++ b/src/utils/simple_logger.cpp @@ -26,27 +26,43 @@ #include "utils/simple_logger.h" +#include // IWYU pragma: no_include #include -#include +#include +#include +#include +#include +#include +#include #include -#include -#include +#include +#include +#include #include -#include "absl/strings/string_view.h" +#include #include "runtime/api_layer1.h" #include "utils/command_manager.h" +#include "utils/errors.h" #include "utils/fail_point.h" #include "utils/filesystem.h" #include "utils/flags.h" #include "utils/fmt_logging.h" #include "utils/ports.h" #include "utils/process_utils.h" +#include "utils/safe_strerror_posix.h" #include "utils/string_conv.h" #include "utils/strings.h" #include "utils/time_utils.h" +DSN_DEFINE_uint64(tools.simple_logger, + max_log_file_bytes, + 64 * 1024 * 1024, + "The maximum bytes of a log file. A new log file will be created if the current " + "log file exceeds this size."); +DSN_DEFINE_validator(max_log_file_bytes, [](int32_t value) -> bool { return value > 0; }); + DSN_DEFINE_bool(tools.simple_logger, fast_flush, false, "Whether to flush logs immediately"); DSN_DEFINE_bool(tools.simple_logger, short_header, @@ -60,23 +76,38 @@ DSN_DEFINE_uint64( max_number_of_log_files_on_disk, 20, "The maximum number of log files to be reserved on disk, older logs are deleted automatically"); +DSN_DEFINE_validator(max_number_of_log_files_on_disk, + [](int32_t value) -> bool { return value > 0; }); + +DSN_DEFINE_string(tools.screen_logger, + stderr_start_level_on_stdout, + "LOG_LEVEL_WARNING", + "The lowest level of log messages to be copied to stderr in addition to stdout"); +DSN_DEFINE_validator(stderr_start_level_on_stdout, [](const char *value) -> bool { + const auto level = enum_from_string(value, LOG_LEVEL_INVALID); + return LOG_LEVEL_DEBUG <= level && level <= LOG_LEVEL_FATAL; +}); DSN_DEFINE_string( tools.simple_logger, stderr_start_level, "LOG_LEVEL_WARNING", "The lowest level of log messages to be copied to stderr in addition to log files"); -DSN_DEFINE_validator(stderr_start_level, [](const char *level) -> bool { - return !dsn::utils::equals(level, "LOG_LEVEL_INVALID"); +DSN_DEFINE_validator(stderr_start_level, [](const char *value) -> bool { + const auto level = enum_from_string(value, LOG_LEVEL_INVALID); + return LOG_LEVEL_DEBUG <= level && level <= LOG_LEVEL_FATAL; }); +DSN_DEFINE_string(tools.simple_logger, base_name, "pegasus", "The default base name for log file"); + DSN_DECLARE_string(logging_start_level); namespace dsn { namespace tools { -static void print_header(FILE *fp, log_level_t log_level) +namespace { +int print_header(FILE *fp, log_level_t stderr_start_level, log_level_t log_level) { - // The leading character of each log lines, corresponding to the log level + // The leading character of each log line, corresponding to the log level // D: Debug // I: Info // W: Warning @@ -89,16 +120,43 @@ static void print_header(FILE *fp, log_level_t log_level) dsn::utils::time_ms_to_string(ts / 1000000, time_str); int tid = dsn::utils::get_current_tid(); - fmt::print(fp, - "{}{} ({} {}) {}", - s_level_char[log_level], - time_str, - ts, - tid, - log_prefixed_message_func().c_str()); + const auto header = fmt::format( + "{}{} ({} {}) {}", s_level_char[log_level], time_str, ts, tid, log_prefixed_message_func()); + const int written_size = fmt::fprintf(fp, "%s", header.c_str()); + if (log_level >= stderr_start_level) { + fmt::fprintf(stderr, "%s", header.c_str()); + } + return written_size; } -namespace { +int print_long_header(FILE *fp, + const char *file, + const char *function, + const int line, + bool short_header, + log_level_t stderr_start_level, + log_level_t log_level) +{ + if (short_header) { + return 0; + } + + const auto long_header = fmt::format("{}:{}:{}(): ", file, line, function); + const int written_size = fmt::fprintf(fp, "%s", long_header.c_str()); + if (log_level >= stderr_start_level) { + fmt::fprintf(stderr, "%s", long_header.c_str()); + } + return written_size; +} + +int print_body(FILE *fp, const char *body, log_level_t stderr_start_level, log_level_t log_level) +{ + const int written_size = fmt::fprintf(fp, "%s\n", body); + if (log_level >= stderr_start_level) { + fmt::fprintf(stderr, "%s\n", body); + } + return written_size; +} inline void process_fatal_log(log_level_t log_level) { @@ -107,7 +165,7 @@ inline void process_fatal_log(log_level_t log_level) } bool coredump = true; - FAIL_POINT_INJECT_NOT_RETURN_F("coredump_for_fatal_log", [&coredump](absl::string_view str) { + FAIL_POINT_INJECT_NOT_RETURN_F("coredump_for_fatal_log", [&coredump](std::string_view str) { CHECK(buf2bool(str, coredump), "invalid coredump toggle for fatal log, should be true or false: {}", str); @@ -120,21 +178,39 @@ inline void process_fatal_log(log_level_t log_level) } // anonymous namespace -screen_logger::screen_logger(bool short_header) : _short_header(short_header) {} +screen_logger::screen_logger(const char *, const char *) + : logging_provider(enum_from_string(FLAGS_stderr_start_level_on_stdout, LOG_LEVEL_INVALID)), + _short_header(true) +{ +} -screen_logger::~screen_logger(void) {} +void screen_logger::print_header(log_level_t log_level) +{ + ::dsn::tools::print_header(stdout, _stderr_start_level, log_level); +} + +void screen_logger::print_long_header(const char *file, + const char *function, + const int line, + log_level_t log_level) +{ + ::dsn::tools::print_long_header( + stdout, file, function, line, _short_header, _stderr_start_level, log_level); +} + +void screen_logger::print_body(const char *body, log_level_t log_level) +{ + ::dsn::tools::print_body(stdout, body, _stderr_start_level, log_level); +} void screen_logger::log( const char *file, const char *function, const int line, log_level_t log_level, const char *str) { utils::auto_lock<::dsn::utils::ex_lock_nr> l(_lock); - print_header(stdout, log_level); - if (!_short_header) { - printf("%s:%d:%s(): ", file, line, function); - } - printf("%s\n", str); - + print_header(log_level); + print_long_header(file, function, line, log_level); + print_body(str, log_level); if (log_level >= LOG_LEVEL_ERROR) { ::fflush(stdout); } @@ -144,146 +220,205 @@ void screen_logger::log( void screen_logger::flush() { ::fflush(stdout); } -simple_logger::simple_logger(const char *log_dir) - : _log_dir(std::string(log_dir)), +simple_logger::simple_logger(const char *log_dir, const char *role_name) + : logging_provider(enum_from_string(FLAGS_stderr_start_level, LOG_LEVEL_INVALID)), + _log_dir(std::string(log_dir)), _log(nullptr), - // we assume all valid entries are positive - _start_index(0), - _index(1), - _lines(0), - _stderr_start_level(enum_from_string(FLAGS_stderr_start_level, LOG_LEVEL_INVALID)) + _file_bytes(0) { - // check existing log files - std::vector sub_list; - CHECK(dsn::utils::filesystem::get_subfiles(_log_dir, sub_list, false), - "Fail to get subfiles in {}", - _log_dir); - for (auto &fpath : sub_list) { - auto &&name = dsn::utils::filesystem::get_file_name(fpath); - if (name.length() <= 8 || name.substr(0, 4) != "log.") { - continue; - } + // Use 'role_name' if it is specified, otherwise use 'base_name'. + const std::string symlink_name( + fmt::format("{}.log", utils::is_empty(role_name) ? FLAGS_base_name : role_name)); + _file_name_prefix = fmt::format("{}.", symlink_name); + _symlink_path = utils::filesystem::path_combine(_log_dir, symlink_name); - int index; - if (1 != sscanf(name.c_str(), "log.%d.txt", &index) || index <= 0) { - continue; - } + create_log_file(); - if (index > _index) { - _index = index; - } + static std::once_flag flag; + std::call_once(flag, [&]() { + ::dsn::command_manager::instance().add_global_cmd( + ::dsn::command_manager::instance().register_single_command( + "flush-log", + "Flush log to stderr or file", + "", + [this](const std::vector &args) { + this->flush(); + return "Flush done."; + })); + + ::dsn::command_manager::instance().add_global_cmd( + ::dsn::command_manager::instance().register_single_command( + "reset-log-start-level", + "Reset the log start level", + "[DEBUG | INFO | WARNING | ERROR | FATAL]", + [](const std::vector &args) { + log_level_t start_level; + if (args.size() == 0) { + start_level = + enum_from_string(FLAGS_logging_start_level, LOG_LEVEL_INVALID); + } else { + std::string level_str = "LOG_LEVEL_" + args[0]; + start_level = enum_from_string(level_str.c_str(), LOG_LEVEL_INVALID); + if (start_level == LOG_LEVEL_INVALID) { + return "ERROR: invalid level '" + args[0] + "'"; + } + } + set_log_start_level(start_level); + return std::string("OK, current level is ") + enum_to_string(start_level); + })); + }); +} - if (_start_index == 0 || index < _start_index) { - _start_index = index; - } +void simple_logger::create_log_file() +{ + // Close the current log file if it is opened. + if (_log != nullptr) { + ::fclose(_log); + _log = nullptr; } - sub_list.clear(); - if (_start_index == 0) { - _start_index = _index; - } else { - ++_index; + // Reset the file size. + _file_bytes = 0; + + // Open the new log file. + uint64_t ts = dsn::utils::get_current_physical_time_ns(); + std::string time_str; + ::dsn::utils::time_ms_to_sequent_string(ts / 1000000, time_str); + const std::string file_name(fmt::format("{}{}", _file_name_prefix, time_str)); + const std::string path(utils::filesystem::path_combine(_log_dir, file_name)); + _log = ::fopen(path.c_str(), "w+"); + CHECK_NOTNULL(_log, "Failed to fopen {}: {}", path, dsn::utils::safe_strerror(errno)); + + // Unlink the latest log file. + if (::unlink(_symlink_path.c_str()) != 0) { + if (errno != ENOENT) { + fmt::print(stderr, + "Failed to unlink {}: {}\n", + _symlink_path, + dsn::utils::safe_strerror(errno)); + } } - create_log_file(); + // Create a new symlink to the newly created log file. + if (::symlink(file_name.c_str(), _symlink_path.c_str()) != 0) { + fmt::print(stderr, + "Failed to symlink {} as {}: {}\n", + file_name, + _symlink_path, + dsn::utils::safe_strerror(errno)); + } - // TODO(yingchun): simple_logger is destroyed after command_manager, so will cause crash like - // "assertion expression: [_handlers.empty()] All commands must be deregistered before - // command_manager is destroyed, however 'flush-log' is still registered". - // We need to fix it. - _cmds.emplace_back(::dsn::command_manager::instance().register_single_command( - "flush-log", - "Flush log to stderr or file", - "", - [this](const std::vector &args) { - this->flush(); - return "Flush done."; - })); - - _cmds.emplace_back(::dsn::command_manager::instance().register_single_command( - "reset-log-start-level", - "Reset the log start level", - "[DEBUG | INFO | WARNING | ERROR | FATAL]", - [](const std::vector &args) { - log_level_t start_level; - if (args.size() == 0) { - start_level = enum_from_string(FLAGS_logging_start_level, LOG_LEVEL_INVALID); - } else { - std::string level_str = "LOG_LEVEL_" + args[0]; - start_level = enum_from_string(level_str.c_str(), LOG_LEVEL_INVALID); - if (start_level == LOG_LEVEL_INVALID) { - return "ERROR: invalid level '" + args[0] + "'"; - } - } - set_log_start_level(start_level); - return std::string("OK, current level is ") + enum_to_string(start_level); - })); + // Remove redundant log files. + remove_redundant_files(); } -void simple_logger::create_log_file() +void simple_logger::remove_redundant_files() { - if (_log != nullptr) { - ::fclose(_log); + // Collect log files. + const auto file_path_pattern = + fmt::format("{}*", utils::filesystem::path_combine(_log_dir, _file_name_prefix)); + std::vector matching_files; + const auto es = dsn::utils::filesystem::glob(file_path_pattern, matching_files); + if (!es) { + fmt::print( + stderr, "{}: Failed to glob '{}', error \n", es.description(), file_path_pattern); + return; } - _lines = 0; - - std::stringstream str; - str << _log_dir << "/log." << _index++ << ".txt"; - _log = ::fopen(str.str().c_str(), "w+"); - - // TODO: move gc out of criticial path - while (_index - _start_index > FLAGS_max_number_of_log_files_on_disk) { - std::stringstream str2; - str2 << "log." << _start_index++ << ".txt"; - auto dp = utils::filesystem::path_combine(_log_dir, str2.str()); - if (utils::filesystem::file_exists(dp)) { - if (::remove(dp.c_str()) != 0) { - // if remove failed, just print log and ignore it. - printf("Failed to remove garbage log file %s\n", dp.c_str()); - } + // Skip if the number of log files is not exceeded. + auto max_matches = static_cast(FLAGS_max_number_of_log_files_on_disk); + if (matching_files.size() <= max_matches) { + return; + } + + // Collect mtimes of log files. + std::vector> matching_file_mtimes; + for (auto &matching_file_path : matching_files) { + struct stat s; + if (::stat(matching_file_path.c_str(), &s) != 0) { + fmt::print(stderr, + "Failed to stat {}: {}\n", + matching_file_path, + dsn::utils::safe_strerror(errno)); + continue; + } + +#ifdef __APPLE__ + int64_t mtime = s.st_mtimespec.tv_sec * 1000000 + s.st_mtimespec.tv_nsec / 1000; +#else + int64_t mtime = s.st_mtim.tv_sec * 1000000 + s.st_mtim.tv_nsec / 1000; +#endif + matching_file_mtimes.emplace_back(mtime, std::move(matching_file_path)); + } + + // Use mtime to determine which matching files to delete. This could + // potentially be ambiguous, depending on the resolution of last-modified + // timestamp in the filesystem, but that is part of the contract. + std::sort(matching_file_mtimes.begin(), matching_file_mtimes.end()); + matching_file_mtimes.resize(matching_file_mtimes.size() - max_matches); + + // Remove redundant log files. + for (const auto &[_, matching_file] : matching_file_mtimes) { + if (::remove(matching_file.c_str()) != 0) { + // If remove failed, just print log and ignore it. + fmt::print(stderr, + "Failed to remove redundant log file {}: {}\n", + matching_file, + dsn::utils::safe_strerror(errno)); } } } -simple_logger::~simple_logger(void) +simple_logger::~simple_logger() { utils::auto_lock<::dsn::utils::ex_lock> l(_lock); ::fclose(_log); + _log = nullptr; } void simple_logger::flush() { utils::auto_lock<::dsn::utils::ex_lock> l(_lock); ::fflush(_log); + ::fflush(stderr); ::fflush(stdout); } +void simple_logger::print_header(log_level_t log_level) +{ + add_bytes_if_valid(::dsn::tools::print_header(_log, _stderr_start_level, log_level)); +} + +void simple_logger::print_long_header(const char *file, + const char *function, + const int line, + log_level_t log_level) +{ + add_bytes_if_valid(::dsn::tools::print_long_header( + _log, file, function, line, FLAGS_short_header, _stderr_start_level, log_level)); +} + +void simple_logger::print_body(const char *body, log_level_t log_level) +{ + add_bytes_if_valid(::dsn::tools::print_body(_log, body, _stderr_start_level, log_level)); +} + void simple_logger::log( const char *file, const char *function, const int line, log_level_t log_level, const char *str) { utils::auto_lock<::dsn::utils::ex_lock> l(_lock); - print_header(_log, log_level); - if (!FLAGS_short_header) { - fprintf(_log, "%s:%d:%s(): ", file, line, function); - } - fprintf(_log, "%s\n", str); + CHECK_NOTNULL(_log, "Log file hasn't been initialized yet"); + print_header(log_level); + print_long_header(file, function, line, log_level); + print_body(str, log_level); if (FLAGS_fast_flush || log_level >= LOG_LEVEL_ERROR) { ::fflush(_log); } - if (log_level >= _stderr_start_level) { - print_header(stdout, log_level); - if (!FLAGS_short_header) { - printf("%s:%d:%s(): ", file, line, function); - } - printf("%s\n", str); - } - process_fatal_log(log_level); - if (++_lines >= 200000) { + if (_file_bytes >= FLAGS_max_log_file_bytes) { create_log_file(); } } diff --git a/src/utils/simple_logger.h b/src/utils/simple_logger.h index e9657a8018..5437c0fe6a 100644 --- a/src/utils/simple_logger.h +++ b/src/utils/simple_logger.h @@ -26,11 +26,13 @@ #pragma once +#include #include #include #include "utils/api_utilities.h" #include "utils/logging_provider.h" +#include "utils/ports.h" #include "utils/synchronize.h" namespace dsn { @@ -42,8 +44,8 @@ namespace tools { class screen_logger : public logging_provider { public: - explicit screen_logger(bool short_header); - ~screen_logger() override; + explicit screen_logger(const char *, const char *); + ~screen_logger() override = default; void log(const char *file, const char *function, @@ -54,6 +56,13 @@ class screen_logger : public logging_provider virtual void flush(); private: + void print_header(log_level_t log_level); + void print_long_header(const char *file, + const char *function, + const int line, + log_level_t log_level); + void print_body(const char *body, log_level_t log_level); + ::dsn::utils::ex_lock_nr _lock; const bool _short_header; }; @@ -65,7 +74,9 @@ class screen_logger : public logging_provider class simple_logger : public logging_provider { public: - simple_logger(const char *log_dir); + // 'log_dir' is the directory to store log files, 'role_name' is the name of the process, + // such as 'replica_server', 'meta_server' in Pegasus. + simple_logger(const char *log_dir, const char *role_name); ~simple_logger() override; void log(const char *file, @@ -77,17 +88,37 @@ class simple_logger : public logging_provider void flush() override; private: + void print_header(log_level_t log_level); + void print_long_header(const char *file, + const char *function, + const int line, + log_level_t log_level); + void print_body(const char *body, log_level_t log_level); + + inline void add_bytes_if_valid(int bytes) + { + if (dsn_likely(bytes > 0)) { + _file_bytes += static_cast(bytes); + } + } + void create_log_file(); + void remove_redundant_files(); private: ::dsn::utils::ex_lock _lock; // use recursive lock to avoid dead lock when flush() is called // in signal handler if cored for bad logging format reason. + // The directory to store log files. const std::string _log_dir; + // The path of the symlink to the latest log file. + std::string _symlink_path; + // The prefix of the log file names. The actual log files are prefixed by '_file_name_prefix' + // and postfixed by timestamp. + std::string _file_name_prefix; + // The current log file descriptor. FILE *_log; - int _start_index; - int _index; - int _lines; - log_level_t _stderr_start_level; + // The byte size of the current log file. + uint64_t _file_bytes; }; } // namespace tools } // namespace dsn diff --git a/src/utils/singleton_store.h b/src/utils/singleton_store.h index 1db4bb008c..7c4257b4ca 100644 --- a/src/utils/singleton_store.h +++ b/src/utils/singleton_store.h @@ -120,5 +120,5 @@ class safe_singleton_store }; //------------- inline implementation ---------- -} -} // end namespace dsn::utils +} // namespace utils +} // namespace dsn diff --git a/src/utils/string_conv.h b/src/utils/string_conv.h index a92b1e6bb3..16b0793b28 100644 --- a/src/utils/string_conv.h +++ b/src/utils/string_conv.h @@ -27,14 +27,14 @@ #include #include -#include "absl/strings/string_view.h" +#include namespace dsn { namespace internal { template -bool buf2signed(absl::string_view buf, T &result) +bool buf2signed(std::string_view buf, T &result) { static_assert(std::is_signed::value, "buf2signed works only with signed integer"); @@ -65,7 +65,7 @@ bool buf2signed(absl::string_view buf, T &result) } template -bool buf2unsigned(absl::string_view buf, T &result) +bool buf2unsigned(std::string_view buf, T &result) { static_assert(std::is_unsigned::value, "buf2unsigned works only with unsigned integer"); @@ -104,32 +104,32 @@ bool buf2unsigned(absl::string_view buf, T &result) /// buf2*: `result` will keep unmodified if false is returned. -inline bool buf2int32(absl::string_view buf, int32_t &result) +inline bool buf2int32(std::string_view buf, int32_t &result) { return internal::buf2signed(buf, result); } -inline bool buf2int64(absl::string_view buf, int64_t &result) +inline bool buf2int64(std::string_view buf, int64_t &result) { return internal::buf2signed(buf, result); } -inline bool buf2uint32(absl::string_view buf, uint32_t &result) +inline bool buf2uint32(std::string_view buf, uint32_t &result) { return internal::buf2unsigned(buf, result); } -inline bool buf2uint64(absl::string_view buf, uint64_t &result) +inline bool buf2uint64(std::string_view buf, uint64_t &result) { return internal::buf2unsigned(buf, result); } -inline bool buf2uint16(absl::string_view buf, uint16_t &result) +inline bool buf2uint16(std::string_view buf, uint16_t &result) { return internal::buf2unsigned(buf, result); } -inline bool buf2bool(absl::string_view buf, bool &result, bool ignore_case = true) +inline bool buf2bool(std::string_view buf, bool &result, bool ignore_case = true) { std::string data(buf.data(), buf.length()); if (ignore_case) { @@ -146,7 +146,7 @@ inline bool buf2bool(absl::string_view buf, bool &result, bool ignore_case = tru return false; } -inline bool buf2double(absl::string_view buf, double &result) +inline bool buf2double(std::string_view buf, double &result) { if (buf.empty()) { return false; @@ -175,7 +175,7 @@ inline bool buf2double(absl::string_view buf, double &result) } #define DEF_BUF2NUMERIC_FUNC(type, postfix) \ - inline bool buf2numeric(absl::string_view buf, type &result) \ + inline bool buf2numeric(std::string_view buf, type &result) \ { \ return buf2##postfix(buf, result); \ } diff --git a/src/utils/strings.cpp b/src/utils/strings.cpp index c7c48a9c36..aa73ff520c 100644 --- a/src/utils/strings.cpp +++ b/src/utils/strings.cpp @@ -172,7 +172,7 @@ struct SequenceInserter // The new element is constructed through variadic template and appended at the end // of the sequence container. template - void emplace(SequenceContainer &container, Args &&... args) const + void emplace(SequenceContainer &container, Args &&...args) const { container.emplace_back(std::forward(args)...); } @@ -184,7 +184,7 @@ struct AssociativeInserter // The new element is constructed through variadic template and inserted into the associative // container. template - void emplace(AssociativeContainer &container, Args &&... args) const + void emplace(AssociativeContainer &container, Args &&...args) const { container.emplace(std::forward(args)...); } diff --git a/src/utils/synchronize.h b/src/utils/synchronize.h index 8f1a66cd20..3f863b2c02 100644 --- a/src/utils/synchronize.h +++ b/src/utils/synchronize.h @@ -41,6 +41,7 @@ class ex_lock __inline void lock() { _lock.lock(); } __inline bool try_lock() { return _lock.tryLock(); } __inline void unlock() { _lock.unlock(); } + private: RecursiveBenaphore _lock; }; @@ -51,6 +52,7 @@ class ex_lock_nr __inline void lock() { _lock.lock(); } __inline bool try_lock() { return _lock.tryLock(); } __inline void unlock() { _lock.unlock(); } + private: NonRecursiveBenaphore _lock; }; @@ -179,5 +181,5 @@ class auto_write_lock private: rw_lock_nr *_lock; }; -} -} +} // namespace utils +} // namespace dsn diff --git a/src/utils/test/TokenBucketTest.cpp b/src/utils/test/TokenBucketTest.cpp index aea2e739de..685b43a0d8 100644 --- a/src/utils/test/TokenBucketTest.cpp +++ b/src/utils/test/TokenBucketTest.cpp @@ -72,7 +72,10 @@ TEST_P(TokenBucketTest, sanity) } static std::vector> rateToConsumeSize = { - {100, 1}, {1000, 1}, {10000, 1}, {10000, 5}, + {100, 1}, + {1000, 1}, + {10000, 1}, + {10000, 5}, }; INSTANTIATE_TEST_SUITE_P(TokenBucket, TokenBucketTest, ::testing::ValuesIn(rateToConsumeSize)); diff --git a/src/utils/test/blob_test.cpp b/src/utils/test/blob_test.cpp new file mode 100644 index 0000000000..586a7d68d9 --- /dev/null +++ b/src/utils/test/blob_test.cpp @@ -0,0 +1,145 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include +#include +#include + +#include "gtest/gtest.h" +#include "utils/blob.h" + +namespace dsn { + +TEST(BlobTest, CreateFromZeroLengthNullptr) +{ + const auto &obj = blob::create_from_bytes(nullptr, 0); + + EXPECT_EQ(0, obj.length()); + EXPECT_EQ(0, obj.size()); +} + +#ifndef NDEBUG + +TEST(BlobTest, CreateFromNonZeroLengthNullptr) +{ + ASSERT_DEATH({ const auto &obj = blob::create_from_bytes(nullptr, 1); }, + "null source pointer with non-zero length would lead to " + "undefined behaviour"); +} + +#endif + +struct blob_base_case +{ + std::string expected_str; +}; + +class BlobBaseTest : public testing::TestWithParam +{ +public: + BlobBaseTest() + { + const auto &test_case = GetParam(); + _expected_str = test_case.expected_str; + } + + void check_blob_value(const blob &obj) const + { + EXPECT_EQ(_expected_str, obj.to_string()); + + EXPECT_EQ(_expected_str.size(), obj.length()); + EXPECT_EQ(_expected_str.size(), obj.size()); + + if (_expected_str.empty()) { + EXPECT_TRUE(obj.empty()); + } else { + EXPECT_FALSE(obj.empty()); + } + } + +protected: + std::string _expected_str; +}; + +const std::vector blob_base_tests = { + // Test empty case. + {""}, + // Test non-empty case. + {"hello"}, +}; + +class BlobCreateTest : public BlobBaseTest +{ +}; + +TEST_P(BlobCreateTest, CreateFromCString) +{ + const auto &obj = blob::create_from_bytes(_expected_str.data(), _expected_str.size()); + check_blob_value(obj); +} + +TEST_P(BlobCreateTest, CreateFromString) +{ + const auto &obj = blob::create_from_bytes(std::string(_expected_str)); + check_blob_value(obj); +} + +INSTANTIATE_TEST_SUITE_P(BlobTest, BlobCreateTest, testing::ValuesIn(blob_base_tests)); + +class BlobInitTest : public BlobBaseTest +{ +public: + blob create() { return blob::create_from_bytes(std::string(_expected_str)); } +}; + +TEST_P(BlobInitTest, CopyConstructor) +{ + const auto &obj = create(); + + blob copy(obj); + check_blob_value(copy); +} + +TEST_P(BlobInitTest, CopyAssignment) +{ + const auto &obj = create(); + + blob copy; + copy = obj; + check_blob_value(copy); +} + +TEST_P(BlobInitTest, MoveConstructor) +{ + auto obj = create(); + + blob move(std::move(obj)); + check_blob_value(move); +} + +TEST_P(BlobInitTest, MoveAssignment) +{ + auto obj = create(); + + blob move; + move = std::move(obj); + check_blob_value(move); +} + +INSTANTIATE_TEST_SUITE_P(BlobTest, BlobInitTest, testing::ValuesIn(blob_base_tests)); + +} // namespace dsn diff --git a/src/utils/test/clear.sh b/src/utils/test/clear.sh index d113af3dbb..cc0c873c1b 100755 --- a/src/utils/test/clear.sh +++ b/src/utils/test/clear.sh @@ -24,4 +24,4 @@ # THE SOFTWARE. -rm -rf dsn.utils.tests.xml log*.txt +rm -rf dsn.utils.tests.xml pegasus*.txt diff --git a/src/utils/test/env.cpp b/src/utils/test/env.cpp index 3f0cd9c078..ee147eefb6 100644 --- a/src/utils/test/env.cpp +++ b/src/utils/test/env.cpp @@ -32,10 +32,10 @@ #include #include +#include "common/gpid.h" #include "gtest/gtest.h" #include "test_util/test_util.h" #include "utils/env.h" -#include "utils/error_code.h" #include "utils/filesystem.h" #include "utils/flags.h" #include "utils/rand.h" diff --git a/src/utils/test/fail_point_test.cpp b/src/utils/test/fail_point_test.cpp index 62b1c0c507..e04d39f54c 100644 --- a/src/utils/test/fail_point_test.cpp +++ b/src/utils/test/fail_point_test.cpp @@ -33,7 +33,7 @@ #include "gtest/gtest.h" #include "utils/fail_point.h" #include "utils/fail_point_impl.h" -#include "absl/strings/string_view.h" +#include namespace dsn { namespace fail { @@ -119,12 +119,12 @@ TEST(fail_point, parse) int test_func() { - FAIL_POINT_INJECT_F("test_1", [](absl::string_view str) -> int { + FAIL_POINT_INJECT_F("test_1", [](std::string_view str) -> int { EXPECT_EQ(str, "1"); return 1; }); - FAIL_POINT_INJECT_F("test_2", [](absl::string_view str) -> int { + FAIL_POINT_INJECT_F("test_2", [](std::string_view str) -> int { EXPECT_EQ(str, "2"); return 2; }); @@ -148,7 +148,7 @@ TEST(fail_point, macro_use) void test_func_return_void(int &a) { - FAIL_POINT_INJECT_F("test_1", [](absl::string_view str) {}); + FAIL_POINT_INJECT_F("test_1", [](std::string_view str) {}); a++; } TEST(fail_point, return_void) diff --git a/src/utils/test/file_system_test.cpp b/src/utils/test/file_system_test.cpp index 66a224f9e5..0a71272648 100644 --- a/src/utils/test/file_system_test.cpp +++ b/src/utils/test/file_system_test.cpp @@ -18,9 +18,12 @@ #include #include #include +#include #include #include #include +#include +#include #include "gtest/gtest.h" #include "utils/env.h" @@ -175,6 +178,40 @@ TEST(filesystem_test, verify_file_test) remove_path(fname); } +TEST(filesystem_test, absolute_path_test) +{ + const std::string kTestDir = "absolute_path_test"; + ASSERT_TRUE(create_directory(kTestDir)); + ASSERT_FALSE(is_absolute_path(kTestDir)); + + std::string abs_path; + ASSERT_TRUE(get_absolute_path(kTestDir, abs_path)); + ASSERT_TRUE(is_absolute_path(abs_path)); +} + +TEST(filesystem_test, glob_test) +{ + const std::string kTestDir = "glob_test"; + ASSERT_TRUE(create_directory(kTestDir)); + std::vector filenames = {"fuzz", "fuzzy", "fuzzyiest", "buzz"}; + std::vector> matchers = { + {"file", 0}, + {"fuzz", 1}, + {"fuzz*", 3}, + {"?uzz", 2}, + }; + + for (const auto &name : filenames) { + ASSERT_TRUE(create_file(path_combine(kTestDir, name))); + } + + for (const auto &[path_pattern, matched_count] : matchers) { + std::vector matches; + ASSERT_TRUE(glob(path_combine(kTestDir, path_pattern), matches)) << path_pattern; + ASSERT_EQ(matched_count, matches.size()) << path_pattern; + } +} + } // namespace filesystem } // namespace utils } // namespace dsn diff --git a/src/utils/test/fmt_logging_test.cpp b/src/utils/test/fmt_logging_test.cpp index 95e7d0dfd9..ae65906b1d 100644 --- a/src/utils/test/fmt_logging_test.cpp +++ b/src/utils/test/fmt_logging_test.cpp @@ -27,13 +27,14 @@ #include #include +#include #include "common/gpid.h" #include "common/replication.codes.h" #include "gtest/gtest.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "utils/error_code.h" #include "utils/errors.h" -#include "absl/strings/string_view.h" +#include "utils/fmt_logging.h" namespace dsn { namespace replication { @@ -45,8 +46,9 @@ TEST(fmt_logging, basic) ASSERT_EQ(fmt::format("{}", error_s::make(ERR_TIMEOUT, "yes")), "ERR_TIMEOUT: yes"); ASSERT_EQ(fmt::format("{}", ERR_OK), "ERR_OK"); ASSERT_EQ(fmt::format("{}", LPC_REPLICATION_LOW), "LPC_REPLICATION_LOW"); - ASSERT_EQ(absl::string_view("yes"), "yes"); - ASSERT_EQ(fmt::format("{}", absl::string_view("yes\0yes")), "yes\0yes"); + ASSERT_EQ(std::string_view("yes"), "yes"); + ASSERT_EQ(fmt::format("{}", std::string_view("yes\0yes")), "yes\0yes"); + ASSERT_DEATH(CHECK(false, "CHECK false in test"), "CHECK false in test"); } } // namespace replication diff --git a/src/utils/test/json_helper_test.cpp b/src/utils/test/json_helper_test.cpp index 6389e7b0ea..6027264931 100644 --- a/src/utils/test/json_helper_test.cpp +++ b/src/utils/test/json_helper_test.cpp @@ -36,7 +36,7 @@ #include "common/json_helper.h" #include "gtest/gtest.h" -#include "runtime/rpc/rpc_host_port.h" +#include "rpc/rpc_host_port.h" #include "utils/blob.h" namespace dsn { diff --git a/src/utils/test/logger.cpp b/src/utils/test/logger.cpp index 70d6ef1e2b..a06d85ef18 100644 --- a/src/utils/test/logger.cpp +++ b/src/utils/test/logger.cpp @@ -24,107 +24,146 @@ * THE SOFTWARE. */ -#include -#include #include -#include #include -#include #include +#include +#include #include #include #include #include "gtest/gtest.h" +#include "gutil/map_util.h" #include "utils/api_utilities.h" #include "utils/filesystem.h" #include "utils/flags.h" -#include "utils/logging_provider.h" -#include "utils/safe_strerror_posix.h" #include "utils/simple_logger.h" +#include "utils/test_macros.h" DSN_DECLARE_uint64(max_number_of_log_files_on_disk); namespace dsn { namespace tools { - -namespace { - -void get_log_file_index(std::vector &log_index) +class logger_test : public testing::Test { - std::vector sub_list; - ASSERT_TRUE(dsn::utils::filesystem::get_subfiles("./", sub_list, false)); +public: + void SetUp() override + { + std::string cwd; + ASSERT_TRUE(dsn::utils::filesystem::get_current_directory(cwd)); + // NOTE: Don't name the dir with "test", otherwise the whole utils test dir would be + // removed. + test_dir = dsn::utils::filesystem::path_combine(cwd, "logger_test"); + + NO_FATALS(prepare_test_dir()); + std::set files; + NO_FATALS(get_log_files(files)); + NO_FATALS(clear_files(files)); + } - for (const auto &path : sub_list) { - const auto &name = dsn::utils::filesystem::get_file_name(path); - if (!boost::algorithm::starts_with(name, "log.")) { - continue; - } - if (!boost::algorithm::ends_with(name, ".txt")) { - continue; + void get_log_files(std::set &file_names) + { + std::vector sub_list; + ASSERT_TRUE(utils::filesystem::get_subfiles(test_dir, sub_list, false)); + + file_names.clear(); + std::regex pattern(R"(SimpleLogger\.log\.[0-9]{8}_[0-9]{6}_[0-9]{3})"); + for (const auto &path : sub_list) { + std::string name(utils::filesystem::get_file_name(path)); + if (std::regex_match(name, pattern)) { + ASSERT_TRUE(gutil::InsertIfNotPresent(&file_names, name)); + } } + } - int index; - if (1 != sscanf(name.c_str(), "log.%d.txt", &index)) { - continue; + void compare_log_files(const std::set &before_files, + const std::set &after_files) + { + ASSERT_FALSE(after_files.empty()); + + // One new log file is created. + if (after_files.size() == before_files.size() + 1) { + // All the file names are the same. + for (auto it1 = before_files.begin(), it2 = after_files.begin(); + it1 != before_files.end(); + ++it1, ++it2) { + ASSERT_EQ(*it1, *it2); + } + // The number of log files is the same, but they have rolled. + } else if (after_files.size() == before_files.size()) { + auto it1 = before_files.begin(); + auto it2 = after_files.begin(); + // The first file is different, the one in 'before_files' is older. + ASSERT_NE(*it1, *it2); + + // The rest of the files are the same. + for (++it1; it1 != before_files.end(); ++it1, ++it2) { + ASSERT_EQ(*it1, *it2); + } + } else { + ASSERT_TRUE(false) << "Invalid number of log files, before=" << before_files.size() + << ", after=" << after_files.size(); } - log_index.push_back(index); } -} -// Don't name the dir with "./test", otherwise the whole utils test dir would be removed. -const std::string kTestDir("./test_logger"); + void clear_files(const std::set &file_names) + { + for (const auto &file_name : file_names) { + ASSERT_TRUE(dsn::utils::filesystem::remove_path(file_name)); + } + } -void prepare_test_dir() -{ - ASSERT_TRUE(dsn::utils::filesystem::create_directory(kTestDir)); - ASSERT_EQ(0, ::chdir(kTestDir.c_str())); -} + void prepare_test_dir() + { + ASSERT_TRUE(dsn::utils::filesystem::create_directory(test_dir)) << test_dir; + } -void remove_test_dir() -{ - ASSERT_EQ(0, ::chdir("..")) << "chdir failed, err = " << dsn::utils::safe_strerror(errno); - ASSERT_TRUE(dsn::utils::filesystem::remove_path(kTestDir)) << "remove_directory " << kTestDir - << " failed"; -} + void remove_test_dir() + { + ASSERT_TRUE(dsn::utils::filesystem::remove_path(test_dir)) << test_dir; + } -} // anonymous namespace +public: + std::string test_dir; +}; #define LOG_PRINT(logger, ...) \ (logger)->log( \ __FILE__, __FUNCTION__, __LINE__, LOG_LEVEL_DEBUG, fmt::format(__VA_ARGS__).c_str()) -TEST(LoggerTest, SimpleLogger) +TEST_F(logger_test, screen_logger_test) { - // Deregister commands to avoid re-register error. - dsn::logging_provider::instance()->deregister_commands(); - - { - auto logger = std::make_unique(true); - LOG_PRINT(logger.get(), "{}", "test_print"); - std::thread t([](screen_logger *lg) { LOG_PRINT(lg, "{}", "test_print"); }, logger.get()); - t.join(); - - logger->flush(); - } - - prepare_test_dir(); + auto logger = std::make_unique(nullptr, nullptr); + LOG_PRINT(logger.get(), "{}", "test_print"); + std::thread t([](screen_logger *lg) { LOG_PRINT(lg, "{}", "test_print"); }, logger.get()); + t.join(); + logger->flush(); +} +TEST_F(logger_test, redundant_log_test) +{ // Create redundant log files to test if their number could be restricted. for (unsigned int i = 0; i < FLAGS_max_number_of_log_files_on_disk + 10; ++i) { - auto logger = std::make_unique("./"); + std::set before_files; + NO_FATALS(get_log_files(before_files)); + + auto logger = std::make_unique(test_dir.c_str(), "SimpleLogger"); for (unsigned int i = 0; i != 1000; ++i) { LOG_PRINT(logger.get(), "{}", "test_print"); } logger->flush(); - } - std::vector index; - get_log_file_index(index); - ASSERT_FALSE(index.empty()); - ASSERT_EQ(FLAGS_max_number_of_log_files_on_disk, index.size()); + std::set after_files; + NO_FATALS(get_log_files(after_files)); + NO_FATALS(compare_log_files(before_files, after_files)); + ::usleep(2000); + } - remove_test_dir(); + std::set files; + NO_FATALS(get_log_files(files)); + ASSERT_FALSE(files.empty()); + ASSERT_EQ(FLAGS_max_number_of_log_files_on_disk, files.size()); } } // namespace tools diff --git a/src/utils/test/long_adder_test.cpp b/src/utils/test/long_adder_test.cpp index a6a910a92a..0fe82e188b 100644 --- a/src/utils/test/long_adder_test.cpp +++ b/src/utils/test/long_adder_test.cpp @@ -150,7 +150,7 @@ class long_adder_test // Define runner to time each case auto runner = [num_operations, num_threads]( - const char *name, std::function func, int64_t &result) { + const char *name, std::function func, int64_t &result) { uint64_t start = dsn_now_ns(); func(result); uint64_t end = dsn_now_ns(); diff --git a/src/utils/test/main.cpp b/src/utils/test/main.cpp index 2be6302e1a..0bdfb7f7a5 100644 --- a/src/utils/test/main.cpp +++ b/src/utils/test/main.cpp @@ -25,7 +25,7 @@ GTEST_API_ int main(int argc, char **argv) { testing::InitGoogleTest(&argc, argv); - dsn_log_init("dsn::tools::simple_logger", "./", nullptr); + dsn_log_init("dsn::tools::simple_logger", "./", "test", nullptr); dsn::flags_initialize(); diff --git a/src/utils/test/metrics_test.cpp b/src/utils/test/metrics_test.cpp index 9f3a4d1909..a9047b50f3 100644 --- a/src/utils/test/metrics_test.cpp +++ b/src/utils/test/metrics_test.cpp @@ -35,10 +35,11 @@ #include "http/http_message_parser.h" #include "percentile_utils.h" -#include "runtime/rpc/message_parser.h" -#include "runtime/rpc/rpc_message.h" +#include "rpc/message_parser.h" +#include "rpc/rpc_message.h" #include "utils/errors.h" #include "utils/flags.h" +#include "gutil/map_util.h" #include "utils/rand.h" #include "utils/strings.h" #include "utils/test/nth_element_utils.h" @@ -246,7 +247,7 @@ TEST(metrics_test, create_entity) auto attrs = entity->attributes(); ASSERT_EQ(attrs, test.entity_attrs); - ASSERT_EQ(entities.find(test.entity_id), entities.end()); + ASSERT_TRUE(!gutil::ContainsKey(entities, test.entity_id)); entities[test.entity_id] = entity; } @@ -313,25 +314,21 @@ TEST(metrics_test, create_metric) my_metric = test.prototype->instantiate(test.entity, test.value); } - ASSERT_EQ(my_metric->value(), test.value); + ASSERT_EQ(test.value, my_metric->value()); - auto iter = expected_entities.find(test.entity.get()); - if (iter == expected_entities.end()) { - expected_entities[test.entity.get()] = {{test.prototype, my_metric}}; - } else { - iter->second[test.prototype] = my_metric; - } + auto &iter = gutil::LookupOrInsert(&expected_entities, test.entity.get(), {}); + iter.emplace(test.prototype, my_metric); } entity_map actual_entities; - auto entities = metric_registry::instance().entities(); - for (const auto &entity : entities) { - if (expected_entities.find(entity.second.get()) != expected_entities.end()) { - actual_entities[entity.second.get()] = entity.second->metrics(); + const auto entities = metric_registry::instance().entities(); + for (const auto &[_, entity] : entities) { + if (gutil::ContainsKey(expected_entities, entity.get())) { + actual_entities[entity.get()] = entity->metrics(); } } - ASSERT_EQ(actual_entities, expected_entities); + ASSERT_EQ(expected_entities, actual_entities); } TEST(metrics_test, recreate_metric) @@ -778,7 +775,7 @@ void run_percentile(const metric_entity_ptr &my_entity, std::vector actual_elements; for (const auto &kth : kAllKthPercentileTypes) { T value; - if (kth_percentiles.find(kth) == kth_percentiles.end()) { + if (!gutil::ContainsKey(kth_percentiles, kth)) { ASSERT_FALSE(my_metric->get(kth, value)); checker(value, 0); } else { @@ -1099,8 +1096,7 @@ void compare_floating_metric_value_map(const metric_value_map &actual_value_m filters.with_metric_fields = metric_fields; \ \ metric_value_map expected_value_map; \ - if (expected_metric_fields.find(kMetricSingleValueField) != \ - expected_metric_fields.end()) { \ + if (gutil::ContainsKey(expected_metric_fields, kMetricSingleValueField)) { \ expected_value_map[kMetricSingleValueField] = test.expected_value; \ } \ \ @@ -1283,7 +1279,7 @@ void generate_metric_value_map(MetricType *my_metric, for (const auto &type : kth_percentiles) { auto name = kth_percentile_to_name(type); // Only add the chosen fields to the expected value map. - if (expected_metric_fields.find(name) != expected_metric_fields.end()) { + if (gutil::ContainsKey(expected_metric_fields, name)) { value_map[name] = *value; } ++value; @@ -2871,13 +2867,11 @@ TEST(metrics_test, http_get_metrics) for (const auto &test : tests) { entity_container expected_entities; for (const auto &entity_pair : test.expected_entity_metrics) { - const auto &iter = entities.find(entity_pair.first); - ASSERT_NE(entities.end(), iter); - - const auto &entity = iter->second; - expected_entities.emplace(entity->id(), - entity_properties{entity->prototype()->name(), - entity->attributes(), + const auto *entity = gutil::FindOrNull(entities, entity_pair.first); + ASSERT_NE(entity, nullptr); + expected_entities.emplace((*entity)->id(), + entity_properties{(*entity)->prototype()->name(), + (*entity)->attributes(), entity_pair.second}); } @@ -3131,11 +3125,11 @@ void scoped_entity::test_survival_immediately_after_initialization() const // Use internal member directly instead of calling entities(). We don't want to have // any reference which may affect the test results. const auto &entities = metric_registry::instance()._entities; - const auto &iter = entities.find(_my_entity_id); - ASSERT_NE(entities.end(), iter); - ASSERT_EQ(_expected_my_entity_raw_ptr, iter->second.get()); + const auto *entity = gutil::FindOrNull(entities, _my_entity_id); + ASSERT_NE(entity, nullptr); + ASSERT_EQ(_expected_my_entity_raw_ptr, entity->get()); - const auto &actual_surviving_metrics = get_actual_surviving_metrics(iter->second); + const auto &actual_surviving_metrics = get_actual_surviving_metrics(*entity); ASSERT_EQ(_expected_all_metrics, actual_surviving_metrics); } @@ -3149,18 +3143,18 @@ void scoped_entity::test_survival_after_retirement() const // Use internal member directly instead of calling entities(). We don't want to have // any reference which may affect the test results. const auto &entities = metric_registry::instance()._entities; - const auto &iter = entities.find(_my_entity_id); + const auto *iter = gutil::FindOrNull(entities, _my_entity_id); if (_my_entity == nullptr) { // The entity has been retired. - ASSERT_EQ(entities.end(), iter); + ASSERT_EQ(iter, nullptr); ASSERT_TRUE(_expected_surviving_metrics.empty()); return; } - ASSERT_NE(entities.end(), iter); - ASSERT_EQ(_expected_my_entity_raw_ptr, iter->second.get()); + ASSERT_NE(iter, nullptr); + ASSERT_EQ(_expected_my_entity_raw_ptr, iter->get()); - const auto &actual_surviving_metrics = get_actual_surviving_metrics(iter->second); + const auto &actual_surviving_metrics = get_actual_surviving_metrics(*iter); ASSERT_EQ(_expected_surviving_metrics, actual_surviving_metrics); } diff --git a/src/utils/test/output_utils_test.cpp b/src/utils/test/output_utils_test.cpp index cd3a9cdbfd..cd774579c5 100644 --- a/src/utils/test/output_utils_test.cpp +++ b/src/utils/test/output_utils_test.cpp @@ -32,9 +32,9 @@ #include "gtest/gtest.h" -using std::vector; -using std::string; using dsn::utils::table_printer; +using std::string; +using std::vector; namespace dsn { diff --git a/src/utils/test/run.sh b/src/utils/test/run.sh index 90698a718f..06c8b72346 100755 --- a/src/utils/test/run.sh +++ b/src/utils/test/run.sh @@ -36,9 +36,9 @@ if [ $? -ne 0 ]; then echo "run dsn_utils_tests failed" echo "---- ls ----" ls -l - if find . -name log.1.txt; then - echo "---- tail -n 100 log.1.txt ----" - tail -n 100 `find . -name log.1.txt` + if [ `find . -name pegasus.log.* | wc -l` -ne 0 ]; then + echo "---- tail -n 100 pegasus.log.* ----" + tail -n 100 `find . -name pegasus.log.*` fi if [ -f core ]; then echo "---- gdb ./dsn_utils_tests core ----" diff --git a/src/utils/test/string_conv_test.cpp b/src/utils/test/string_conv_test.cpp index c9fd5fc2d7..5e564b0d2b 100644 --- a/src/utils/test/string_conv_test.cpp +++ b/src/utils/test/string_conv_test.cpp @@ -27,7 +27,7 @@ #include "utils/string_conv.h" #include "gtest/gtest.h" -#include "absl/strings/string_view.h" +#include TEST(string_conv, buf2bool) { @@ -50,13 +50,13 @@ TEST(string_conv, buf2bool) ASSERT_FALSE(dsn::buf2bool("TrUe", result, false)); std::string str("true\0false", 10); - ASSERT_FALSE(dsn::buf2bool(absl::string_view(str.data(), 3), result)); - ASSERT_TRUE(dsn::buf2bool(absl::string_view(str.data(), 4), result)); + ASSERT_FALSE(dsn::buf2bool(std::string_view(str.data(), 3), result)); + ASSERT_TRUE(dsn::buf2bool(std::string_view(str.data(), 4), result)); ASSERT_EQ(result, true); - ASSERT_FALSE(dsn::buf2bool(absl::string_view(str.data(), 5), result)); - ASSERT_FALSE(dsn::buf2bool(absl::string_view(str.data(), 6), result)); - ASSERT_FALSE(dsn::buf2bool(absl::string_view(str.data() + 5, 4), result)); - ASSERT_TRUE(dsn::buf2bool(absl::string_view(str.data() + 5, 5), result)); + ASSERT_FALSE(dsn::buf2bool(std::string_view(str.data(), 5), result)); + ASSERT_FALSE(dsn::buf2bool(std::string_view(str.data(), 6), result)); + ASSERT_FALSE(dsn::buf2bool(std::string_view(str.data() + 5, 4), result)); + ASSERT_TRUE(dsn::buf2bool(std::string_view(str.data() + 5, 5), result)); ASSERT_EQ(result, false); } @@ -92,12 +92,12 @@ TEST(string_conv, buf2int32) // "\045" is "%", so the string length=5, otherwise(2th argument > 5) it will be reported // "global-buffer-overflow" error under AddressSanitizer check std::string str("123\0456", 5); - ASSERT_TRUE(dsn::buf2int32(absl::string_view(str.data(), 2), result)); + ASSERT_TRUE(dsn::buf2int32(std::string_view(str.data(), 2), result)); ASSERT_EQ(result, 12); - ASSERT_TRUE(dsn::buf2int32(absl::string_view(str.data(), 3), result)); + ASSERT_TRUE(dsn::buf2int32(std::string_view(str.data(), 3), result)); ASSERT_EQ(result, 123); - ASSERT_FALSE(dsn::buf2int32(absl::string_view(str.data(), 4), result)); - ASSERT_FALSE(dsn::buf2int32(absl::string_view(str.data(), 5), result)); + ASSERT_FALSE(dsn::buf2int32(std::string_view(str.data(), 4), result)); + ASSERT_FALSE(dsn::buf2int32(std::string_view(str.data(), 5), result)); } TEST(string_conv, buf2int64) @@ -139,12 +139,12 @@ TEST(string_conv, buf2int64) // "\045" is "%", so the string length=5, otherwise(2th argument > 5) it will be reported // "global-buffer-overflow" error under AddressSanitizer check std::string str("123\0456", 5); - ASSERT_TRUE(dsn::buf2int64(absl::string_view(str.data(), 2), result)); + ASSERT_TRUE(dsn::buf2int64(std::string_view(str.data(), 2), result)); ASSERT_EQ(result, 12); - ASSERT_TRUE(dsn::buf2int64(absl::string_view(str.data(), 3), result)); + ASSERT_TRUE(dsn::buf2int64(std::string_view(str.data(), 3), result)); ASSERT_EQ(result, 123); - ASSERT_FALSE(dsn::buf2int64(absl::string_view(str.data(), 4), result)); - ASSERT_FALSE(dsn::buf2int64(absl::string_view(str.data(), 5), result)); + ASSERT_FALSE(dsn::buf2int64(std::string_view(str.data(), 4), result)); + ASSERT_FALSE(dsn::buf2int64(std::string_view(str.data(), 5), result)); } TEST(string_conv, buf2uint64) @@ -183,12 +183,12 @@ TEST(string_conv, buf2uint64) // "\045" is "%", so the string length=5, otherwise(2th argument > 5) it will be reported // "global-buffer-overflow" error under AddressSanitizer check std::string str("123\0456", 5); - ASSERT_TRUE(dsn::buf2uint64(absl::string_view(str.data(), 2), result)); + ASSERT_TRUE(dsn::buf2uint64(std::string_view(str.data(), 2), result)); ASSERT_EQ(result, 12); - ASSERT_TRUE(dsn::buf2uint64(absl::string_view(str.data(), 3), result)); + ASSERT_TRUE(dsn::buf2uint64(std::string_view(str.data(), 3), result)); ASSERT_EQ(result, 123); - ASSERT_FALSE(dsn::buf2uint64(absl::string_view(str.data(), 4), result)); - ASSERT_FALSE(dsn::buf2uint64(absl::string_view(str.data(), 5), result)); + ASSERT_FALSE(dsn::buf2uint64(std::string_view(str.data(), 4), result)); + ASSERT_FALSE(dsn::buf2uint64(std::string_view(str.data(), 5), result)); } TEST(string_conv, buf2uint32) @@ -229,12 +229,12 @@ TEST(string_conv, buf2uint32) // "\045" is "%", so the string length=5, otherwise(2th argument > 5) it will be reported // "global-buffer-overflow" error under AddressSanitizer check std::string str("123\0456", 5); - ASSERT_TRUE(dsn::buf2uint32(absl::string_view(str.data(), 2), result)); + ASSERT_TRUE(dsn::buf2uint32(std::string_view(str.data(), 2), result)); ASSERT_EQ(result, 12); - ASSERT_TRUE(dsn::buf2uint32(absl::string_view(str.data(), 3), result)); + ASSERT_TRUE(dsn::buf2uint32(std::string_view(str.data(), 3), result)); ASSERT_EQ(result, 123); - ASSERT_FALSE(dsn::buf2uint32(absl::string_view(str.data(), 4), result)); - ASSERT_FALSE(dsn::buf2uint32(absl::string_view(str.data(), 5), result)); + ASSERT_FALSE(dsn::buf2uint32(std::string_view(str.data(), 4), result)); + ASSERT_FALSE(dsn::buf2uint32(std::string_view(str.data(), 5), result)); } TEST(string_conv, int64_partial) diff --git a/src/utils/test/time_utils_test.cpp b/src/utils/test/time_utils_test.cpp index 35838aaf78..ec952637ce 100644 --- a/src/utils/test/time_utils_test.cpp +++ b/src/utils/test/time_utils_test.cpp @@ -90,16 +90,16 @@ TEST(time_utils, get_current_physical_time_ns) template void test_time_ms_to_string(T &str) { - time_ms_to_string(1605091506136, str); + time_ms_to_string(1605091506036, str); std::string actual_str(str); - // Time differ between time zones. + // Time differs between time zones. // - // The real time 2020-11-11 18:45:06.136 (UTC+8) - // so it must be 2020-11-1x xx:45:06.136. + // The real time 2020-11-11 18:45:06.036 (UTC+8) + // so it must be 2020-11-1x xx:45:06.036. ASSERT_EQ(std::string("2020-11-1"), actual_str.substr(0, 9)); - ASSERT_EQ(std::string(":45:06.136"), actual_str.substr(13, 10)); + ASSERT_EQ(std::string(":45:06.036"), actual_str.substr(13, 10)); } TEST(time_utils, time_ms_to_buf) @@ -114,5 +114,20 @@ TEST(time_utils, time_ms_to_str) test_time_ms_to_string(str); } +TEST(time_utils, time_ms_to_sequent_str) +{ + std::string str; + time_ms_to_sequent_string(1605091506036, str); + + std::string actual_str(str); + + // Time differs between time zones. + // + // The real time 20201111_184506_036 (UTC+8) + // so it must be 2020111x_xx4506_036. + ASSERT_EQ(std::string("2020111"), actual_str.substr(0, 7)); + ASSERT_EQ(std::string("4506_036"), actual_str.substr(11, 8)); +} + } // namespace utils } // namespace dsn diff --git a/src/utils/thread_access_checker.cpp b/src/utils/thread_access_checker.cpp index 5f54188168..eaab4e2baf 100644 --- a/src/utils/thread_access_checker.cpp +++ b/src/utils/thread_access_checker.cpp @@ -45,4 +45,4 @@ void thread_access_checker::only_one_thread_access() _access_thread_id_inited = true; } } -} +} // namespace dsn diff --git a/src/utils/thread_access_checker.h b/src/utils/thread_access_checker.h index 623dfc0f85..929f92eff5 100644 --- a/src/utils/thread_access_checker.h +++ b/src/utils/thread_access_checker.h @@ -45,4 +45,4 @@ class thread_access_checker int _access_thread_id; bool _access_thread_id_inited; }; -} +} // namespace dsn diff --git a/src/utils/threadpool_code.h b/src/utils/threadpool_code.h index 383c1d5247..cd3eac2db7 100644 --- a/src/utils/threadpool_code.h +++ b/src/utils/threadpool_code.h @@ -66,6 +66,6 @@ class threadpool_code DEFINE_THREAD_POOL_CODE(THREAD_POOL_INVALID) DEFINE_THREAD_POOL_CODE(THREAD_POOL_DEFAULT) -} +} // namespace dsn USER_DEFINED_STRUCTURE_FORMATTER(::dsn::threadpool_code); diff --git a/src/utils/threadpool_spec.h b/src/utils/threadpool_spec.h index 1a8506422e..3643c40ba5 100644 --- a/src/utils/threadpool_spec.h +++ b/src/utils/threadpool_spec.h @@ -121,4 +121,4 @@ CONFIG_FLD(bool, false, "throttling: whether to enable throttling with virtual queues") CONFIG_END -} +} // namespace dsn diff --git a/src/utils/time_utils.cpp b/src/utils/time_utils.cpp index 34504fc993..e8d564814a 100644 --- a/src/utils/time_utils.cpp +++ b/src/utils/time_utils.cpp @@ -36,7 +36,7 @@ namespace utils { auto ret = get_localtime(ts_ms, &tmp); // NOTE: format_to() does not append a terminating null character, so remember to initialize // str's memory as zero before. - fmt::format_to(str, "{:%Y-%m-%d %H:%M:%S}.{}", *ret, static_cast(ts_ms % 1000)); + fmt::format_to(str, "{:%Y-%m-%d %H:%M:%S}.{:03}", *ret, static_cast(ts_ms % 1000)); } /*extern*/ void time_ms_to_string(uint64_t ts_ms, std::string &str) @@ -45,7 +45,18 @@ namespace utils { struct tm tmp; auto ret = get_localtime(ts_ms, &tmp); fmt::format_to(std::back_inserter(str), - "{:%Y-%m-%d %H:%M:%S}.{}", + "{:%Y-%m-%d %H:%M:%S}.{:03}", + *ret, + static_cast(ts_ms % 1000)); +} + +/*extern*/ void time_ms_to_sequent_string(uint64_t ts_ms, std::string &str) +{ + str.clear(); + struct tm tmp; + auto ret = get_localtime(ts_ms, &tmp); + fmt::format_to(std::back_inserter(str), + "{:%Y%m%d_%H%M%S}_{:03}", *ret, static_cast(ts_ms % 1000)); } diff --git a/src/utils/time_utils.h b/src/utils/time_utils.h index f89eebf8f2..d36aa98640 100644 --- a/src/utils/time_utils.h +++ b/src/utils/time_utils.h @@ -33,7 +33,7 @@ #include #include -#include "absl/strings/string_view.h" +#include #include "runtime/api_layer1.h" #include "utils/fmt_logging.h" #include "utils/ports.h" @@ -47,13 +47,16 @@ static struct tm *get_localtime(uint64_t ts_ms, struct tm *tm_buf) return localtime_r(&t, tm_buf); } -// get time string, which format is yyyy-MM-dd hh:mm:ss.SSS +// Get time string, which format is yyyy-MM-dd hh:mm:ss.SSS // NOTE: using char* as output is usually unsafe, remember to initialize its memory as zero before // calling 'time_ms_to_string'. Please use std::string as the output argument as long as it's // possible. extern void time_ms_to_string(uint64_t ts_ms, char *str); extern void time_ms_to_string(uint64_t ts_ms, std::string &str); +// Get time string, which format is yyyyMMdd_hhmmss_SSS +extern void time_ms_to_sequent_string(uint64_t ts_ms, std::string &str); + // get date string with format of 'yyyy-MM-dd' from given timestamp inline void time_ms_to_date(uint64_t ts_ms, char *str, int len) { @@ -112,7 +115,7 @@ inline int64_t get_unix_sec_today_midnight() // `hh:mm` (range in [00:00, 23:59]) to seconds since 00:00:00 // eg. `01:00` => `3600` // Return: -1 when invalid -inline int hh_mm_to_seconds(absl::string_view hhmm) +inline int hh_mm_to_seconds(std::string_view hhmm) { int hour = 0, min = 0, sec = -1; if (::sscanf(hhmm.data(), "%d:%d", &hour, &min) == 2 && (0 <= hour && hour <= 23) && @@ -125,7 +128,7 @@ inline int hh_mm_to_seconds(absl::string_view hhmm) // local time `hh:mm` to unix timestamp. // eg. `18:10` => `1525947000` when called on May 10, 2018, CST // Return: -1 when invalid -inline int64_t hh_mm_today_to_unix_sec(absl::string_view hhmm_of_day) +inline int64_t hh_mm_today_to_unix_sec(std::string_view hhmm_of_day) { int sec_of_day = hh_mm_to_seconds(hhmm_of_day); if (sec_of_day == -1) { diff --git a/src/utils/uniq_timestamp_us.h b/src/utils/uniq_timestamp_us.h index 5094071f2e..c5208ab151 100644 --- a/src/utils/uniq_timestamp_us.h +++ b/src/utils/uniq_timestamp_us.h @@ -59,4 +59,4 @@ class uniq_timestamp_us return _last_ts; } }; -} +} // namespace dsn diff --git a/src/utils/utils.cpp b/src/utils/utils.cpp deleted file mode 100644 index 749933ffe9..0000000000 --- a/src/utils/utils.cpp +++ /dev/null @@ -1,80 +0,0 @@ -/* - * The MIT License (MIT) - * - * Copyright (c) 2015 Microsoft Corporation - * - * -=- Robust Distributed System Nucleus (rDSN) -=- - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "utils/utils.h" - -#include -#include -#include -#include -#include -#include -#include - -#include "utils/fmt_logging.h" - -#if defined(__linux__) -#elif defined(__FreeBSD__) -#include -#elif defined(__APPLE__) -#include -#endif - -namespace dsn { -namespace utils { - -bool hostname_from_ip(uint32_t ip, std::string *hostname_result) -{ - struct sockaddr_in addr_in; - addr_in.sin_family = AF_INET; - addr_in.sin_port = 0; - addr_in.sin_addr.s_addr = ip; - char hostname[256]; - int err = getnameinfo((struct sockaddr *)(&addr_in), - sizeof(struct sockaddr), - hostname, - sizeof(hostname), - nullptr, - 0, - NI_NAMEREQD); - if (err != 0) { - struct in_addr net_addr; - net_addr.s_addr = ip; - char ip_str[256]; - inet_ntop(AF_INET, &net_addr, ip_str, sizeof(ip_str)); - if (err == EAI_SYSTEM) { - LOG_WARNING("got error {} when try to resolve {}", strerror(errno), ip_str); - } else { - LOG_WARNING("return error({}) when try to resolve {}", gai_strerror(err), ip_str); - } - return false; - } else { - *hostname_result = std::string(hostname); - return true; - } -} -} // namespace utils -} // namespace dsn diff --git a/src/utils/utils.h b/src/utils/utils.h index d8e90c9611..3b97c9a1b8 100644 --- a/src/utils/utils.h +++ b/src/utils/utils.h @@ -67,21 +67,6 @@ std::shared_ptr make_shared_array(size_t size) return std::shared_ptr(new T[size], std::default_delete()); } -// get host name from ip series -// if can't get a hostname from ip(maybe no hostname or other errors), return false, and -// hostname_result will be invalid value -// if multiple hostname got and all of them are resolvable return true, otherwise return false. -// and the hostname_result will be "hostname1,hostname2(or ip_address or )..." -// we only support ipv4 currently -// check if a.b.c.d:port can be resolved to hostname:port. If it can be resolved, return true -// and hostname_result -// will be the hostname, or it will be ip address or error message - -// TODO(yingchun): Consider to move it to rpc_address. -// valid_ip_network_order -> return TRUE && hostname_result=hostname | -// invalid_ip_network_order -> return FALSE -bool hostname_from_ip(uint32_t ip, std::string *hostname_result); - template std::multimap flip_map(const std::map &source) { diff --git a/src/utils/work_queue.h b/src/utils/work_queue.h index 72f2a61575..5d4c78ca2d 100644 --- a/src/utils/work_queue.h +++ b/src/utils/work_queue.h @@ -98,4 +98,4 @@ class work_queue protected: slist _hdr; }; -} \ No newline at end of file +} // namespace dsn \ No newline at end of file diff --git a/src/utils/zlock_provider.h b/src/utils/zlock_provider.h index 0658066404..e9a1b5bd14 100644 --- a/src/utils/zlock_provider.h +++ b/src/utils/zlock_provider.h @@ -179,4 +179,4 @@ class semaphore_provider : public extensible_object private: semaphore_provider *_inner_provider; }; -} +} // namespace dsn diff --git a/src/utils/zlocks.h b/src/utils/zlocks.h index 472823ac3d..0a6f614291 100644 --- a/src/utils/zlocks.h +++ b/src/utils/zlocks.h @@ -111,7 +111,7 @@ class zevent std::atomic _signaled; bool _manualReset; }; -} +} // namespace dsn /// /// RAII wrapper of rdsn's synchronization objects @@ -188,7 +188,7 @@ class zauto_write_lock bool _locked; zrwlock_nr *_lock; }; -} +} // namespace dsn /// /// utils function used to check the lock safety @@ -197,5 +197,5 @@ namespace dsn { namespace lock_checker { void check_wait_safety(); void check_dangling_lock(); -} -} +} // namespace lock_checker +} // namespace dsn diff --git a/src/zookeeper/distributed_lock_service_zookeeper.cpp b/src/zookeeper/distributed_lock_service_zookeeper.cpp index 52d1f98f62..30686831e1 100644 --- a/src/zookeeper/distributed_lock_service_zookeeper.cpp +++ b/src/zookeeper/distributed_lock_service_zookeeper.cpp @@ -34,7 +34,7 @@ #include "lock_struct.h" #include "lock_types.h" #include "runtime/service_app.h" -#include "runtime/task/async_calls.h" +#include "task/async_calls.h" #include "utils/flags.h" #include "utils/fmt_logging.h" #include "utils/strings.h" @@ -282,5 +282,5 @@ void distributed_lock_service_zookeeper::on_zoo_session_evt(lock_srv_ptr _this, LOG_WARNING("get zoo state: {}, ignore it", zookeeper_session::string_zoo_state(zoo_state)); } } -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/distributed_lock_service_zookeeper.h b/src/zookeeper/distributed_lock_service_zookeeper.h index 206e73459a..df90472afc 100644 --- a/src/zookeeper/distributed_lock_service_zookeeper.h +++ b/src/zookeeper/distributed_lock_service_zookeeper.h @@ -36,9 +36,9 @@ #include "boost/container/detail/std_fwd.hpp" #include "lock_types.h" -#include "runtime/task/future_types.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/future_types.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/distributed_lock_service.h" #include "utils/error_code.h" @@ -126,5 +126,5 @@ class distributed_lock_service_zookeeper : public distributed_lock_service, publ friend class lock_struct; }; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/lock_struct.cpp b/src/zookeeper/lock_struct.cpp index 56b0fd33be..a0e9ad78ce 100644 --- a/src/zookeeper/lock_struct.cpp +++ b/src/zookeeper/lock_struct.cpp @@ -36,8 +36,8 @@ #include "distributed_lock_service_zookeeper.h" #include "lock_struct.h" #include "lock_types.h" -#include "runtime/task/async_calls.h" -#include "runtime/task/task.h" +#include "task/async_calls.h" +#include "task/task.h" #include "utils/blob.h" #include "utils/error_code.h" #include "utils/fmt_logging.h" @@ -81,11 +81,12 @@ static bool is_zookeeper_timeout(int zookeeper_error) zookeeper_session::string_zoo_operation(op->_optype), \ op->_input._path); \ zookeeper_session::add_ref(op); \ - tasking::enqueue(TASK_CODE_DLOCK, \ - nullptr, \ - [_this, op]() { _this->_dist_lock_service->session()->visit(op); }, \ - _this->hash(), \ - std::chrono::seconds(1)); + tasking::enqueue( \ + TASK_CODE_DLOCK, \ + nullptr, \ + [_this, op]() { _this->_dist_lock_service->session()->visit(op); }, \ + _this->hash(), \ + std::chrono::seconds(1)); #define IGNORE_CALLBACK true #define DONT_IGNORE_CALLBACK false @@ -798,5 +799,5 @@ void lock_struct::lock_expired(lock_struct_ptr _this) _this->_checker.only_one_thread_access(); _this->on_expire(); } -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/lock_struct.h b/src/zookeeper/lock_struct.h index fa10f94398..8bd660760d 100644 --- a/src/zookeeper/lock_struct.h +++ b/src/zookeeper/lock_struct.h @@ -32,7 +32,7 @@ #include #include "lock_types.h" -#include "runtime/task/future_types.h" +#include "task/future_types.h" #include "utils/autoref_ptr.h" #include "utils/distributed_lock_service.h" #include "utils/fmt_utils.h" @@ -122,5 +122,5 @@ class lock_struct : public ref_counter thread_access_checker _checker; }; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/lock_types.h b/src/zookeeper/lock_types.h index d368fc514d..3bf6e4038b 100644 --- a/src/zookeeper/lock_types.h +++ b/src/zookeeper/lock_types.h @@ -29,7 +29,7 @@ #include "utils/autoref_ptr.h" #include "utils/error_code.h" #include "utils/threadpool_code.h" -#include "runtime/task/task_code.h" +#include "task/task_code.h" #include "common/gpid.h" #include "utils/distributed_lock_service.h" @@ -43,5 +43,5 @@ class distributed_lock_service_zookeeper; class lock_struct; typedef ref_ptr lock_srv_ptr; typedef ref_ptr lock_struct_ptr; -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/test/distributed_lock_zookeeper.cpp b/src/zookeeper/test/distributed_lock_zookeeper.cpp index 7da48ef433..860e9eb762 100644 --- a/src/zookeeper/test/distributed_lock_zookeeper.cpp +++ b/src/zookeeper/test/distributed_lock_zookeeper.cpp @@ -35,8 +35,8 @@ #include "gtest/gtest.h" #include "runtime/service_app.h" -#include "runtime/task/task.h" -#include "runtime/task/task_code.h" +#include "task/task.h" +#include "task/task_code.h" #include "utils/autoref_ptr.h" #include "utils/distributed_lock_service.h" #include "utils/error_code.h" @@ -163,14 +163,14 @@ TEST(distributed_lock_service_zookeeper, abnormal_api_call) cb_pair.first->wait(); opt.create_if_not_exist = true; - cb_pair = - dlock_svc->lock(lock_id, - my_id, - DLOCK_CALLBACK, - [](error_code ec, const std::string &, int) { ASSERT_TRUE(ec == ERR_OK); }, - DLOCK_CALLBACK, - nullptr, - opt); + cb_pair = dlock_svc->lock( + lock_id, + my_id, + DLOCK_CALLBACK, + [](error_code ec, const std::string &, int) { ASSERT_TRUE(ec == ERR_OK); }, + DLOCK_CALLBACK, + nullptr, + opt); ASSERT_TRUE(cb_pair.first != nullptr && cb_pair.second != nullptr); cb_pair.first->wait(); @@ -208,16 +208,17 @@ TEST(distributed_lock_service_zookeeper, abnormal_api_call) }); tsk->wait(); - cb_pair2 = dlock_svc->lock(lock_id, - my_id2, - DLOCK_CALLBACK, - [my_id2](error_code ec, const std::string &name, int) { - ASSERT_TRUE(ec == ERR_OK); - ASSERT_TRUE(name == my_id2); - }, - DLOCK_CALLBACK, - nullptr, - opt); + cb_pair2 = dlock_svc->lock( + lock_id, + my_id2, + DLOCK_CALLBACK, + [my_id2](error_code ec, const std::string &name, int) { + ASSERT_TRUE(ec == ERR_OK); + ASSERT_TRUE(name == my_id2); + }, + DLOCK_CALLBACK, + nullptr, + opt); bool result = cb_pair2.first->wait(2000); ASSERT_FALSE(result); diff --git a/src/zookeeper/test/run.sh b/src/zookeeper/test/run.sh index 38608a28b3..820dbdab3c 100755 --- a/src/zookeeper/test/run.sh +++ b/src/zookeeper/test/run.sh @@ -37,9 +37,9 @@ if [ $? -ne 0 ]; then echo "run dsn.zookeeper.tests failed" echo "---- ls ----" ls -l - if find . -name log.1.txt; then - echo "---- tail -n 100 log.1.txt ----" - tail -n 100 `find . -name log.1.txt` + if [ `find . -name pegasus.log.* | wc -l` -ne 0 ]; then + echo "---- tail -n 100 pegasus.log.* ----" + tail -n 100 `find . -name pegasus.log.*` fi if [ -f core ]; then echo "---- gdb ./dsn.zookeeper.tests core ----" diff --git a/src/zookeeper/zookeeper_error.cpp b/src/zookeeper/zookeeper_error.cpp index 3504990a59..97f7d3851e 100644 --- a/src/zookeeper/zookeeper_error.cpp +++ b/src/zookeeper/zookeeper_error.cpp @@ -48,5 +48,5 @@ error_code from_zerror(int zerr) return ERR_INCONSISTENT_STATE; return ERR_ZOOKEEPER_OPERATION; } -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/zookeeper_error.h b/src/zookeeper/zookeeper_error.h index a78f41be2d..0098a221f5 100644 --- a/src/zookeeper/zookeeper_error.h +++ b/src/zookeeper/zookeeper_error.h @@ -33,4 +33,4 @@ namespace dist { error_code from_zerror(int zerr); } -} +} // namespace dsn diff --git a/src/zookeeper/zookeeper_session.cpp b/src/zookeeper/zookeeper_session.cpp index ed4d6b9d22..dfff72b47f 100644 --- a/src/zookeeper/zookeeper_session.cpp +++ b/src/zookeeper/zookeeper_session.cpp @@ -31,7 +31,7 @@ #include #include "runtime/app_model.h" -#include "runtime/rpc/rpc_address.h" +#include "rpc/rpc_address.h" #include "utils/filesystem.h" #include "utils/flags.h" #include "utils/fmt_logging.h" @@ -467,5 +467,5 @@ void zookeeper_session::global_void_completion(int rc, const void *data) op_ctx->_callback_function(op_ctx); release_ref(op_ctx); } -} -} +} // namespace dist +} // namespace dsn diff --git a/src/zookeeper/zookeeper_session.h b/src/zookeeper/zookeeper_session.h index b359f867b9..e46f6d6a25 100644 --- a/src/zookeeper/zookeeper_session.h +++ b/src/zookeeper/zookeeper_session.h @@ -197,7 +197,7 @@ class zookeeper_session global_strings_completion(int rc, const struct String_vector *strings, const void *data); static void global_void_completion(int rc, const void *data); }; -} -} +} // namespace dist +} // namespace dsn USER_DEFINED_STRUCTURE_FORMATTER(::dsn::dist::zookeeper_session); diff --git a/src/zookeeper/zookeeper_session_mgr.cpp b/src/zookeeper/zookeeper_session_mgr.cpp index ca40157205..ea44dbd5ff 100644 --- a/src/zookeeper/zookeeper_session_mgr.cpp +++ b/src/zookeeper/zookeeper_session_mgr.cpp @@ -57,5 +57,5 @@ zookeeper_session *zookeeper_session_mgr::get_session(const service_app_info &in } return ans; } -} -} +} // namespace dist +} // namespace dsn diff --git a/thirdparty/CMakeLists.txt b/thirdparty/CMakeLists.txt index 07e908c9af..24038c4fed 100644 --- a/thirdparty/CMakeLists.txt +++ b/thirdparty/CMakeLists.txt @@ -152,7 +152,7 @@ ExternalProject_Add(thrift URL ${OSS_URL_PREFIX}/thrift-0.9.3.tar.gz http://archive.apache.org/dist/thrift/0.9.3/thrift-0.9.3.tar.gz URL_MD5 88d667a8ae870d5adeca8cb7d6795442 - PATCH_COMMAND patch -p1 < ${TP_DIR}/fix_thrift_for_cpp11.patch + PATCH_COMMAND patch -p1 < ${TP_DIR}/fix_thrift_build_and_link_errors.patch CMAKE_ARGS -DCMAKE_BUILD_TYPE=release -DWITH_JAVA=OFF -DWITH_PYTHON=OFF @@ -234,7 +234,6 @@ ExternalProject_Add(fmt ) set(CURL_OPTIONS - --enable-shared --disable-dict --disable-file --disable-ftp @@ -246,11 +245,11 @@ set(CURL_OPTIONS --disable-manual --disable-pop3 --disable-rtsp + --disable-shared --disable-smtp --disable-telnet --disable-tftp --without-brotli - --without-libidn --without-libidn2 --without-librtmp --without-libssh2 @@ -269,7 +268,7 @@ ExternalProject_Add(curl URL ${OSS_URL_PREFIX}/curl-8.4.0.tar.gz http://curl.haxx.se/download/curl-8.4.0.tar.gz URL_MD5 533e8a3b1228d5945a6a512537bea4c7 - CONFIGURE_COMMAND ./configure --prefix=${TP_OUTPUT} + CONFIGURE_COMMAND CFLAGS=-fPIC CPPFLAGS=-fPIC ./configure --prefix=${TP_OUTPUT} ${CURL_OPTIONS} BUILD_IN_SOURCE 1 DOWNLOAD_EXTRACT_TIMESTAMP true @@ -282,6 +281,8 @@ ExternalProject_Add(prometheus-cpp URL_MD5 cdb515e802aa9aaaf1f6dde1271a20a2 DEPENDS curl CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${TP_OUTPUT} + -DCMAKE_C_FLAGS=-fPIC + -DCMAKE_CXX_FLAGS=-fPIC -DENABLE_TESTING=OFF -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} @@ -472,3 +473,19 @@ ExternalProject_Add(http-parser DOWNLOAD_NO_PROGRESS true ) +ExternalProject_Add(spdlog + URL https://github.com/gabime/spdlog/archive/refs/tags/v1.14.1.tar.gz + URL_MD5 f2c3f15c20e67b261836ff7bfda302cf + PATCH_COMMAND "" + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${TP_OUTPUT} + -DSPDLOG_ENABLE_PCH=ON + -DSPDLOG_BUILD_PIC=ON + -DSPDLOG_FMT_EXTERNAL=ON + -DSPDLOG_NO_EXCEPTIONS=ON + -Dfmt_DIR=${TP_OUTPUT}/lib/cmake/fmt + BUILD_COMMAND make -j${PARALLEL} + INSTALL_COMMAND make install + DEPENDS fmt + DOWNLOAD_EXTRACT_TIMESTAMP true + DOWNLOAD_NO_PROGRESS true +) diff --git a/thirdparty/fix_thrift_for_cpp11.patch b/thirdparty/fix_thrift_build_and_link_errors.patch similarity index 80% rename from thirdparty/fix_thrift_for_cpp11.patch rename to thirdparty/fix_thrift_build_and_link_errors.patch index 52b5b439e4..89c7d80a21 100644 --- a/thirdparty/fix_thrift_for_cpp11.patch +++ b/thirdparty/fix_thrift_build_and_link_errors.patch @@ -51,3 +51,16 @@ index dadaac3..ef32fe1 100644 } // apache::thrift::stdcxx::placeholders }}} // apache::thrift::stdcxx #endif +diff --git a/lib/cpp/src/thrift/protocol/TJSONProtocol.cpp b/lib/cpp/src/thrift/protocol/TJSONProtocol.cpp +index e4077bc10..00512990a 100644 +--- a/lib/cpp/src/thrift/protocol/TJSONProtocol.cpp ++++ b/lib/cpp/src/thrift/protocol/TJSONProtocol.cpp +@@ -528,7 +528,7 @@ uint32_t TJSONProtocol::writeJSONDouble(double num) { + bool special = false; + switch (boost::math::fpclassify(num)) { + case FP_INFINITE: +- if (boost::math::signbit(num)) { ++ if (std::signbit(num)) { + val = kThriftNegativeInfinity; + } else { + val = kThriftInfinity;