From 9ebc2bf1a5422cceaeb7d384a243ff0605b043ef Mon Sep 17 00:00:00 2001
From: HangyuanLiu <460660596@qq.com>
Date: Thu, 12 Sep 2024 10:02:40 +0800
Subject: [PATCH] Add Meta refactor development branch

Signed-off-by: HangyuanLiu <460660596@qq.com>
---
 .../com/starrocks/alter/AlterJobExecutor.java | 425 +-
 .../java/com/starrocks/alter/AlterJobMgr.java | 468 +-
 .../starrocks/alter/AlterMVJobExecutor.java | 18 +-
 .../starrocks/alter/CompactionHandler.java | 16 +-
 .../alter/LakeTableAlterJobV2Builder.java | 6 +-
 .../alter/LakeTableAlterMetaJobBase.java | 7 +-
 .../alter/LakeTableSchemaChangeJob.java | 19 +-
 .../alter/MaterializedViewHandler.java | 105 +-
 .../starrocks/alter/OnlineOptimizeJobV2.java | 11 +-
 .../com/starrocks/alter/OptimizeJobV2.java | 59 +-
 .../starrocks/alter/SchemaChangeHandler.java | 136 +-
 .../starrocks/alter/SchemaChangeJobV2.java | 7 +-
 .../com/starrocks/backup/BackupJobInfo.java | 5 +-
 .../java/com/starrocks/backup/RestoreJob.java | 26 +-
 .../starrocks/catalog/CatalogRecycleBin.java | 66 +-
 .../com/starrocks/catalog/CatalogUtils.java | 2 +-
 .../starrocks/catalog/ColocateTableIndex.java | 8 +-
 .../java/com/starrocks/catalog/Database.java | 23 +-
 .../starrocks/catalog/ExternalOlapTable.java | 19 +-
 .../com/starrocks/catalog/LocalTablet.java | 44 -
 .../starrocks/catalog/MaterializedIndex.java | 10 +-
 .../starrocks/catalog/MaterializedView.java | 4 +-
 .../com/starrocks/catalog/MetadataViewer.java | 4 +-
 .../java/com/starrocks/catalog/OlapTable.java | 44 +-
 .../java/com/starrocks/catalog/Partition.java | 530 +-
 .../starrocks/catalog/PhysicalPartition.java | 536 +-
 .../catalog/PhysicalPartitionImpl.java | 533 --
 .../catalog/RecyclePartitionInfo.java | 2 +-
 .../catalog/TabletInvertedIndex.java | 47 +-
 .../com/starrocks/catalog/TabletMeta.java | 1 +
 .../com/starrocks/catalog/TabletStatMgr.java | 8 +-
 .../com/starrocks/catalog/TempPartitions.java | 3 +-
 .../catalog/mv/MVTimelinessArbiter.java | 2 +-
 .../mv/MVTimelinessNonPartitionArbiter.java | 2 +-
 .../clone/ColocateTableBalancer.java | 23 +-
 .../clone/DiskAndTabletLoadReBalancer.java | 77 +-
 .../clone/DynamicPartitionScheduler.java | 10 +-
 .../com/starrocks/clone/TabletChecker.java | 16 +-
 .../com/starrocks/clone/TabletSchedCtx.java | 31 +-
 .../com/starrocks/clone/TabletScheduler.java | 75 +-
 .../common/proc/LakeTabletsProcDir.java | 3 +-
 .../common/proc/PartitionsProcDir.java | 4 +-
 .../common/util/PropertyAnalyzer.java | 2 +-
 .../concurrent/lock/AutoCloseableLock.java | 10 +
 .../common/util/concurrent/lock/Locker.java | 33 +-
 .../partitiontraits/OlapPartitionTraits.java | 12 +-
 .../consistency/CheckConsistencyJob.java | 19 +-
 .../consistency/ConsistencyChecker.java | 52 +-
 .../http/meta/ColocateMetaService.java | 4 +-
 .../http/meta/GlobalDictMetaService.java | 47 +-
 .../starrocks/http/rest/MigrationAction.java | 53 +-
 .../http/rest/v2/vo/PartitionInfoView.java | 11 +-
 .../com/starrocks/journal/JournalEntity.java | 275 +-
 .../com/starrocks/journal/JournalWriter.java | 21 +
 .../java/com/starrocks/lake/LakeTable.java | 2 +-
 .../lake/RecycleLakeListPartitionInfo.java | 2 +-
 .../lake/RecycleLakeRangePartitionInfo.java | 2 +-
 .../lake/RecycleLakeUnPartitionInfo.java | 2 +-
 .../com/starrocks/lake/StarMgrMetaSyncer.java | 10 +-
 .../starrocks/lake/backup/LakeRestoreJob.java | 16 +-
 .../java/com/starrocks/leader/LeaderImpl.java | 12 +-
 .../com/starrocks/leader/ReportHandler.java | 193 +-
 .../starrocks/listener/LoadJobMVListener.java | 2 +-
 .../java/com/starrocks/load/DeleteMgr.java | 3 +-
 .../java/com/starrocks/load/ExportJob.java | 2 +-
 .../load/InsertOverwriteJobRunner.java | 8 +-
 .../com/starrocks/load/PartitionUtils.java | 7 +-
 .../load/loadv2/LoadsHistorySyncer.java | 2 +-
 .../starrocks/load/loadv2/SparkLoadJob.java | 7 +-
 .../java/com/starrocks/load/pipe/Pipe.java | 2 +-
 .../com/starrocks/load/pipe/PipeManager.java | 2 +-
 .../load/pipe/filelist/RepoCreator.java | 2 +-
 .../java/com/starrocks/meta/BDBDatabase.java | 33 +
 .../com/starrocks/meta/BDBTransaction.java | 33 +
 .../java/com/starrocks/meta/ByteCoder.java | 36 +
 .../com/starrocks/meta/EditLogCommitter.java | 27 +
 .../meta/LocalMetastoreInterface.java | 191 +
 .../com/starrocks/meta/LocalMetastoreV2.java | 468 ++
 .../com/starrocks/meta/MetadataHandler.java | 139 +
 .../starrocks/meta/ReplicaHierarchyId.java | 40 +
 .../com/starrocks/meta/StarRocksMetadata.java | 2661 ++++
 .../com/starrocks/meta/TabletHierarchyId.java | 33 +
 .../starrocks/meta/TabletInvertedIndexV2.java | 288 +
 .../com/starrocks/meta/TabletManager.java | 265 +
 .../main/java/com/starrocks/meta/TxnMeta.java | 24 +
 .../com/starrocks/meta/VersionManager.java | 22 +
 .../com/starrocks/persist/CreateDbInfo.java | 3 +-
 .../com/starrocks/persist/DatabaseInfo.java | 5 +-
 .../java/com/starrocks/persist/EditLog.java | 176 +-
 .../ModifyTablePropertyOperationLog.java | 3 +-
 .../com/starrocks/persist/OperationType.java | 369 +-
 .../PhysicalPartitionPersistInfoV2.java | 5 +-
 .../starrocks/persist/ReplicaPersistInfo.java | 3 +-
 .../com/starrocks/planner/MetaScanNode.java | 4 +-
 .../com/starrocks/planner/OlapScanNode.java | 2 +-
 .../starrocks/privilege/DbPEntryObject.java | 2 +-
 .../privilege/FunctionPEntryObject.java | 2 +-
 .../starrocks/privilege/PipePEntryObject.java | 6 +-
 .../privilege/TablePEntryObject.java | 4 +-
 .../com/starrocks/qe/DDLStmtExecutor.java | 37 +-
 .../java/com/starrocks/qe/ShowExecutor.java | 6 +-
 .../starrocks/scheduler/MVActiveChecker.java | 3 +-
 .../PartitionBasedMvRefreshProcessor.java | 4 +-
 .../scheduler/history/TableKeeper.java | 4 +-
 .../starrocks/scheduler/mv/IMTCreator.java | 2 +-
 .../mv/MVPCTRefreshListPartitioner.java | 2 +-
 .../scheduler/mv/MVPCTRefreshPartitioner.java | 2 +-
 .../mv/MVPCTRefreshRangePartitioner.java | 2 +-
 .../server/ElasticSearchTableFactory.java | 4 +-
 .../com/starrocks/server/GlobalStateMgr.java | 36 +-
 .../starrocks/server/JDBCTableFactory.java | 2 +-
 .../com/starrocks/server/LocalMetastore.java | 5623 +++++------------
 .../server/LocalMetastoreReplayer.java | 23 +
 .../com/starrocks/server/MetadataMgr.java | 24 +-
 .../starrocks/server/OlapTableFactory.java | 43 +-
 .../server/SharedDataStorageVolumeMgr.java | 6 +-
 .../service/FrontendServiceImpl.java | 18 +-
 .../main/java/com/starrocks/sql/Explain.java | 3 +-
 .../sql/analyzer/AstToStringBuilder.java | 2 +-
 .../sql/analyzer/ExpressionAnalyzer.java | 21 +-
 .../starrocks/sql/ast/pipe/ShowPipeStmt.java | 2 +-
 .../sql/optimizer/function/MetaFunctions.java | 10 +-
 .../rule/mv/MaterializedViewRule.java | 4 +-
 .../tree/AddDecodeNodeForDictStringRule.java | 5 +-
 .../tree/lowcardinality/DecodeCollector.java | 5 +-
 .../sql/plan/PlanFragmentBuilder.java | 13 +-
 .../statistic/StatisticExecutor.java | 3 +-
 .../starrocks/statistic/StatisticUtils.java | 12 +-
 .../statistic/StatisticsMetaManager.java | 25 +-
 .../statistic/sample/TabletSampleManager.java | 40 +-
 .../com/starrocks/task/AlterReplicaTask.java | 4 +-
 .../com/starrocks/task/DropReplicaTask.java | 4 +-
 .../starrocks/task/TabletTaskExecutor.java | 43 +-
 .../LakeTableTxnStateListener.java | 6 +-
 .../OlapTableTxnStateListener.java | 4 +-
 .../transaction/PublishVersionDaemon.java | 18 +-
 .../transaction/TransactionChecker.java | 7 +-
 .../com/starrocks/alter/AlterJobV2Test.java | 26 +-
 .../com/starrocks/alter/AlterTableTest.java | 48 +-
 .../java/com/starrocks/alter/AlterTest.java | 2093 +++---
 .../starrocks/alter/BatchRollupJobTest.java | 6 +-
 .../alter/LakeTableAlterMetaJobTest.java | 24 +-
 ...LakeTableAsyncFastSchemaChangeJobTest.java | 22 +-
 .../alter/LakeTableSchemaChangeJobTest.java | 54 +-
 .../alter/MaterializedViewHandlerTest.java | 4 +-
 .../alter/OnlineOptimizeJobV2Test.java | 2 +-
 .../starrocks/alter/OptimizeJobV2Test.java | 31 +-
 .../com/starrocks/alter/RollupJobV2Test.java | 52 +-
 .../alter/SchemaChangeJobV2Test.java | 24 +-
 .../starrocks/analysis/AccessTestUtil.java | 8 +-
 .../analysis/AlterMaterializedViewTest.java | 12 +-
 .../analysis/CreateMaterializedViewTest.java | 71 +-
 .../CreateSyncMaterializedViewTest.java | 20 +-
 ...ropMaterializedViewStmtNewPlannerTest.java | 2 +-
 .../RefreshMaterializedViewStatementTest.java | 3 +-
 .../analysis/RefreshMaterializedViewTest.java | 40 +-
 .../analysis/ShowColumnStmtTest.java | 2 +-
 .../ShowCreateMaterializedViewStmtTest.java | 6 +-
 .../analysis/ShowCreateViewStmtTest.java | 12 +-
 .../starrocks/backup/BackupHandlerTest.java | 3 +-
 .../com/starrocks/backup/CatalogMocker.java | 7 +-
 .../RestoreJobMaterializedViewTest.java | 5 +-
 .../backup/RestoreJobPrimaryKeyTest.java | 3 +-
 .../com/starrocks/backup/RestoreJobTest.java | 3 +-
 .../starrocks/binlog/BinlogManagerTest.java | 2 +-
 .../com/starrocks/catalog/AdminStmtTest.java | 9 +-
 .../com/starrocks/catalog/BrokerMgrTest.java | 4 -
 .../CatalogRecycleBinLakeTableTest.java | 53 +-
 .../catalog/CatalogRecycleBinTest.java | 18 +-
 .../starrocks/catalog/CatalogUtilsTest.java | 2 +-
 .../catalog/ColocateTableIndexTest.java | 18 +-
 .../starrocks/catalog/ColocateTableTest.java | 2 +-
 .../catalog/CreateTableLikeTest.java | 6 +-
 .../starrocks/catalog/CreateTableTest.java | 8 +-
 .../catalog/CreateTableWithAggStateTest.java | 2 +-
 .../catalog/CreateTableWithLocationTest.java | 248 +-
 .../com/starrocks/catalog/CreateViewTest.java | 2 +-
 .../starrocks/catalog/DropPartitionTest.java | 20 +-
 .../catalog/GlobalStateMgrTestUtil.java | 15 +-
 .../catalog/ListPartitionInfoTest.java | 2 +-
 .../starrocks/catalog/LocalTabletTest.java | 27 -
 ...plTest.java => PhysicalPartitionTest.java} | 10 +-
 .../catalog/StorageCoolDownTest.java | 10 +-
 .../catalog/StorageMediumInferTest.java | 12 +-
 .../starrocks/catalog/TabletStatMgrTest.java | 38 +-
 .../starrocks/catalog/TempPartitionTest.java | 19 +-
 .../clone/ColocateTableBalancerTest.java | 150 +-
 .../DiskAndTabletLoadReBalancerTest.java | 70 +-
 .../clone/DynamicPartitionSchedulerTest.java | 2 +-
 .../starrocks/clone/TabletSchedulerTest.java | 2 +-
 .../cluster/SystemInfoServiceTest.java | 2 +-
 .../connector/MockedMetadataMgr.java | 6 +-
 .../connector/hive/ReplayMetadataMgr.java | 4 +-
 .../iceberg/IcebergMetadataTest.java | 6 +-
 .../consistency/ConsistencyCheckerTest.java | 2 +-
 .../consistency/MetaRecoveryDaemonTest.java | 69 +-
 .../starrocks/http/StarRocksHttpTestCase.java | 37 +-
 .../rest/v2/TablePartitionActionTest.java | 8 +-
 .../java/com/starrocks/lake/AlterTest.java | 18 +-
 .../starrocks/lake/CreateLakeTableTest.java | 4 +-
 .../starrocks/lake/LakeTableHelperTest.java | 4 +-
 .../lake/compaction/CompactionJobTest.java | 6 +-
 .../lake/compaction/CompactionMgrTest.java | 3 +-
 .../compaction/CompactionSchedulerTest.java | 5 +-
 .../com/starrocks/leader/LeaderImplTest.java | 4 +-
 .../starrocks/leader/ReportHandlerTest.java | 4 +-
 .../load/loadv2/SparkLoadJobTest.java | 4 +-
 .../load/streamload/ShowStreamLoadTest.java | 2 +-
 ...eMaterializedViewRefreshSchemeLogTest.java | 13 +-
 .../com/starrocks/persist/EditLogTest.java | 17 -
 .../starrocks/persist/OperationTypeTest.java | 99 -
 .../RenameMaterializedViewLogTest.java | 4 +-
 .../starrocks/planner/OlapTableSinkTest.java | 10 +-
 .../planner/mv/MVMetaVersionRepairerTest.java | 14 +-
 .../privilege/AuthorizationMgrTest.java | 4 +-
 .../privilege/InvalidateObjectTest.java | 2 +-
 .../privilege/RBACMockedMetadataMgr.java | 6 +-
 .../LocationLabeledTableBalanceTest.java | 2 +-
 .../LocationMismatchRepairTest.java | 2 +-
 .../com/starrocks/qe/CoordinatorTest.java | 3 +-
 .../com/starrocks/qe/ShowExecutorTest.java | 4 +-
 .../com/starrocks/qe/ShowTableMockMeta.java | 6 +-
 .../java/com/starrocks/qe/ShowTablesTest.java | 2 +-
 .../com/starrocks/qe/VariableMgrTest.java | 3 -
 .../scheduler/SchedulerConnectorTestBase.java | 2 +-
 .../replication/ReplicationJobTest.java | 46 +-
 .../replication/ReplicationMgrTest.java | 26 +-
 ...titionBasedMvRefreshProcessorHiveTest.java | 116 +-
 ...ionBasedMvRefreshProcessorIcebergTest.java | 250 +-
 ...titionBasedMvRefreshProcessorJdbcTest.java | 270 +-
 ...nBasedMvRefreshProcessorOlapPart2Test.java | 44 +-
 ...titionBasedMvRefreshProcessorOlapTest.java | 60 +-
 .../starrocks/server/ConcurrentDDLTest.java | 43 +-
 .../starrocks/server/LocalMetaStoreTest.java | 62 +-
 .../com/starrocks/server/MetadataMgrTest.java | 5 +-
 .../server/WarehouseManagerTest.java | 3 +-
 .../service/FrontendServiceImplTest.java | 4 +-
 .../sql/analyzer/AnalyzeStmtTest.java | 6 +-
 .../sql/analyzer/AnalyzeUtilTest.java | 2 +-
 .../sql/analyzer/CTASAnalyzerTest.java | 2 +-
 .../sql/analyzer/PrivilegeCheckerTest.java | 6 +-
 .../starrocks/sql/ast/DescribeStmtTest.java | 2 +-
 .../starrocks/sql/optimizer/UtilsTest.java | 4 +-
 .../rewrite/PredicateReorderRuleTest.java | 2 +-
 .../DistributionPrunerRuleTest.java | 2 +-
 .../MVRewriteWithSchemaChangeTest.java | 3 +-
 .../MvRewritePartialPartitionTest.java | 2 +-
 .../materialization/MvRewriteTest.java | 2 +-
 .../CachedStatisticStorageTest.java | 2 +-
 .../statistics/StatisticsCalculatorTest.java | 12 +-
 .../sql/plan/ConnectorPlanTestBase.java | 6 +-
 .../starrocks/sql/plan/ExternalTableTest.java | 5 +-
 .../com/starrocks/sql/plan/LimitTest.java | 2 +-
 .../sql/plan/PlanFragmentWithCostTest.java | 4 +-
 .../sql/plan/PlanTestNoneDBBase.java | 4 +-
 .../java/com/starrocks/sql/plan/ScanTest.java | 2 +-
 .../com/starrocks/sql/plan/ViewPlanTest.java | 2 +-
 .../statistic/StatisticsCollectJobTest.java | 22 +-
 .../statistic/StatisticsExecutorTest.java | 3 +-
 .../system/SystemInfoServiceTest.java | 14 +-
 .../com/starrocks/task/AgentTaskTest.java | 6 +-
 .../transaction/GlobalTransactionMgrTest.java | 361 +-
 .../transaction/LakePublishBatchTest.java | 10 +-
 .../LakeTableTxnLogApplierTest.java | 19 +-
 .../starrocks/utframe/StarRocksAssert.java | 50 +-
 .../starrocks/utframe/TestWithFeService.java | 4 +-
 .../com/starrocks/utframe/UtFrameUtils.java | 8 +-
 267 files changed, 10479 insertions(+), 9900 deletions(-)
 delete mode 100644 fe/fe-core/src/main/java/com/starrocks/catalog/PhysicalPartitionImpl.java
 create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/BDBDatabase.java
 create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/BDBTransaction.java
 create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/ByteCoder.java
 create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/EditLogCommitter.java
create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/LocalMetastoreInterface.java create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/LocalMetastoreV2.java create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/MetadataHandler.java create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/ReplicaHierarchyId.java create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/StarRocksMetadata.java create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/TabletHierarchyId.java create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/TabletInvertedIndexV2.java create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/TabletManager.java create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/TxnMeta.java create mode 100644 fe/fe-core/src/main/java/com/starrocks/meta/VersionManager.java create mode 100644 fe/fe-core/src/main/java/com/starrocks/server/LocalMetastoreReplayer.java rename fe/fe-core/src/test/java/com/starrocks/catalog/{PhysicalPartitionImplTest.java => PhysicalPartitionTest.java} (92%) diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobExecutor.java b/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobExecutor.java index f8084cba191300..ece30ade3ebd63 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobExecutor.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobExecutor.java @@ -15,13 +15,16 @@ package com.starrocks.alter; import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import com.google.common.collect.Range; import com.starrocks.analysis.DateLiteral; import com.starrocks.analysis.ParseNode; import com.starrocks.analysis.TableName; import com.starrocks.analysis.TableRef; import com.starrocks.catalog.ColocateTableIndex; +import com.starrocks.catalog.Column; import com.starrocks.catalog.DataProperty; import com.starrocks.catalog.Database; import com.starrocks.catalog.DynamicPartitionProperty; @@ -34,11 +37,13 @@ import com.starrocks.catalog.PartitionType; import com.starrocks.catalog.RangePartitionInfo; import com.starrocks.catalog.Table; +import com.starrocks.catalog.TableProperty; import com.starrocks.catalog.Type; import com.starrocks.common.AnalysisException; import com.starrocks.common.DdlException; import com.starrocks.common.ErrorCode; import com.starrocks.common.ErrorReport; +import com.starrocks.common.ErrorReportException; import com.starrocks.common.InvalidOlapTableStateException; import com.starrocks.common.MaterializedViewExceptions; import com.starrocks.common.UserException; @@ -50,8 +55,11 @@ import com.starrocks.common.util.concurrent.lock.Locker; import com.starrocks.persist.AlterViewInfo; import com.starrocks.persist.BatchModifyPartitionsInfo; +import com.starrocks.persist.ColumnRenameInfo; import com.starrocks.persist.ModifyPartitionInfo; +import com.starrocks.persist.ModifyTablePropertyOperationLog; import com.starrocks.persist.SwapTableOperationLog; +import com.starrocks.persist.TableInfo; import com.starrocks.qe.ConnectContext; import com.starrocks.server.GlobalStateMgr; import com.starrocks.server.LocalMetastore; @@ -99,11 +107,13 @@ import java.time.LocalDateTime; import java.time.format.DateTimeFormatter; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; +import static com.starrocks.meta.StarRocksMetadata.inactiveRelatedMaterializedView; import static 
com.starrocks.sql.common.UnsupportedException.unsupportedException; public class AlterJobExecutor implements AstVisitor { @@ -270,7 +280,7 @@ public Void visitTableRenameClause(TableRenameClause clause, ConnectContext cont locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.WRITE); try { ErrorReport.wrapWithRuntimeException(() -> - GlobalStateMgr.getCurrentState().getLocalMetastore().renameTable(db, table, clause)); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().renameTable(db, table, clause)); } finally { locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.WRITE); } @@ -282,7 +292,7 @@ public Void visitAlterTableCommentClause(AlterTableCommentClause clause, Connect Locker locker = new Locker(); locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.WRITE); try { - ErrorReport.wrapWithRuntimeException(() -> GlobalStateMgr.getCurrentState().getLocalMetastore() + ErrorReport.wrapWithRuntimeException(() -> GlobalStateMgr.getCurrentState().getStarRocksMetadata() .alterTableComment(db, table, clause)); } finally { locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.WRITE); @@ -295,43 +305,38 @@ public Void visitSwapTableClause(SwapTableClause clause, ConnectContext context) // must hold db write lock Locker locker = new Locker(); locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.WRITE); + OlapTable origTable = (OlapTable) table; + String origTblName = origTable.getName(); + String newTblName = clause.getTblName(); + Table newTbl = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), newTblName); + if (newTbl == null || !(newTbl.isOlapOrCloudNativeTable() || newTbl.isMaterializedView())) { + throw new AlterJobException("Table " + newTblName + " does not exist or is not OLAP/LAKE table"); + } + OlapTable olapNewTbl = (OlapTable) newTbl; + + // First, we need to check whether the table to be operated on can be renamed try { - OlapTable origTable = (OlapTable) table; - String origTblName = origTable.getName(); - String newTblName = clause.getTblName(); - Table newTbl = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), newTblName); - if (newTbl == null || !(newTbl.isOlapOrCloudNativeTable() || newTbl.isMaterializedView())) { - throw new AlterJobException("Table " + newTblName + " does not exist or is not OLAP/LAKE table"); + olapNewTbl.checkAndSetName(origTblName, true); + origTable.checkAndSetName(newTblName, true); + + if (origTable.isMaterializedView() || newTbl.isMaterializedView()) { + if (!(origTable.isMaterializedView() && newTbl.isMaterializedView())) { + throw new AlterJobException("Materialized view can only SWAP WITH materialized view"); + } } - OlapTable olapNewTbl = (OlapTable) newTbl; - // First, we need to check whether the table to be operated on can be renamed - try { - olapNewTbl.checkAndSetName(origTblName, true); - origTable.checkAndSetName(newTblName, true); + // inactive the related MVs + inactiveRelatedMaterializedView(db, origTable, + MaterializedViewExceptions.inactiveReasonForBaseTableSwapped(origTblName)); + inactiveRelatedMaterializedView(db, olapNewTbl, + MaterializedViewExceptions.inactiveReasonForBaseTableSwapped(newTblName)); - if (origTable.isMaterializedView() || newTbl.isMaterializedView()) { - if (!(origTable.isMaterializedView() && newTbl.isMaterializedView())) { - throw new 
AlterJobException("Materialized view can only SWAP WITH materialized view"); - } - } + SwapTableOperationLog log = new SwapTableOperationLog(db.getId(), origTable.getId(), olapNewTbl.getId()); + GlobalStateMgr.getCurrentState().getLocalMetastore().swapTable(log); - // inactive the related MVs - LocalMetastore.inactiveRelatedMaterializedView(db, origTable, - MaterializedViewExceptions.inactiveReasonForBaseTableSwapped(origTblName)); - LocalMetastore.inactiveRelatedMaterializedView(db, olapNewTbl, - MaterializedViewExceptions.inactiveReasonForBaseTableSwapped(newTblName)); - - SwapTableOperationLog log = new SwapTableOperationLog(db.getId(), origTable.getId(), olapNewTbl.getId()); - GlobalStateMgr.getCurrentState().getAlterJobMgr().swapTableInternal(log); - GlobalStateMgr.getCurrentState().getEditLog().logSwapTable(log); - - LOG.info("finish swap table {}-{} with table {}-{}", origTable.getId(), origTblName, newTbl.getId(), - newTblName); - return null; - } catch (DdlException e) { - throw new AlterJobException(e.getMessage(), e); - } + LOG.info("finish swap table {}-{} with table {}-{}", origTable.getId(), origTblName, newTbl.getId(), + newTblName); + return null; } finally { locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.WRITE); } @@ -342,12 +347,13 @@ public Void visitModifyTablePropertiesClause(ModifyTablePropertiesClause clause, try { Map properties = clause.getProperties(); SchemaChangeHandler schemaChangeHandler = GlobalStateMgr.getCurrentState().getSchemaChangeHandler(); + LocalMetastore localMetastore = GlobalStateMgr.getServingState().getLocalMetastore(); if (properties.containsKey(PropertyAnalyzer.PROPERTIES_WRITE_QUORUM)) { - schemaChangeHandler.updateTableMeta(db, tableName.getTbl(), properties, TTabletMetaType.WRITE_QUORUM); + localMetastore.updateTableMeta(db, tableName.getTbl(), properties, TTabletMetaType.WRITE_QUORUM); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_INMEMORY)) { - schemaChangeHandler.updateTableMeta(db, tableName.getTbl(), properties, TTabletMetaType.INMEMORY); + localMetastore.updateTableMeta(db, tableName.getTbl(), properties, TTabletMetaType.INMEMORY); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_PRIMARY_INDEX_CACHE_EXPIRE_SEC)) { - schemaChangeHandler.updateTableMeta(db, tableName.getTbl(), properties, + localMetastore.updateTableMeta(db, tableName.getTbl(), properties, TTabletMetaType.PRIMARY_INDEX_CACHE_EXPIRE_SEC); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_ENABLE_PERSISTENT_INDEX)) { if (table.isCloudNativeTable()) { @@ -361,20 +367,20 @@ public Void visitModifyTablePropertiesClause(ModifyTablePropertiesClause clause, isSynchronous = false; } else { - schemaChangeHandler.updateTableMeta(db, tableName.getTbl(), properties, + localMetastore.updateTableMeta(db, tableName.getTbl(), properties, TTabletMetaType.ENABLE_PERSISTENT_INDEX); } } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_REPLICATED_STORAGE)) { - schemaChangeHandler.updateTableMeta(db, tableName.getTbl(), properties, + localMetastore.updateTableMeta(db, tableName.getTbl(), properties, TTabletMetaType.REPLICATED_STORAGE); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_BUCKET_SIZE)) { - schemaChangeHandler.updateTableMeta(db, tableName.getTbl(), properties, + localMetastore.updateTableMeta(db, tableName.getTbl(), properties, TTabletMetaType.BUCKET_SIZE); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_MUTABLE_BUCKET_NUM)) { - 
schemaChangeHandler.updateTableMeta(db, tableName.getTbl(), properties, + localMetastore.updateTableMeta(db, tableName.getTbl(), properties, TTabletMetaType.MUTABLE_BUCKET_NUM); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_ENABLE_LOAD_PROFILE)) { - schemaChangeHandler.updateTableMeta(db, tableName.getTbl(), properties, + localMetastore.updateTableMeta(db, tableName.getTbl(), properties, TTabletMetaType.ENABLE_LOAD_PROFILE); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_BINLOG_ENABLE) || properties.containsKey(PropertyAnalyzer.PROPERTIES_BINLOG_TTL) || @@ -401,7 +407,7 @@ public Void visitModifyTablePropertiesClause(ModifyTablePropertiesClause clause, GlobalStateMgr.getCurrentState().getColocateTableIndex() .modifyTableColocate(db, olapTable, colocateGroup, false, null); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_DISTRIBUTION_TYPE)) { - GlobalStateMgr.getCurrentState().getLocalMetastore().convertDistributionType(db, olapTable); + //GlobalStateMgr.getCurrentState().getLocalMetastore().convertDistributionType(db, olapTable); } else if (DynamicPartitionUtil.checkDynamicPartitionPropertiesExist(properties)) { if (!olapTable.dynamicPartitionExists()) { try { @@ -420,24 +426,22 @@ public Void visitModifyTablePropertiesClause(ModifyTablePropertiesClause clause, "modify dynamic_partition.buckets. Colocate tables must have same bucket number."); } } - GlobalStateMgr.getCurrentState().getLocalMetastore() - .modifyTableDynamicPartition(db, olapTable, properties); + modifyTableDynamicPartition(db, olapTable, properties); } else if (properties.containsKey("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_NUM)) { Preconditions.checkNotNull(properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM)); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .modifyTableDefaultReplicationNum(db, olapTable, properties); + modifyTableDefaultReplicationNum(db, olapTable, properties); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM)) { - GlobalStateMgr.getCurrentState().getLocalMetastore().modifyTableReplicationNum(db, olapTable, properties); + modifyTableReplicationNum(db, olapTable, properties); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_PARTITION_LIVE_NUMBER)) { - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTableProperties(db, olapTable, properties); + alterTableProperties(db, olapTable, properties); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM)) { - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTableProperties(db, olapTable, properties); + alterTableProperties(db, olapTable, properties); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL)) { - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTableProperties(db, olapTable, properties); + alterTableProperties(db, olapTable, properties); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION)) { - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTableProperties(db, olapTable, properties); + alterTableProperties(db, olapTable, properties); } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_LABELS_LOCATION)) { - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTableProperties(db, olapTable, properties); + alterTableProperties(db, olapTable, properties); } else { schemaChangeHandler.process(Lists.newArrayList(clause), db, olapTable); } @@ -453,6 +457,236 @@ public Void 
visitModifyTablePropertiesClause(ModifyTablePropertiesClause clause, return null; } + public void modifyTableDynamicPartition(Database db, OlapTable table, Map properties) + throws DdlException { + Map logProperties = new HashMap<>(properties); + TableProperty tableProperty = table.getTableProperty(); + if (tableProperty == null) { + DynamicPartitionUtil.checkAndSetDynamicPartitionProperty(table, properties); + } else { + Map analyzedDynamicPartition = DynamicPartitionUtil.analyzeDynamicPartition(properties); + tableProperty.modifyTableProperties(analyzedDynamicPartition); + tableProperty.buildDynamicProperty(); + } + + DynamicPartitionUtil.registerOrRemovePartitionScheduleInfo(db.getId(), table); + + ModifyTablePropertyOperationLog info = + new ModifyTablePropertyOperationLog(db.getId(), table.getId(), logProperties); + GlobalStateMgr.getCurrentState().getEditLog().logDynamicPartition(info); + } + + /** + * Set replication number for unpartitioned table. + * ATTN: only for unpartitioned table now. + * + * @param db + * @param table + * @param properties + * @throws DdlException + */ + // The caller need to hold the db write lock + public void modifyTableReplicationNum(Database db, OlapTable table, Map properties) + throws DdlException { + if (GlobalStateMgr.getCurrentState().getColocateTableIndex().isColocateTable(table.getId())) { + throw new DdlException("table " + table.getName() + " is colocate table, cannot change replicationNum"); + } + + String defaultReplicationNumName = "default." + PropertyAnalyzer.PROPERTIES_REPLICATION_NUM; + PartitionInfo partitionInfo = table.getPartitionInfo(); + if (partitionInfo.isRangePartition()) { + throw new DdlException( + "This is a range partitioned table, you should specify partitions with MODIFY PARTITION clause." + + " If you want to set default replication number, please use '" + defaultReplicationNumName + + "' instead of '" + PropertyAnalyzer.PROPERTIES_REPLICATION_NUM + "' to escape misleading."); + } + + // unpartitioned table + // update partition replication num + String partitionName = table.getName(); + Partition partition = table.getPartition(partitionName); + if (partition == null) { + throw new DdlException("Partition does not exist. name: " + partitionName); + } + + short replicationNum = Short.parseShort(properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM)); + boolean isInMemory = partitionInfo.getIsInMemory(partition.getId()); + DataProperty newDataProperty = partitionInfo.getDataProperty(partition.getId()); + partitionInfo.setReplicationNum(partition.getId(), replicationNum); + + // update table default replication num + table.setReplicationNum(replicationNum); + + // log + ModifyPartitionInfo info = new ModifyPartitionInfo(db.getId(), table.getId(), partition.getId(), + newDataProperty, replicationNum, isInMemory); + GlobalStateMgr.getCurrentState().getLocalMetastore().modifyPartition(info); + LOG.info("modify partition[{}-{}-{}] replication num to {}", db.getOriginName(), table.getName(), + partition.getName(), replicationNum); + } + + /** + * Set default replication number for a specified table. + * You can see the default replication number by Show Create Table stmt. 
+ * + * @param db + * @param table + * @param properties + */ + // The caller need to hold the db write lock + public void modifyTableDefaultReplicationNum(Database db, OlapTable table, Map properties) + throws DdlException { + Locker locker = new Locker(); + Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); + if (GlobalStateMgr.getCurrentState().getColocateTableIndex().isColocateTable(table.getId())) { + throw new DdlException("table " + table.getName() + " is colocate table, cannot change replicationNum"); + } + + // check unpartitioned table + PartitionInfo partitionInfo = table.getPartitionInfo(); + Partition partition = null; + boolean isUnpartitionedTable = false; + if (partitionInfo.getType() == PartitionType.UNPARTITIONED) { + isUnpartitionedTable = true; + String partitionName = table.getName(); + partition = table.getPartition(partitionName); + if (partition == null) { + throw new DdlException("Partition does not exist. name: " + partitionName); + } + } + + TableProperty tableProperty = table.getTableProperty(); + if (tableProperty == null) { + tableProperty = new TableProperty(properties); + table.setTableProperty(tableProperty); + } else { + tableProperty.modifyTableProperties(properties); + } + tableProperty.buildReplicationNum(); + + // update partition replication num if this table is unpartitioned table + if (isUnpartitionedTable) { + Preconditions.checkNotNull(partition); + partitionInfo.setReplicationNum(partition.getId(), tableProperty.getReplicationNum()); + } + + // log + ModifyTablePropertyOperationLog info = + new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); + GlobalStateMgr.getCurrentState().getEditLog().logModifyReplicationNum(info); + LOG.info("modify table[{}] replication num to {}", table.getName(), + properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM)); + } + + public void alterTableProperties(Database db, OlapTable table, Map properties) + throws DdlException { + Map propertiesToPersist = new HashMap<>(properties); + Map results = validateToBeModifiedProps(properties, table); + + TableProperty tableProperty = table.getTableProperty(); + for (String key : results.keySet()) { + if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_PARTITION_LIVE_NUMBER)) { + int partitionLiveNumber = (int) results.get(key); + tableProperty.getProperties().put(PropertyAnalyzer.PROPERTIES_PARTITION_LIVE_NUMBER, + String.valueOf(partitionLiveNumber)); + if (partitionLiveNumber == TableProperty.INVALID) { + GlobalStateMgr.getCurrentState().getDynamicPartitionScheduler().removeTtlPartitionTable(db.getId(), + table.getId()); + } else { + GlobalStateMgr.getCurrentState().getDynamicPartitionScheduler().registerTtlPartitionTable(db.getId(), + table.getId()); + } + tableProperty.setPartitionTTLNumber(partitionLiveNumber); + ModifyTablePropertyOperationLog info = + new ModifyTablePropertyOperationLog(db.getId(), table.getId(), + ImmutableMap.of(key, propertiesToPersist.get(key))); + GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); + } + if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM)) { + DataProperty dataProperty = (DataProperty) results.get(key); + TStorageMedium storageMedium = dataProperty.getStorageMedium(); + table.setStorageMedium(storageMedium); + tableProperty.getProperties() + .put(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TIME, + String.valueOf(dataProperty.getCooldownTimeMs())); + ModifyTablePropertyOperationLog info = + new 
ModifyTablePropertyOperationLog(db.getId(), table.getId(), + ImmutableMap.of(key, propertiesToPersist.get(key))); + GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); + } + if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL)) { + String storageCoolDownTTL = propertiesToPersist.get(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL); + tableProperty.getProperties().put(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL, storageCoolDownTTL); + tableProperty.buildStorageCoolDownTTL(); + ModifyTablePropertyOperationLog info = + new ModifyTablePropertyOperationLog(db.getId(), table.getId(), + ImmutableMap.of(key, propertiesToPersist.get(key))); + GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); + } + if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION)) { + String partitionDuration = propertiesToPersist.get(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION); + tableProperty.getProperties().put(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION, partitionDuration); + tableProperty.buildDataCachePartitionDuration(); + ModifyTablePropertyOperationLog info = + new ModifyTablePropertyOperationLog(db.getId(), table.getId(), + ImmutableMap.of(key, propertiesToPersist.get(key))); + GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); + } + if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_LABELS_LOCATION)) { + String location = propertiesToPersist.get(PropertyAnalyzer.PROPERTIES_LABELS_LOCATION); + table.setLocation(location); + ModifyTablePropertyOperationLog info = + new ModifyTablePropertyOperationLog(db.getId(), table.getId(), + ImmutableMap.of(key, propertiesToPersist.get(key))); + GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); + } + } + } + + private Map validateToBeModifiedProps(Map properties, OlapTable table) throws DdlException { + Map results = Maps.newHashMap(); + if (properties.containsKey(PropertyAnalyzer.PROPERTIES_PARTITION_LIVE_NUMBER)) { + int partitionLiveNumber = PropertyAnalyzer.analyzePartitionLiveNumber(properties, true); + results.put(PropertyAnalyzer.PROPERTIES_PARTITION_LIVE_NUMBER, partitionLiveNumber); + } + if (properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM)) { + try { + DataProperty dataProperty = DataProperty.getInferredDefaultDataProperty(); + dataProperty = PropertyAnalyzer.analyzeDataProperty(properties, dataProperty, false); + results.put(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM, dataProperty); + } catch (AnalysisException ex) { + throw new RuntimeException(ex.getMessage()); + } + } + if (properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL)) { + try { + PropertyAnalyzer.analyzeStorageCoolDownTTL(properties, true); + results.put(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL, null); + } catch (AnalysisException ex) { + throw new RuntimeException(ex.getMessage()); + } + } + if (properties.containsKey(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION)) { + try { + PropertyAnalyzer.analyzeDataCachePartitionDuration(properties); + results.put(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION, null); + } catch (AnalysisException ex) { + throw new RuntimeException(ex.getMessage()); + } + } + if (properties.containsKey(PropertyAnalyzer.PROPERTIES_LABELS_LOCATION)) { + if (table.getColocateGroup() != null) { + throw new DdlException("Cannot set location for colocate table"); + } + String locations = 
PropertyAnalyzer.analyzeLocation(properties, true); + results.put(PropertyAnalyzer.PROPERTIES_LABELS_LOCATION, locations); + } + if (!properties.isEmpty()) { + throw new DdlException("Modify failed because unknown properties: " + properties); + } + return results; + } + @Override public Void visitOptimizeClause(OptimizeClause clause, ConnectContext context) { unsupportedException("Not support"); @@ -493,13 +727,43 @@ public Void visitColumnRenameClause(ColumnRenameClause clause, ConnectContext co modifiedColumns.add(clause.getColName()); ErrorReport.wrapWithRuntimeException(() -> schemaChangeHandler.checkModifiedColumWithMaterializedViews((OlapTable) table, modifiedColumns)); - GlobalStateMgr.getCurrentState().getLocalMetastore().renameColumn(db, table, clause); + renameColumn(db, table, clause); } finally { locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.WRITE); } return null; } + public void renameColumn(Database db, Table table, ColumnRenameClause renameClause) { + if (!(table instanceof OlapTable)) { + throw ErrorReportException.report(ErrorCode.ERR_COLUMN_RENAME_ONLY_FOR_OLAP_TABLE); + } + if (db.isSystemDatabase() || db.isStatisticsDatabase()) { + throw ErrorReportException.report(ErrorCode.ERR_CANNOT_RENAME_COLUMN_IN_INTERNAL_DB, db.getFullName()); + } + OlapTable olapTable = (OlapTable) table; + if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { + throw ErrorReportException.report(ErrorCode.ERR_CANNOT_RENAME_COLUMN_OF_NOT_NORMAL_TABLE, olapTable.getState()); + } + + String colName = renameClause.getColName(); + String newColName = renameClause.getNewColName(); + + Column column = olapTable.getColumn(colName); + if (column == null) { + throw ErrorReportException.report(ErrorCode.ERR_BAD_FIELD_ERROR, colName, table.getName()); + } + Column currentColumn = olapTable.getColumn(newColName); + if (currentColumn != null) { + throw ErrorReportException.report(ErrorCode.ERR_DUP_FIELDNAME, newColName); + } + olapTable.renameColumn(colName, newColName); + + ColumnRenameInfo columnRenameInfo = new ColumnRenameInfo(db.getId(), table.getId(), colName, newColName); + GlobalStateMgr.getCurrentState().getLocalMetastore().renameColumn(columnRenameInfo); + LOG.info("rename column {} to {}", colName, newColName); + } + @Override public Void visitReorderColumnsClause(ReorderColumnsClause clause, ConnectContext context) { unsupportedException("Not support"); @@ -523,14 +787,49 @@ public Void visitRollupRenameClause(RollupRenameClause clause, ConnectContext co Locker locker = new Locker(); locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.WRITE); try { - ErrorReport.wrapWithRuntimeException(() -> - GlobalStateMgr.getCurrentState().getLocalMetastore().renameRollup(db, (OlapTable) table, clause)); + ErrorReport.wrapWithRuntimeException(() -> renameRollup(db, (OlapTable) table, clause)); } finally { locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.WRITE); } return null; } + public void renameRollup(Database db, OlapTable table, RollupRenameClause renameClause) throws DdlException { + if (table.getState() != OlapTable.OlapTableState.NORMAL) { + throw new DdlException("Table[" + table.getName() + "] is under " + table.getState()); + } + + String rollupName = renameClause.getRollupName(); + // check if it is base table name + if (rollupName.equals(table.getName())) { + throw new DdlException("Using ALTER TABLE RENAME to change table name"); + } + + String 
newRollupName = renameClause.getNewRollupName(); + if (rollupName.equals(newRollupName)) { + throw new DdlException("Same rollup name"); + } + + Map indexNameToIdMap = table.getIndexNameToId(); + if (indexNameToIdMap.get(rollupName) == null) { + throw new DdlException("Rollup index[" + rollupName + "] does not exists"); + } + + // check if name is already used + if (indexNameToIdMap.get(newRollupName) != null) { + throw new DdlException("Rollup name[" + newRollupName + "] is already used"); + } + + long indexId = indexNameToIdMap.remove(rollupName); + indexNameToIdMap.put(newRollupName, indexId); + + // log + TableInfo tableInfo = TableInfo.createForRollupRename(db.getId(), table.getId(), indexId, newRollupName); + GlobalStateMgr.getCurrentState().getLocalMetastore().renameRollup(tableInfo); + + LOG.info("rename rollup[{}] to {}", rollupName, newRollupName); + } + @Override public Void visitCompactionClause(CompactionClause clause, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> @@ -564,7 +863,7 @@ public Void visitAddPartitionClause(AddPartitionClause clause, ConnectContext co DynamicPartitionUtil.checkAlterAllowed((OlapTable) table); } ErrorReport.wrapWithRuntimeException(() -> - GlobalStateMgr.getCurrentState().getLocalMetastore().addPartitions(context, db, table.getName(), clause)); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().addPartitions(context, db, table.getName(), clause)); return null; } @@ -581,7 +880,7 @@ public Void visitDropPartitionClause(DropPartitionClause clause, ConnectContext } ErrorReport.wrapWithRuntimeException(() -> - GlobalStateMgr.getCurrentState().getLocalMetastore().dropPartition(db, table, clause)); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropPartition(db, table, clause)); return null; } @@ -595,14 +894,14 @@ public Void visitTruncatePartitionClause(TruncatePartitionClause clause, Connect ctx.setGlobalStateMgr(GlobalStateMgr.getCurrentState()); ErrorReport.wrapWithRuntimeException(() -> - GlobalStateMgr.getCurrentState().getLocalMetastore().truncateTable(tStmt, ctx)); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().truncateTable(tStmt, ctx)); return null; } @Override public Void visitReplacePartitionClause(ReplacePartitionClause clause, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> - GlobalStateMgr.getCurrentState().getLocalMetastore().replaceTempPartition(db, table.getName(), clause)); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().replaceTempPartition(db, table.getName(), clause)); return null; } @@ -616,7 +915,7 @@ public Void visitPartitionRenameClause(PartitionRenameClause clause, ConnectCont } ErrorReport.wrapWithRuntimeException(() -> - GlobalStateMgr.getCurrentState().getLocalMetastore().renamePartition(db, table, clause)); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().renamePartition(db, table, clause)); } finally { locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.WRITE); } @@ -777,9 +1076,7 @@ public Void visitAlterViewClause(AlterViewClause alterViewClause, ConnectContext alterViewClause.getInlineViewDef(), alterViewClause.getColumns(), ctx.getSessionVariable().getSqlMode(), alterViewClause.getComment()); - - GlobalStateMgr.getCurrentState().getAlterJobMgr().alterView(alterViewInfo); - GlobalStateMgr.getCurrentState().getEditLog().logModifyViewDef(alterViewInfo); + GlobalStateMgr.getCurrentState().getLocalMetastore().modifyViewDef(alterViewInfo); return null; } } diff --git 
a/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobMgr.java b/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobMgr.java index 3e9922bf5a0e7b..2910e745f8352e 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobMgr.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/AlterJobMgr.java @@ -34,73 +34,21 @@ package com.starrocks.alter; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; -import com.starrocks.authentication.AuthenticationMgr; -import com.starrocks.catalog.BaseTableInfo; -import com.starrocks.catalog.Column; -import com.starrocks.catalog.Database; -import com.starrocks.catalog.MaterializedIndexMeta; -import com.starrocks.catalog.MaterializedView; -import com.starrocks.catalog.OlapTable; -import com.starrocks.catalog.OlapTable.OlapTableState; -import com.starrocks.catalog.PartitionInfo; -import com.starrocks.catalog.PartitionType; -import com.starrocks.catalog.Table; -import com.starrocks.catalog.Table.TableType; -import com.starrocks.catalog.TableProperty; -import com.starrocks.catalog.View; import com.starrocks.common.DdlException; -import com.starrocks.common.ErrorCode; -import com.starrocks.common.ErrorReport; -import com.starrocks.common.InvalidOlapTableStateException; -import com.starrocks.common.MaterializedViewExceptions; -import com.starrocks.common.MetaNotFoundException; -import com.starrocks.common.UserException; -import com.starrocks.common.util.concurrent.lock.AutoCloseableLock; -import com.starrocks.common.util.concurrent.lock.LockType; -import com.starrocks.common.util.concurrent.lock.Locker; -import com.starrocks.persist.AlterMaterializedViewBaseTableInfosLog; -import com.starrocks.persist.AlterMaterializedViewStatusLog; -import com.starrocks.persist.AlterViewInfo; -import com.starrocks.persist.ChangeMaterializedViewRefreshSchemeLog; import com.starrocks.persist.ImageWriter; -import com.starrocks.persist.ModifyPartitionInfo; -import com.starrocks.persist.ModifyTablePropertyOperationLog; -import com.starrocks.persist.RenameMaterializedViewLog; -import com.starrocks.persist.SwapTableOperationLog; import com.starrocks.persist.gson.IForwardCompatibleObject; import com.starrocks.persist.metablock.SRMetaBlockEOFException; import com.starrocks.persist.metablock.SRMetaBlockException; import com.starrocks.persist.metablock.SRMetaBlockID; import com.starrocks.persist.metablock.SRMetaBlockReader; import com.starrocks.persist.metablock.SRMetaBlockWriter; -import com.starrocks.privilege.PrivilegeBuiltinConstants; -import com.starrocks.qe.ConnectContext; -import com.starrocks.scheduler.Task; -import com.starrocks.scheduler.TaskBuilder; -import com.starrocks.server.GlobalStateMgr; -import com.starrocks.server.LocalMetastore; -import com.starrocks.sql.analyzer.Analyzer; -import com.starrocks.sql.analyzer.MaterializedViewAnalyzer; -import com.starrocks.sql.analyzer.SemanticException; -import com.starrocks.sql.ast.AlterMaterializedViewStatusClause; -import com.starrocks.sql.ast.CreateMaterializedViewStatement; -import com.starrocks.sql.ast.DropMaterializedViewStmt; -import com.starrocks.sql.ast.QueryStatement; -import com.starrocks.sql.ast.StatementBase; -import com.starrocks.sql.ast.UserIdentity; -import com.starrocks.sql.optimizer.rule.transformation.materialization.MvUtils; -import com.starrocks.sql.parser.SqlParser; +import com.starrocks.sql.ast.CancelAlterTableStmt; +import com.starrocks.sql.ast.ShowAlterStmt; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import java.io.IOException; 
-import java.util.Comparator; -import java.util.List; import java.util.Map; -import java.util.Optional; -import java.util.stream.Collectors; public class AlterJobMgr { private static final Logger LOG = LogManager.getLogger(AlterJobMgr.class); @@ -130,408 +78,20 @@ public void stop() { clusterHandler.setStop(); } - public void processDropMaterializedView(DropMaterializedViewStmt stmt) throws DdlException, MetaNotFoundException { - // check db - String dbName = stmt.getDbName(); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbName); - if (db == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); - } - Locker locker = new Locker(); - if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) { - throw new DdlException("drop materialized failed. database:" + db.getFullName() + " not exist"); - } - try { - Table table = null; - boolean hasfindTable = false; - for (Table t : GlobalStateMgr.getCurrentState().getLocalMetastore().getTables(db.getId())) { - if (t instanceof OlapTable) { - OlapTable olapTable = (OlapTable) t; - for (MaterializedIndexMeta mvMeta : olapTable.getVisibleIndexMetas()) { - String indexName = olapTable.getIndexNameById(mvMeta.getIndexId()); - if (indexName == null) { - LOG.warn("OlapTable {} miss index {}", olapTable.getName(), mvMeta.getIndexId()); - continue; - } - if (indexName.equals(stmt.getMvName())) { - table = olapTable; - hasfindTable = true; - break; - } - } - if (hasfindTable) { - break; - } - } - } - if (table == null) { - throw new MetaNotFoundException("Materialized view " + stmt.getMvName() + " is not found"); - } - // check table type - if (table.getType() != TableType.OLAP) { - throw new DdlException( - "Do not support non-OLAP table [" + table.getName() + "] when drop materialized view"); - } - // check table state - OlapTable olapTable = (OlapTable) table; - if (olapTable.getState() != OlapTableState.NORMAL) { - throw InvalidOlapTableStateException.of(olapTable.getState(), olapTable.getName()); - } - // drop materialized view - materializedViewHandler.processDropMaterializedView(stmt, db, olapTable); - - } catch (MetaNotFoundException e) { - if (stmt.isSetIfExists()) { - LOG.info(e.getMessage()); - } else { - throw e; - } - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } - - public void alterMaterializedViewStatus(MaterializedView materializedView, String status, boolean isReplay) { - LOG.info("process change materialized view {} status to {}, isReplay: {}", - materializedView.getName(), status, isReplay); - if (AlterMaterializedViewStatusClause.ACTIVE.equalsIgnoreCase(status)) { - ConnectContext context = new ConnectContext(); - context.setGlobalStateMgr(GlobalStateMgr.getCurrentState()); - context.setQualifiedUser(AuthenticationMgr.ROOT_USER); - context.setCurrentUserIdentity(UserIdentity.ROOT); - context.setCurrentRoleIds(Sets.newHashSet(PrivilegeBuiltinConstants.ROOT_ROLE_ID)); - - String createMvSql = materializedView.getMaterializedViewDdlStmt(false, isReplay); - QueryStatement mvQueryStatement = null; - try { - mvQueryStatement = recreateMVQuery(materializedView, context, createMvSql); - } catch (SemanticException e) { - throw new SemanticException("Can not active materialized view [%s]" + - " because analyze materialized view define sql: \n\n%s" + - "\n\nCause an error: %s", materializedView.getName(), createMvSql, e.getDetailMsg()); - } - - // Skip checks to maintain eventual consistency when replay - List baseTableInfos = - 
Lists.newArrayList(MaterializedViewAnalyzer.getBaseTableInfos(mvQueryStatement, !isReplay)); - materializedView.setBaseTableInfos(baseTableInfos); - materializedView.getRefreshScheme().getAsyncRefreshContext().clearVisibleVersionMap(); - materializedView.onReload(); - materializedView.setActive(); - } else if (AlterMaterializedViewStatusClause.INACTIVE.equalsIgnoreCase(status)) { - materializedView.setInactiveAndReason(MANUAL_INACTIVE_MV_REASON); - } - } - /* - * Recreate the MV query and validate the correctness of syntax and schema + * used for handling CancelAlterStmt (for client is the CANCEL ALTER + * command). including SchemaChangeHandler and RollupHandler */ - public static QueryStatement recreateMVQuery(MaterializedView materializedView, - ConnectContext context, - String createMvSql) { - // If we could parse the MV sql successfully, and the schema of mv does not change, - // we could reuse the existing MV - Optional mayDb = GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(materializedView.getDbId()); - - // check database existing - String dbName = mayDb.orElseThrow(() -> - new SemanticException("database " + materializedView.getDbId() + " not exists")).getFullName(); - context.setDatabase(dbName); - - // Try to parse and analyze the creation sql - List statementBaseList = SqlParser.parse(createMvSql, context.getSessionVariable()); - CreateMaterializedViewStatement createStmt = (CreateMaterializedViewStatement) statementBaseList.get(0); - Analyzer.analyze(createStmt, context); - - // validate the schema - List newColumns = createStmt.getMvColumnItems().stream() - .sorted(Comparator.comparing(Column::getName)) - .collect(Collectors.toList()); - List existedColumns = materializedView.getColumns().stream() - .sorted(Comparator.comparing(Column::getName)) - .collect(Collectors.toList()); - if (newColumns.size() != existedColumns.size()) { - throw new SemanticException(String.format("number of columns changed: %d != %d", - existedColumns.size(), newColumns.size())); - } - for (int i = 0; i < existedColumns.size(); i++) { - Column existed = existedColumns.get(i); - Column created = newColumns.get(i); - if (!existed.isSchemaCompatible(created)) { - String message = MaterializedViewExceptions.inactiveReasonForColumnNotCompatible( - existed.toString(), created.toString()); - materializedView.setInactiveAndReason(message); - throw new SemanticException(message); - } - } - - return createStmt.getQueryStatement(); - } - - public void replayAlterMaterializedViewBaseTableInfos(AlterMaterializedViewBaseTableInfosLog log) { - long dbId = log.getDbId(); - long mvId = log.getMvId(); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); - MaterializedView mv = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), mvId); - if (mv == null) { - return; - } - - Locker locker = new Locker(); - locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(mv.getId()), LockType.WRITE); - try { - mv.replayAlterMaterializedViewBaseTableInfos(log); - } catch (Throwable e) { - LOG.warn("replay alter materialized-view status failed: {}", mv.getName(), e); - mv.setInactiveAndReason("replay alter status failed: " + e.getMessage()); - } finally { - locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(mv.getId()), LockType.WRITE); - } - } - - public void replayAlterMaterializedViewStatus(AlterMaterializedViewStatusLog log) { - long dbId = log.getDbId(); - long tableId = log.getTableId(); - Database db = 
GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); - MaterializedView mv = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getId(), tableId); - if (mv == null) { - return; - } - - Locker locker = new Locker(); - locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(mv.getId()), LockType.WRITE); - try { - alterMaterializedViewStatus(mv, log.getStatus(), true); - } catch (Throwable e) { - LOG.warn("replay alter materialized-view status failed: {}", mv.getName(), e); - mv.setInactiveAndReason("replay alter status failed: " + e.getMessage()); - } finally { - locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(mv.getId()), LockType.WRITE); - } - } - - public void replayRenameMaterializedView(RenameMaterializedViewLog log) { - long dbId = log.getDbId(); - long materializedViewId = log.getId(); - String newMaterializedViewName = log.getNewMaterializedViewName(); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); - MaterializedView oldMaterializedView = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getId(), materializedViewId); - if (oldMaterializedView != null) { - try (AutoCloseableLock ignore = new AutoCloseableLock(new Locker(), db.getId(), - Lists.newArrayList(oldMaterializedView.getId()), LockType.WRITE)) { - db.dropTable(oldMaterializedView.getName()); - oldMaterializedView.setName(newMaterializedViewName); - db.registerTableUnlocked(oldMaterializedView); - updateTaskDefinition(oldMaterializedView); - LOG.info("Replay rename materialized view [{}] to {}, id: {}", oldMaterializedView.getName(), - newMaterializedViewName, oldMaterializedView.getId()); - } catch (Throwable e) { - oldMaterializedView.setInactiveAndReason("replay rename failed: " + e.getMessage()); - LOG.warn("replay rename materialized-view failed: {}", oldMaterializedView.getName(), e); - } - } - } - - private void updateTaskDefinition(MaterializedView materializedView) { - Task currentTask = GlobalStateMgr.getCurrentState().getTaskManager().getTask( - TaskBuilder.getMvTaskName(materializedView.getId())); - if (currentTask != null) { - currentTask.setDefinition(materializedView.getTaskDefinition()); - currentTask.setPostRun(TaskBuilder.getAnalyzeMVStmt(materializedView.getName())); - } - } - - public void replayChangeMaterializedViewRefreshScheme(ChangeMaterializedViewRefreshSchemeLog log) { - long dbId = log.getDbId(); - long id = log.getId(); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); - if (db == null) { - return; - } - - MaterializedView oldMaterializedView = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getId(), id); - if (oldMaterializedView == null) { - LOG.warn("Ignore change materialized view refresh scheme log because table:" + id + "is null"); - return; - } - - Locker locker = new Locker(); - locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(oldMaterializedView.getId()), LockType.WRITE); - try { - final MaterializedView.MvRefreshScheme newMvRefreshScheme = new MaterializedView.MvRefreshScheme(); - final MaterializedView.MvRefreshScheme oldRefreshScheme = oldMaterializedView.getRefreshScheme(); - newMvRefreshScheme.setAsyncRefreshContext(oldRefreshScheme.getAsyncRefreshContext()); - newMvRefreshScheme.setLastRefreshTime(oldRefreshScheme.getLastRefreshTime()); - final MaterializedView.RefreshType refreshType = log.getRefreshType(); - final 
MaterializedView.AsyncRefreshContext asyncRefreshContext = log.getAsyncRefreshContext(); - newMvRefreshScheme.setType(refreshType); - newMvRefreshScheme.setAsyncRefreshContext(asyncRefreshContext); - - long maxChangedTableRefreshTime = - MvUtils.getMaxTablePartitionInfoRefreshTime( - log.getAsyncRefreshContext().getBaseTableVisibleVersionMap().values()); - newMvRefreshScheme.setLastRefreshTime(maxChangedTableRefreshTime); - - oldMaterializedView.setRefreshScheme(newMvRefreshScheme); - LOG.info( - "Replay materialized view [{}]'s refresh type to {}, start time to {}, " + - "interval step to {}, timeunit to {}, id: {}, maxChangedTableRefreshTime:{}", - oldMaterializedView.getName(), refreshType.name(), asyncRefreshContext.getStartTime(), - asyncRefreshContext.getStep(), - asyncRefreshContext.getTimeUnit(), oldMaterializedView.getId(), maxChangedTableRefreshTime); - } catch (Throwable e) { - oldMaterializedView.setInactiveAndReason("replay failed: " + e.getMessage()); - LOG.warn("replay change materialized-view refresh scheme failed: {}", - oldMaterializedView.getName(), e); - } finally { - locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(oldMaterializedView.getId()), LockType.WRITE); - } - } - - public void replayAlterMaterializedViewProperties(short opCode, ModifyTablePropertyOperationLog log) { - long dbId = log.getDbId(); - long tableId = log.getTableId(); - Map properties = log.getProperties(); - - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); - MaterializedView mv = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getId(), tableId); - if (mv == null) { - LOG.warn("Ignore change materialized view properties og because table:" + tableId + "is null"); - return; - } - - Locker locker = new Locker(); - locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(mv.getId()), LockType.WRITE); - try { - TableProperty tableProperty = mv.getTableProperty(); - if (tableProperty == null) { - tableProperty = new TableProperty(properties); - mv.setTableProperty(tableProperty.buildProperty(opCode)); - } else { - tableProperty.modifyTableProperties(properties); - tableProperty.buildProperty(opCode); - } - } catch (Throwable e) { - mv.setInactiveAndReason("replay failed: " + e.getMessage()); - LOG.warn("replay alter materialized-view properties failed: {}", mv.getName(), e); - } finally { - locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(mv.getId()), LockType.WRITE); - } - } - - public void replaySwapTable(SwapTableOperationLog log) { - try { - swapTableInternal(log); - } catch (DdlException e) { - LOG.warn("should not happen", e); - } - long dbId = log.getDbId(); - long origTblId = log.getOrigTblId(); - long newTblId = log.getNewTblId(); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); - OlapTable origTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), origTblId); - OlapTable newTbl = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), newTblId); - LOG.debug("finish replay swap table {}-{} with table {}-{}", origTblId, origTable.getName(), newTblId, - newTbl.getName()); - } - - /** - * The swap table operation works as follow: - * For example, SWAP TABLE A WITH TABLE B. 
- * must pre check A can be renamed to B and B can be renamed to A - */ - public void swapTableInternal(SwapTableOperationLog log) throws DdlException { - long dbId = log.getDbId(); - long origTblId = log.getOrigTblId(); - long newTblId = log.getNewTblId(); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); - OlapTable origTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), origTblId); - OlapTable newTbl = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), newTblId); - - String origTblName = origTable.getName(); - String newTblName = newTbl.getName(); - - // drop origin table and new table - db.dropTable(origTblName); - db.dropTable(newTblName); - - // rename new table name to origin table name and add it to database - newTbl.checkAndSetName(origTblName, false); - db.registerTableUnlocked(newTbl); - - // rename origin table name to new table name and add it to database - origTable.checkAndSetName(newTblName, false); - db.registerTableUnlocked(origTable); - - // swap dependencies of base table - if (origTable.isMaterializedView()) { - MaterializedView oldMv = (MaterializedView) origTable; - MaterializedView newMv = (MaterializedView) newTbl; - updateTaskDefinition(oldMv); - updateTaskDefinition(newMv); - } - } - - public void alterView(AlterViewInfo alterViewInfo) { - long dbId = alterViewInfo.getDbId(); - long tableId = alterViewInfo.getTableId(); - String inlineViewDef = alterViewInfo.getInlineViewDef(); - List newFullSchema = alterViewInfo.getNewFullSchema(); - String comment = alterViewInfo.getComment(); - - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); - View view = (View) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), tableId); - - Locker locker = new Locker(); - locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(view.getId()), LockType.WRITE); - try { - String viewName = view.getName(); - view.setInlineViewDefWithSqlMode(inlineViewDef, alterViewInfo.getSqlMode()); - try { - view.init(); - } catch (UserException e) { - throw new AlterJobException("failed to init view stmt", e); - } - view.setNewFullSchema(newFullSchema); - view.setComment(comment); - LocalMetastore.inactiveRelatedMaterializedView(db, view, - MaterializedViewExceptions.inactiveReasonForBaseViewChanged(viewName)); - db.dropTable(viewName); - db.registerTableUnlocked(view); - - LOG.info("replay modify view[{}] definition to {}", viewName, inlineViewDef); - } finally { - locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(view.getId()), LockType.WRITE); - } - } - - public void replayModifyPartition(ModifyPartitionInfo info) { - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(info.getDbId()); - OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getId(), info.getTableId()); - - Locker locker = new Locker(); - locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.WRITE); - try { - PartitionInfo partitionInfo = olapTable.getPartitionInfo(); - if (info.getDataProperty() != null) { - partitionInfo.setDataProperty(info.getPartitionId(), info.getDataProperty()); - } - if (info.getReplicationNum() != (short) -1) { - short replicationNum = info.getReplicationNum(); - partitionInfo.setReplicationNum(info.getPartitionId(), replicationNum); - // update default replication num if this table is unpartitioned table - if 
(partitionInfo.getType() == PartitionType.UNPARTITIONED) { - olapTable.setReplicationNum(replicationNum); - } - } - partitionInfo.setIsInMemory(info.getPartitionId(), info.isInMemory()); - } finally { - locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.WRITE); + public void cancelAlter(CancelAlterTableStmt stmt, String reason) throws DdlException { + if (stmt.getAlterType() == ShowAlterStmt.AlterType.ROLLUP) { + materializedViewHandler.cancel(stmt, reason); + } else if (stmt.getAlterType() == ShowAlterStmt.AlterType.COLUMN + || stmt.getAlterType() == ShowAlterStmt.AlterType.OPTIMIZE) { + schemaChangeHandler.cancel(stmt, reason); + } else if (stmt.getAlterType() == ShowAlterStmt.AlterType.MATERIALIZED_VIEW) { + materializedViewHandler.cancelMV(stmt); + } else { + throw new DdlException("Cancel " + stmt.getAlterType() + " does not implement yet"); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/AlterMVJobExecutor.java b/fe/fe-core/src/main/java/com/starrocks/alter/AlterMVJobExecutor.java index fa9e6f2b6d70fa..1263b0283fdcd1 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/AlterMVJobExecutor.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/AlterMVJobExecutor.java @@ -82,7 +82,7 @@ public Void visitTableRenameClause(TableRenameClause clause, ConnectContext cont final RenameMaterializedViewLog renameMaterializedViewLog = new RenameMaterializedViewLog(table.getId(), db.getId(), newMvName); updateTaskDefinition((MaterializedView) table); - GlobalStateMgr.getCurrentState().getEditLog().logMvRename(renameMaterializedViewLog); + GlobalStateMgr.getCurrentState().getLocalMetastore().renameMaterializedView(renameMaterializedViewLog); LOG.info("rename materialized view[{}] to {}, id: {}", oldMvName, newMvName, table.getId()); return null; } @@ -307,7 +307,7 @@ public Void visitModifyTablePropertiesClause(ModifyTablePropertiesClause modifyT if (isChanged) { ModifyTablePropertyOperationLog log = new ModifyTablePropertyOperationLog(materializedView.getDbId(), materializedView.getId(), propClone); - GlobalStateMgr.getCurrentState().getEditLog().logAlterMaterializedViewProperties(log); + GlobalStateMgr.getCurrentState().getLocalMetastore().alterMaterializedViewProperties(log); } LOG.info("alter materialized view properties {}, id: {}", propClone, materializedView.getId()); return null; @@ -378,7 +378,7 @@ public Void visitRefreshSchemeClause(RefreshSchemeClause refreshSchemeDesc, Conn } final ChangeMaterializedViewRefreshSchemeLog log = new ChangeMaterializedViewRefreshSchemeLog(materializedView); - GlobalStateMgr.getCurrentState().getEditLog().logMvChangeRefreshScheme(log); + GlobalStateMgr.getCurrentState().getLocalMetastore().changeMaterializedRefreshScheme(log); } finally { locker.unLockDatabase(db.getId(), LockType.WRITE); } @@ -403,11 +403,11 @@ public Void visitAlterMaterializedViewStatusClause(AlterMaterializedViewStatusCl return null; } - GlobalStateMgr.getCurrentState().getAlterJobMgr(). 
- alterMaterializedViewStatus(materializedView, status, false); + GlobalStateMgr.getCurrentState().getLocalMetastore() + .alterMaterializedViewStatus(materializedView, status, false); // for manual refresh type, do not refresh if (materializedView.getRefreshScheme().getType() != MaterializedView.RefreshType.MANUAL) { - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .refreshMaterializedView(dbName, materializedView.getName(), true, null, Constants.TaskRunPriority.NORMAL.value(), true, false); } @@ -418,14 +418,14 @@ public Void visitAlterMaterializedViewStatusClause(AlterMaterializedViewStatusCl LOG.warn("Setting the materialized view {}({}) to inactive because " + "user use alter materialized view set status to inactive", materializedView.getName(), materializedView.getId()); - GlobalStateMgr.getCurrentState().getAlterJobMgr(). - alterMaterializedViewStatus(materializedView, status, false); + GlobalStateMgr.getCurrentState().getLocalMetastore() + .alterMaterializedViewStatus(materializedView, status, false); } else { throw new AlterJobException("Unsupported modification materialized view status:" + status); } AlterMaterializedViewStatusLog log = new AlterMaterializedViewStatusLog(materializedView.getDbId(), materializedView.getId(), status); - GlobalStateMgr.getCurrentState().getEditLog().logAlterMvStatus(log); + GlobalStateMgr.getCurrentState().getLocalMetastore().alterMvStatus(log); return null; } catch (DdlException | MetaNotFoundException e) { throw new AlterJobException(e.getMessage(), e); diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/CompactionHandler.java b/fe/fe-core/src/main/java/com/starrocks/alter/CompactionHandler.java index 64ec32d45919ae..f48f7524ffd457 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/CompactionHandler.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/CompactionHandler.java @@ -30,6 +30,7 @@ import com.starrocks.lake.compaction.PartitionIdentifier; import com.starrocks.qe.ShowResultSet; import com.starrocks.server.GlobalStateMgr; +import com.starrocks.server.LocalMetastore; import com.starrocks.server.RunMode; import com.starrocks.sql.ast.AlterClause; import com.starrocks.sql.ast.CompactionClause; @@ -47,7 +48,7 @@ import java.util.ArrayList; import java.util.List; -public class CompactionHandler { +public class CompactionHandler { private static final Logger LOG = LogManager.getLogger(CompactionHandler.class); // add synchronized to avoid process 2 or more stmts at same time @@ -82,11 +83,16 @@ public static synchronized ShowResultSet process(List<AlterClause> alterClauses, locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.READ); try { List<Partition> allPartitions = findAllPartitions(olapTable, compactionClause); + + LocalMetastore localMetastore = GlobalStateMgr.getCurrentState().getLocalMetastore(); for (Partition partition : allPartitions) { - for (PhysicalPartition physicalPartition : partition.getSubPartitions()) { - for (MaterializedIndex index : physicalPartition.getMaterializedIndices( - MaterializedIndex.IndexExtState.VISIBLE)) { - for (Tablet tablet : index.getTablets()) { + List<PhysicalPartition> physicalPartitionList = localMetastore.getAllPhysicalPartition(partition); + for (PhysicalPartition physicalPartition : physicalPartitionList) { + List<MaterializedIndex> materializedIndices = localMetastore + .getMaterializedIndices(physicalPartition, MaterializedIndex.IndexExtState.VISIBLE); + for (MaterializedIndex materializedIndex : materializedIndices) { + List<Tablet> tabletList
= localMetastore.getAllTablets(materializedIndex); + for (Tablet tablet : tabletList) { for (Long backendId : ((LocalTablet) tablet).getBackendIds()) { backendToTablets.put(backendId, tablet.getId()); } diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/LakeTableAlterJobV2Builder.java b/fe/fe-core/src/main/java/com/starrocks/alter/LakeTableAlterJobV2Builder.java index fecad154bd0c4a..7995b1fd71fc69 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/LakeTableAlterJobV2Builder.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/LakeTableAlterJobV2Builder.java @@ -75,7 +75,11 @@ public AlterJobV2 build() throws UserException { long partitionId = partition.getParentId(); long physicalPartitionId = partition.getId(); long shardGroupId = partition.getShardGroupId(); - List originTablets = partition.getIndex(originIndexId).getTablets(); + + MaterializedIndex materializedIndex = partition.getIndex(originIndexId); + List originTablets = GlobalStateMgr.getCurrentState().getLocalMetastore() + .getAllTablets(materializedIndex); + // TODO: It is not good enough to create shards into the same group id, schema change PR needs to // revise the code again. List originTabletIds = originTablets.stream().map(Tablet::getId).collect(Collectors.toList()); diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/LakeTableAlterMetaJobBase.java b/fe/fe-core/src/main/java/com/starrocks/alter/LakeTableAlterMetaJobBase.java index 8eb3a5e0a4b7c4..a23f8fb1a7b276 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/LakeTableAlterMetaJobBase.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/LakeTableAlterMetaJobBase.java @@ -263,7 +263,8 @@ boolean publishVersion() { long commitVersion = commitVersionMap.get(partitionId); Map dirtyIndexMap = physicalPartitionIndexMap.row(partitionId); for (MaterializedIndex index : dirtyIndexMap.values()) { - Utils.publishVersion(index.getTablets(), txnInfo, commitVersion - 1, commitVersion, + List tabletList = GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(index); + Utils.publishVersion(tabletList, txnInfo, commitVersion - 1, commitVersion, warehouseId); } } @@ -305,7 +306,7 @@ public void updatePhysicalPartitionTabletMeta(Database db, OlapTable table, Part locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.READ); } for (MaterializedIndex index : indexList) { - updateIndexTabletMeta(db, table, partition, index); + updateIndexTabletMeta(db, table, partition.getDefaultPhysicalPartition(), index); } } @@ -319,7 +320,7 @@ public void updateIndexTabletMeta(Database db, OlapTable table, PhysicalPartitio Locker locker = new Locker(); locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.READ); try { - tablets = new ArrayList<>(index.getTablets()); + tablets = GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(index); } finally { locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(table.getId()), LockType.READ); } diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/LakeTableSchemaChangeJob.java b/fe/fe-core/src/main/java/com/starrocks/alter/LakeTableSchemaChangeJob.java index 72e11235cf4e2a..25b15fd24bd7b1 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/LakeTableSchemaChangeJob.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/LakeTableSchemaChangeJob.java @@ -90,7 +90,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import 
java.util.stream.Collectors; import javax.annotation.Nullable; import javax.validation.constraints.NotNull; @@ -338,7 +337,9 @@ protected void runPendingJob() throws AlterCancelException { if (enableTabletCreationOptimization) { numTablets = physicalPartitionIndexMap.size(); } else { - numTablets = physicalPartitionIndexMap.values().stream().map(MaterializedIndex::getTablets) + numTablets = physicalPartitionIndexMap.values().stream() + .map(materializedIndex -> GlobalStateMgr.getCurrentState().getLocalMetastore() + .getAllTablets(materializedIndex)) .mapToLong(List::size).sum(); } countDownLatch = new MarkedCountDownLatch<>((int) numTablets); @@ -373,7 +374,7 @@ protected void runPendingJob() throws AlterCancelException { .build().toTabletSchema(); boolean createSchemaFile = true; - for (Tablet shadowTablet : shadowIdx.getTablets()) { + for (Tablet shadowTablet : GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(shadowIdx)) { long shadowTabletId = shadowTablet.getId(); ComputeNode computeNode = GlobalStateMgr.getCurrentState().getWarehouseMgr() .getComputeNodeAssignedToTablet(warehouseId, (LakeTablet) shadowTablet); @@ -418,7 +419,7 @@ protected void runPendingJob() throws AlterCancelException { } sendAgentTaskAndWait(batchTask, countDownLatch, Config.tablet_create_timeout_second * numTablets, - waitingCreatingReplica, isCancelling); + waitingCreatingReplica, isCancelling); // Add shadow indexes to table. try (WriteLockedDatabase db = getWriteLockedDatabase(dbId)) { @@ -483,7 +484,7 @@ protected void runWaitingTxnJob() throws AlterCancelException { for (Map.Entry entry : shadowIndexMap.entrySet()) { long shadowIdxId = entry.getKey(); MaterializedIndex shadowIdx = entry.getValue(); - for (Tablet shadowTablet : shadowIdx.getTablets()) { + for (Tablet shadowTablet : GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(shadowIdx)) { ComputeNode computeNode = GlobalStateMgr.getCurrentState().getWarehouseMgr() .getComputeNodeAssignedToTablet(warehouseId, (LakeTablet) shadowTablet); if (computeNode == null) { @@ -610,7 +611,7 @@ protected void runFinishedRewritingJob() throws AlterCancelException { // Delete tablet and shards for (MaterializedIndex droppedIndex : droppedIndexes) { - List shards = droppedIndex.getTablets().stream().map(Tablet::getId).collect(Collectors.toList()); + List shards = GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTabletIDs(droppedIndex); // TODO: what if unusedShards deletion is partially successful? 
StarMgrMetaSyncer.dropTabletAndDeleteShard(shards, GlobalStateMgr.getCurrentState().getStarOSAgent()); } @@ -668,7 +669,9 @@ boolean publishVersion() { long commitVersion = commitVersionMap.get(partitionId); Map shadowIndexMap = physicalPartitionIndexMap.row(partitionId); for (MaterializedIndex shadowIndex : shadowIndexMap.values()) { - Utils.publishVersion(shadowIndex.getTablets(), txnInfo, 1, commitVersion, warehouseId); + Utils.publishVersion( + GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(shadowIndex), + txnInfo, 1, commitVersion, warehouseId); } } return true; @@ -700,7 +703,7 @@ private void inactiveRelatedMv(Set modifiedColumns, @NotNull OlapTable t Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); for (MvId mvId : tbl.getRelatedMaterializedViews()) { MaterializedView mv = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getId(), mvId.getId()); + .getTable(db.getId(), mvId.getId()); if (mv == null) { LOG.warn("Ignore materialized view {} does not exists", mvId); continue; diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/MaterializedViewHandler.java b/fe/fe-core/src/main/java/com/starrocks/alter/MaterializedViewHandler.java index d828f8db9fd4c8..5adf90276f6463 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/MaterializedViewHandler.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/MaterializedViewHandler.java @@ -46,6 +46,7 @@ import com.starrocks.catalog.LocalTablet; import com.starrocks.catalog.MaterializedIndex; import com.starrocks.catalog.MaterializedIndex.IndexState; +import com.starrocks.catalog.MaterializedIndexMeta; import com.starrocks.catalog.OlapTable; import com.starrocks.catalog.OlapTable.OlapTableState; import com.starrocks.catalog.Partition; @@ -61,16 +62,15 @@ import com.starrocks.common.ErrorCode; import com.starrocks.common.ErrorReport; import com.starrocks.common.FeConstants; +import com.starrocks.common.InvalidOlapTableStateException; import com.starrocks.common.MetaNotFoundException; import com.starrocks.common.util.ListComparator; import com.starrocks.common.util.PropertyAnalyzer; import com.starrocks.common.util.Util; -import com.starrocks.common.util.concurrent.lock.AutoCloseableLock; import com.starrocks.common.util.concurrent.lock.LockType; import com.starrocks.common.util.concurrent.lock.Locker; import com.starrocks.persist.BatchDropInfo; import com.starrocks.persist.DropInfo; -import com.starrocks.persist.EditLog; import com.starrocks.qe.OriginStatement; import com.starrocks.qe.ShowResultSet; import com.starrocks.server.GlobalStateMgr; @@ -285,7 +285,7 @@ public void processBatchAddRollup(List alterClauses, Database db, O TabletInvertedIndex tabletInvertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); for (RollupJobV2 rollupJobV2 : rollupNameJobMap.values()) { for (MaterializedIndex index : rollupJobV2.getPartitionIdToRollupIndex().values()) { - for (Tablet tablet : index.getTablets()) { + for (Tablet tablet : GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(index)) { tabletInvertedIndex.deleteTablet(tablet.getId()); } } @@ -459,7 +459,7 @@ private List checkAndPrepareMaterializedView(CreateMaterializedViewStmt // check if mv index already exists in db - if (GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetTable(db.getFullName(), mvName).isPresent()) { + if (GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetTable(db.getFullName(), mvName).isPresent()) { throw new DdlException("Table 
[" + mvName + "] already exists in the db " + db.getFullName()); } @@ -753,14 +753,75 @@ public void processBatchDropRollup(List dropRollupClauses, Database } // batch log drop rollup operation - EditLog editLog = GlobalStateMgr.getCurrentState().getEditLog(); long dbId = db.getId(); long tableId = olapTable.getId(); - editLog.logBatchDropRollup(new BatchDropInfo(dbId, tableId, indexIdSet)); + GlobalStateMgr.getCurrentState().getLocalMetastore() + .batchDropRollup(new BatchDropInfo(dbId, tableId, indexIdSet)); LOG.info("finished drop rollup index[{}] in table[{}]", String.join("", rollupNameSet), olapTable.getName()); } + public void processDropMaterializedView(DropMaterializedViewStmt stmt) throws DdlException, MetaNotFoundException { + // check db + String dbName = stmt.getDbName(); + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbName); + if (db == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); + } + Locker locker = new Locker(); + if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) { + throw new DdlException("drop materialized failed. database:" + db.getFullName() + " not exist"); + } + try { + Table table = null; + boolean hasfindTable = false; + for (Table t : GlobalStateMgr.getCurrentState().getLocalMetastore().getTables(db.getId())) { + if (t instanceof OlapTable) { + OlapTable olapTable = (OlapTable) t; + for (MaterializedIndexMeta mvMeta : olapTable.getVisibleIndexMetas()) { + String indexName = olapTable.getIndexNameById(mvMeta.getIndexId()); + if (indexName == null) { + LOG.warn("OlapTable {} miss index {}", olapTable.getName(), mvMeta.getIndexId()); + continue; + } + if (indexName.equals(stmt.getMvName())) { + table = olapTable; + hasfindTable = true; + break; + } + } + if (hasfindTable) { + break; + } + } + } + if (table == null) { + throw new MetaNotFoundException("Materialized view " + stmt.getMvName() + " is not found"); + } + // check table type + if (table.getType() != Table.TableType.OLAP) { + throw new DdlException( + "Do not support non-OLAP table [" + table.getName() + "] when drop materialized view"); + } + // check table state + OlapTable olapTable = (OlapTable) table; + if (olapTable.getState() != OlapTableState.NORMAL) { + throw InvalidOlapTableStateException.of(olapTable.getState(), olapTable.getName()); + } + // drop materialized view + processDropMaterializedView(stmt, db, olapTable); + + } catch (MetaNotFoundException e) { + if (stmt.isSetIfExists()) { + LOG.info(e.getMessage()); + } else { + throw e; + } + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } + public void processDropMaterializedView(DropMaterializedViewStmt dropMaterializedViewStmt, Database db, OlapTable olapTable) throws DdlException, MetaNotFoundException { Locker locker = new Locker(); @@ -772,8 +833,9 @@ public void processDropMaterializedView(DropMaterializedViewStmt dropMaterialize // Step2; drop data in memory long mvIndexId = dropMaterializedView(mvName, olapTable); // Step3: log drop mv operation - EditLog editLog = GlobalStateMgr.getCurrentState().getEditLog(); - editLog.logDropRollup(new DropInfo(db.getId(), olapTable.getId(), mvIndexId, false)); + + GlobalStateMgr.getCurrentState().getLocalMetastore() + .dropRollup(new DropInfo(db.getId(), olapTable.getId(), mvIndexId, false)); LOG.info("finished drop materialized view [{}] in table [{}]", mvName, olapTable.getName()); } catch (MetaNotFoundException e) { if (dropMaterializedViewStmt.isSetIfExists()) { @@ -836,33 +898,6 @@ private long 
dropMaterializedView(String mvName, OlapTable olapTable) { return mvIndexId; } - public void replayDropRollup(DropInfo dropInfo, GlobalStateMgr globalStateMgr) { - Database db = globalStateMgr.getLocalMetastore().getDb(dropInfo.getDbId()); - long tableId = dropInfo.getTableId(); - long rollupIndexId = dropInfo.getIndexId(); - OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), tableId); - - try (AutoCloseableLock ignore = - new AutoCloseableLock(new Locker(), db.getId(), Lists.newArrayList(tableId), LockType.WRITE)) { - TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); - - for (PhysicalPartition partition : olapTable.getPhysicalPartitions()) { - MaterializedIndex rollupIndex = partition.deleteRollupIndex(rollupIndexId); - - if (!GlobalStateMgr.isCheckpointThread()) { - // remove from inverted index - for (Tablet tablet : rollupIndex.getTablets()) { - invertedIndex.deleteTablet(tablet.getId()); - } - } - } - - String rollupIndexName = olapTable.getIndexNameById(rollupIndexId); - olapTable.deleteIndexInfo(rollupIndexName); - } - LOG.info("replay drop rollup {}", dropInfo.getIndexId()); - } - @Override protected void runAfterCatalogReady() { super.runAfterCatalogReady(); diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/OnlineOptimizeJobV2.java b/fe/fe-core/src/main/java/com/starrocks/alter/OnlineOptimizeJobV2.java index 5d0ed124e5e762..cd897bcd497700 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/OnlineOptimizeJobV2.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/OnlineOptimizeJobV2.java @@ -446,7 +446,8 @@ private void onTaskFinished(Database db, OlapTable targetTable, OptimizeTask rew Set sourceTablets = Sets.newHashSet(); Partition partition = targetTable.getPartition(sourcePartitionName); - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (MaterializedIndex index + : partition.getDefaultPhysicalPartition().getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { sourceTablets.addAll(index.getTablets()); } @@ -548,9 +549,10 @@ private void cancelInternal() { Partition partition = targetTable.getPartition(pid); if (partition != null) { - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { // hash set is able to deduplicate the elements - tmpTablets.addAll(index.getTablets()); + tmpTablets.addAll(GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(index)); } targetTable.dropTempPartition(partition.getName(), true); } else { @@ -652,7 +654,8 @@ private void onReplayFinished(OnlineOptimizeJobV2 replayedJob, OlapTable targetT for (long id : replayedJob.getTmpPartitionIds()) { Partition partition = targetTable.getPartition(id); if (partition != null) { - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { sourceTablets.addAll(index.getTablets()); } targetTable.dropTempPartition(partition.getName(), true); diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/OptimizeJobV2.java b/fe/fe-core/src/main/java/com/starrocks/alter/OptimizeJobV2.java index acde4f1e9e7aaa..c595410c7ef1e3 100644 --- 
a/fe/fe-core/src/main/java/com/starrocks/alter/OptimizeJobV2.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/OptimizeJobV2.java @@ -260,7 +260,7 @@ protected void runWaitingTxnJob() throws AlterCancelException { } ); tableColumnNames = targetTable.getBaseSchema().stream().filter(column -> !column.isGeneratedColumn()) - .map(col -> ParseUtil.backquote(col.getName())).collect(Collectors.toList()); + .map(col -> ParseUtil.backquote(col.getName())).collect(Collectors.toList()); } finally { locker.unLockDatabase(db.getId(), LockType.READ); } @@ -270,8 +270,8 @@ protected void runWaitingTxnJob() throws AlterCancelException { String tmpPartitionName = tmpPartitionNames.get(i); String partitionName = partitionNames.get(i); String rewriteSql = "insert into " + tableName + " TEMPORARY PARTITION (" - + tmpPartitionName + ") select " + Joiner.on(", ").join(tableColumnNames) - + " from " + tableName + " partition (" + partitionName + ")"; + + tmpPartitionName + ") select " + Joiner.on(", ").join(tableColumnNames) + + " from " + tableName + " partition (" + partitionName + ")"; String taskName = getName() + "_" + tmpPartitionName; OptimizeTask rewriteTask = TaskBuilder.buildOptimizeTask(taskName, properties, rewriteSql, dbName); rewriteTask.setPartitionName(partitionName); @@ -336,11 +336,11 @@ protected void runRunningJob() throws AlterCancelException { taskNames.add(rewriteTask.getName()); } List resStatus = GlobalStateMgr.getCurrentState().getTaskManager() - .getTaskRunManager().getTaskRunHistory().lookupHistoryByTaskNames(dbName, taskNames); + .getTaskRunManager().getTaskRunHistory().lookupHistoryByTaskNames(dbName, taskNames); for (OptimizeTask rewriteTask : rewriteTasks) { if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED - || rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.SUCCESS) { + || rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.SUCCESS) { progress += 100 / rewriteTasks.size(); continue; } @@ -359,7 +359,7 @@ protected void runRunningJob() throws AlterCancelException { continue; } List filteredTask = resStatus.stream() - .filter(x -> rewriteTask.getName().equals(x.getTaskName())).collect(Collectors.toList()); + .filter(x -> rewriteTask.getName().equals(x.getTaskName())).collect(Collectors.toList()); if (filteredTask.isEmpty()) { allFinished = false; continue; @@ -386,7 +386,7 @@ protected void runRunningJob() throws AlterCancelException { // replace partition try (AutoCloseableLock ignore = - new AutoCloseableLock(new Locker(), db.getId(), Lists.newArrayList(tbl.getId()), LockType.WRITE)) { + new AutoCloseableLock(new Locker(), db.getId(), Lists.newArrayList(tbl.getId()), LockType.WRITE)) { onFinished(db, tbl); } @@ -407,8 +407,8 @@ protected void runFinishedRewritingJob() { private void onFinished(Database db, OlapTable targetTable) throws AlterCancelException { try { tmpPartitionNames = getTmpPartitionIds().stream() - .map(partitionId -> targetTable.getPartition(partitionId).getName()) - .collect(Collectors.toList()); + .map(partitionId -> targetTable.getPartition(partitionId).getName()) + .collect(Collectors.toList()); Map partitionLastVersion = Maps.newHashMap(); optimizeClause.getSourcePartitionIds().stream() @@ -424,10 +424,10 @@ private void onFinished(Database db, OlapTable targetTable) throws AlterCancelEx String errMsg = ""; for (OptimizeTask rewriteTask : rewriteTasks) { if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED - || partitionLastVersion.get(rewriteTask.getPartitionName()) != 
rewriteTask.getLastVersion()) { + || partitionLastVersion.get(rewriteTask.getPartitionName()) != rewriteTask.getLastVersion()) { LOG.info("optimize job {} rewrite task {} state {} failed or partition {} version {} change to {}", - jobId, rewriteTask.getName(), rewriteTask.getOptimizeTaskState(), rewriteTask.getPartitionName(), - rewriteTask.getLastVersion(), partitionLastVersion.get(rewriteTask.getPartitionName())); + jobId, rewriteTask.getName(), rewriteTask.getOptimizeTaskState(), rewriteTask.getPartitionName(), + rewriteTask.getLastVersion(), partitionLastVersion.get(rewriteTask.getPartitionName())); sourcePartitionNames.remove(rewriteTask.getPartitionName()); tmpPartitionNames.remove(rewriteTask.getTempPartitionName()); targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true); @@ -446,9 +446,9 @@ private void onFinished(Database db, OlapTable targetTable) throws AlterCancelEx if (hasFailedTask && (optimizeClause.getKeysDesc() != null || optimizeClause.getSortKeys() != null)) { rewriteTasks.forEach( - rewriteTask -> targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true)); + rewriteTask -> targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true)); throw new AlterCancelException( - "optimize keysType or sort keys failed since some partitions rewrite failed [" + errMsg + "]"); + "optimize keysType or sort keys failed since some partitions rewrite failed [" + errMsg + "]"); } allPartitionOptimized = false; @@ -460,10 +460,10 @@ private void onFinished(Database db, OlapTable targetTable) throws AlterCancelEx if (optimizeClause.isTableOptimize()) { if (optimizeClause.getDistributionDesc().getType() != targetTable.getDefaultDistributionInfo().getType()) { if (targetPartitionNames.size() != targetPartitionNum - || targetPartitionNum != sourcePartitionNames.size()) { + || targetPartitionNum != sourcePartitionNames.size()) { // partial partitions of target table are optimized throw new AlterCancelException("can not change distribution type of target table" + - " since partial partitions are not optimized [" + errMsg + "]"); + " since partial partitions are not optimized [" + errMsg + "]"); } } allPartitionOptimized = true; @@ -473,7 +473,8 @@ private void onFinished(Database db, OlapTable targetTable) throws AlterCancelEx Set sourceTablets = Sets.newHashSet(); sourcePartitionNames.forEach(name -> { Partition partition = targetTable.getPartition(name); - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (MaterializedIndex index + : partition.getDefaultPhysicalPartition().getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { sourceTablets.addAll(index.getTablets()); } }); @@ -489,7 +490,7 @@ private void onFinished(Database db, OlapTable targetTable) throws AlterCancelEx } // write log ReplacePartitionOperationLog info = new ReplacePartitionOperationLog(db.getId(), targetTable.getId(), - sourcePartitionNames, tmpPartitionNames, true, false, partitionInfo instanceof SinglePartitionInfo); + sourcePartitionNames, tmpPartitionNames, true, false, partitionInfo instanceof SinglePartitionInfo); GlobalStateMgr.getCurrentState().getEditLog().logReplaceTempPartition(info); // mark all source tablet ids force delete to drop it directly on BE, // not to move it to trash @@ -497,7 +498,7 @@ private void onFinished(Database db, OlapTable targetTable) throws AlterCancelEx try { GlobalStateMgr.getCurrentState().getColocateTableIndex().updateLakeTableColocationInfo(targetTable, - true /* isJoin */, 
null /* expectGroupId */); + true /* isJoin */, null /* expectGroupId */); } catch (DdlException e) { // log an error if update colocation info failed, insert overwrite already succeeded LOG.error("table {} update colocation info failed after insert overwrite, {}.", tableId, e.getMessage()); @@ -511,8 +512,8 @@ private void onFinished(Database db, OlapTable targetTable) throws AlterCancelEx targetTable.setState(OlapTableState.NORMAL); LOG.info("optimize job {} finish replace partitions dbId:{}, tableId:{}," - + "source partitions:{}, tmp partitions:{}, allOptimized:{}", - jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized); + + "source partitions:{}, tmp partitions:{}, allOptimized:{}", + jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized); } catch (Exception e) { LOG.warn("optimize table failed dbId:{}, tableId:{} exception: {}", dbId, tableId, e); throw new AlterCancelException("optimize table failed " + e.getMessage()); @@ -573,7 +574,8 @@ private void cancelInternal() { Partition partition = targetTable.getPartition(pid); if (partition != null) { - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { // hash set is able to deduplicate the elements sourceTablets.addAll(index.getTablets()); } @@ -597,7 +599,7 @@ private void cancelInternal() { // Check whether transactions of the given database which txnId is less than 'watershedTxnId' are finished. protected boolean isPreviousLoadFinished() throws AnalysisException { return GlobalStateMgr.getCurrentState().getGlobalTransactionMgr() - .isPreviousTransactionsFinished(watershedTxnId, dbId, Lists.newArrayList(tableId)); + .isPreviousTransactionsFinished(watershedTxnId, dbId, Lists.newArrayList(tableId)); } /** @@ -616,7 +618,7 @@ private void replayPending(OptimizeJobV2 replayedJob) { return; } try (AutoCloseableLock ignore = - new AutoCloseableLock(new Locker(), db.getId(), Lists.newArrayList(tbl.getId()), LockType.WRITE)) { + new AutoCloseableLock(new Locker(), db.getId(), Lists.newArrayList(tbl.getId()), LockType.WRITE)) { // set table state tbl.setState(OlapTableState.SCHEMA_CHANGE); } @@ -666,7 +668,8 @@ private void onReplayFinished(OptimizeJobV2 replayedJob, OlapTable targetTable) for (long id : replayedJob.getTmpPartitionIds()) { Partition partition = targetTable.getPartition(id); if (partition != null) { - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (MaterializedIndex index + : partition.getDefaultPhysicalPartition().getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { sourceTablets.addAll(index.getTablets()); } targetTable.dropTempPartition(partition.getName(), true); @@ -682,8 +685,8 @@ private void onReplayFinished(OptimizeJobV2 replayedJob, OlapTable targetTable) targetTable.setState(OlapTableState.NORMAL); LOG.info("finish replay optimize job {} dbId:{}, tableId:{}," - + "source partitions:{}, tmp partitions:{}, allOptimized:{}", - jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized); + + "source partitions:{}, tmp partitions:{}, allOptimized:{}", + jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized); } /** @@ -696,7 +699,7 @@ private void replayFinished(OptimizeJobV2 replayedJob) { OlapTable tbl = (OlapTable) 
GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), tableId); if (tbl != null) { try (AutoCloseableLock ignore = - new AutoCloseableLock(new Locker(), db.getId(), Lists.newArrayList(tbl.getId()), LockType.WRITE)) { + new AutoCloseableLock(new Locker(), db.getId(), Lists.newArrayList(tbl.getId()), LockType.WRITE)) { onReplayFinished(replayedJob, tbl); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/SchemaChangeHandler.java b/fe/fe-core/src/main/java/com/starrocks/alter/SchemaChangeHandler.java index 79947502b66a95..711a3008b8b4de 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/SchemaChangeHandler.java +++ b/fe/fe-core/src/main/java/com/starrocks/alter/SchemaChangeHandler.java @@ -84,14 +84,15 @@ import com.starrocks.common.UserException; import com.starrocks.common.util.ListComparator; import com.starrocks.common.util.PropertyAnalyzer; -import com.starrocks.common.util.WriteQuorum; import com.starrocks.common.util.concurrent.MarkedCountDownLatch; import com.starrocks.common.util.concurrent.lock.LockType; import com.starrocks.common.util.concurrent.lock.Locker; +import com.starrocks.persist.ModifyTablePropertyOperationLog; import com.starrocks.persist.TableAddOrDropColumnsInfo; import com.starrocks.qe.ConnectContext; import com.starrocks.qe.ShowResultSet; import com.starrocks.server.GlobalStateMgr; +import com.starrocks.server.LocalMetastore; import com.starrocks.server.RunMode; import com.starrocks.sql.analyzer.SemanticException; import com.starrocks.sql.ast.AddColumnClause; @@ -119,7 +120,6 @@ import com.starrocks.task.TabletMetadataUpdateAgentTaskFactory; import com.starrocks.thrift.TTabletMetaType; import com.starrocks.thrift.TTaskType; -import com.starrocks.thrift.TWriteQuorumType; import com.starrocks.warehouse.Warehouse; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -2053,83 +2053,6 @@ public ShowResultSet processLakeTableAlterMeta(AlterClause alterClause, Database return null; } - public void updateTableMeta(Database db, String tableName, Map properties, - TTabletMetaType metaType) - throws DdlException { - List partitions = Lists.newArrayList(); - OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), tableName); - - Locker locker = new Locker(); - locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.READ); - try { - partitions.addAll(olapTable.getPartitions()); - } finally { - locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.READ); - } - - boolean metaValue = false; - if (metaType == TTabletMetaType.INMEMORY) { - metaValue = Boolean.parseBoolean(properties.get(PropertyAnalyzer.PROPERTIES_INMEMORY)); - if (metaValue == olapTable.isInMemory()) { - return; - } - } else if (metaType == TTabletMetaType.ENABLE_PERSISTENT_INDEX) { - metaValue = Boolean.parseBoolean(properties.get(PropertyAnalyzer.PROPERTIES_ENABLE_PERSISTENT_INDEX)); - if (metaValue == olapTable.enablePersistentIndex()) { - return; - } - } else if (metaType == TTabletMetaType.WRITE_QUORUM) { - TWriteQuorumType writeQuorum = WriteQuorum - .findTWriteQuorumByName(properties.get(PropertyAnalyzer.PROPERTIES_WRITE_QUORUM)); - if (writeQuorum == olapTable.writeQuorum()) { - return; - } - } else if (metaType == TTabletMetaType.REPLICATED_STORAGE) { - metaValue = Boolean.parseBoolean(properties.get(PropertyAnalyzer.PROPERTIES_REPLICATED_STORAGE)); - if (metaValue == 
olapTable.enableReplicatedStorage()) { - return; - } - } else if (metaType == TTabletMetaType.BUCKET_SIZE) { - long bucketSize = Long.parseLong(properties.get(PropertyAnalyzer.PROPERTIES_BUCKET_SIZE)); - if (bucketSize == olapTable.getAutomaticBucketSize()) { - return; - } - } else if (metaType == TTabletMetaType.MUTABLE_BUCKET_NUM) { - long mutableBucketNum = Long.parseLong(properties.get(PropertyAnalyzer.PROPERTIES_MUTABLE_BUCKET_NUM)); - if (mutableBucketNum == olapTable.getMutableBucketNum()) { - return; - } - } else if (metaType == TTabletMetaType.ENABLE_LOAD_PROFILE) { - boolean enableLoadProfile = Boolean.parseBoolean(properties.get(PropertyAnalyzer.PROPERTIES_ENABLE_LOAD_PROFILE)); - if (enableLoadProfile == olapTable.enableLoadProfile()) { - return; - } - } else if (metaType == TTabletMetaType.PRIMARY_INDEX_CACHE_EXPIRE_SEC) { - int primaryIndexCacheExpireSec = Integer.parseInt(properties.get( - PropertyAnalyzer.PROPERTIES_PRIMARY_INDEX_CACHE_EXPIRE_SEC)); - if (primaryIndexCacheExpireSec == olapTable.primaryIndexCacheExpireSec()) { - return; - } - } else { - LOG.warn("meta type: {} does not support", metaType); - return; - } - - if (metaType == TTabletMetaType.INMEMORY || metaType == TTabletMetaType.ENABLE_PERSISTENT_INDEX) { - for (Partition partition : partitions) { - updatePartitionTabletMeta(db, olapTable.getName(), partition.getName(), metaValue, metaType); - } - } - - locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.WRITE); - try { - GlobalStateMgr.getCurrentState().getLocalMetastore().modifyTableMeta(db, olapTable, properties, metaType); - } finally { - locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.WRITE); - } - } - // return true means that the modification of FEMeta is successful, // and as long as the modification of metadata is successful, // the final consistency will be achieved through the report handler @@ -2203,7 +2126,7 @@ public boolean updateBinlogConfigMeta(Database db, Long tableId, Map tabletSet = beIdToTabletId.computeIfAbsent(replica.getBackendId(), k -> Sets.newHashSet()); tabletSet.add(tablet.getId()); } @@ -2524,12 +2466,36 @@ public void updateTableConstraint(Database db, String tableName, Map properties) + throws DdlException { + Locker locker = new Locker(); + Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); + Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), tableName); + if (table == null) { + throw new DdlException(String.format("table:%s does not exist", tableName)); + } + OlapTable olapTable = (OlapTable) table; + TableProperty tableProperty = olapTable.getTableProperty(); + if (tableProperty == null) { + tableProperty = new TableProperty(properties); + olapTable.setTableProperty(tableProperty); + } else { + tableProperty.modifyTableProperties(properties); + } + tableProperty.buildConstraint(); + + ModifyTablePropertyOperationLog info = + new ModifyTablePropertyOperationLog(db.getId(), olapTable.getId(), properties); + GlobalStateMgr.getCurrentState().getEditLog().logModifyConstraint(info); + } + @Override public void cancel(CancelStmt stmt) throws DdlException { cancel(stmt, "user cancelled"); diff --git a/fe/fe-core/src/main/java/com/starrocks/alter/SchemaChangeJobV2.java b/fe/fe-core/src/main/java/com/starrocks/alter/SchemaChangeJobV2.java index ad8052e350bffe..78113c0a9a199f 100644 --- a/fe/fe-core/src/main/java/com/starrocks/alter/SchemaChangeJobV2.java +++ 
b/fe/fe-core/src/main/java/com/starrocks/alter/SchemaChangeJobV2.java @@ -467,7 +467,7 @@ private void addShadowIndexToCatalog(OlapTable tbl) { Map shadowIndexMap = physicalPartitionIndexMap.row(partitionId); for (MaterializedIndex shadowIndex : shadowIndexMap.values()) { Preconditions.checkState(shadowIndex.getState() == IndexState.SHADOW, shadowIndex.getState()); - partition.createRollupIndex(shadowIndex); + GlobalStateMgr.getCurrentState().getLocalMetastore().addMaterializedIndex(partition, shadowIndex); } } @@ -848,7 +848,7 @@ private void onFinished(OlapTable tbl) { } // replace the origin index with shadow index, set index state as NORMAL for (Partition partition : tbl.getPartitions()) { - TStorageMedium medium = tbl.getPartitionInfo().getDataProperty(partition.getParentId()).getStorageMedium(); + TStorageMedium medium = tbl.getPartitionInfo().getDataProperty(partition.getId()).getStorageMedium(); // drop the origin index from partitions for (Map.Entry entry : indexIdMap.entrySet()) { long shadowIdxId = entry.getKey(); @@ -866,6 +866,9 @@ private void onFinished(OlapTable tbl) { droppedIdx = physicalPartition.getBaseIndex(); } else { droppedIdx = physicalPartition.deleteRollupIndex(originIdxId); + + GlobalStateMgr.getCurrentState().getLocalMetastore().dropMaterializedIndex( + physicalPartition, originIdxId); } Preconditions.checkNotNull(droppedIdx, originIdxId + " vs. " + shadowIdxId); diff --git a/fe/fe-core/src/main/java/com/starrocks/backup/BackupJobInfo.java b/fe/fe-core/src/main/java/com/starrocks/backup/BackupJobInfo.java index 431b8603a63b1a..3507d800f9aa88 100644 --- a/fe/fe-core/src/main/java/com/starrocks/backup/BackupJobInfo.java +++ b/fe/fe-core/src/main/java/com/starrocks/backup/BackupJobInfo.java @@ -314,9 +314,10 @@ public static BackupJobInfo fromCatalog(long backupTime, String label, String db BackupPartitionInfo partitionInfo = new BackupPartitionInfo(); partitionInfo.id = partition.getId(); partitionInfo.name = partition.getName(); - partitionInfo.version = partition.getVisibleVersion(); + partitionInfo.version = partition.getDefaultPhysicalPartition().getVisibleVersion(); if (partition.getSubPartitions().size() == 1) { - for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.VISIBLE)) { BackupIndexInfo idxInfo = new BackupIndexInfo(); idxInfo.id = index.getId(); idxInfo.name = olapTbl.getIndexNameById(index.getId()); diff --git a/fe/fe-core/src/main/java/com/starrocks/backup/RestoreJob.java b/fe/fe-core/src/main/java/com/starrocks/backup/RestoreJob.java index ee23d4a73fb50d..9510997c449622 100644 --- a/fe/fe-core/src/main/java/com/starrocks/backup/RestoreJob.java +++ b/fe/fe-core/src/main/java/com/starrocks/backup/RestoreJob.java @@ -870,17 +870,20 @@ protected void prepareAndSendSnapshotTasks(Database db) { .getTable(db.getId(), idChain.getTblId()); PhysicalPartition part = tbl.getPhysicalPartition(idChain.getPartId()); MaterializedIndex index = part.getIndex(idChain.getIdxId()); - LocalTablet tablet = (LocalTablet) index.getTablet(idChain.getTabletId()); - Replica replica = tablet.getReplicaById(idChain.getReplicaId()); + + Tablet tablet = GlobalStateMgr.getCurrentState().getLocalMetastore().getTablet(index, idChain.getTabletId()); + Replica replica = GlobalStateMgr.getCurrentState().getLocalMetastore().getReplica( + (LocalTablet) tablet, idChain.getReplicaId()); + long signature = globalStateMgr.getNextId(); 
SnapshotTask task = new SnapshotTask(null, replica.getBackendId(), signature, jobId, db.getId(), - tbl.getId(), part.getId(), index.getId(), tablet.getId(), + tbl.getId(), part.getId(), index.getId(), idChain.getTabletId(), part.getVisibleVersion(), tbl.getSchemaHashByIndexId(index.getId()), timeoutMs, true /* is restore task*/); batchTask.addTask(task); - unfinishedSignatureToId.put(signature, tablet.getId()); + unfinishedSignatureToId.put(signature, idChain.getTabletId()); bePathsMap.put(replica.getBackendId(), replica.getPathHash()); } } finally { @@ -995,13 +998,13 @@ public Partition resetPartitionForRestore(OlapTable localTbl, OlapTable remoteTb for (String localIdxName : localIdxNameToId.keySet()) { // set ids of indexes in remote partition to the local index ids long remoteIdxId = remoteTbl.getIndexIdByName(localIdxName); - MaterializedIndex remoteIdx = remotePart.getIndex(remoteIdxId); + MaterializedIndex remoteIdx = remotePart.getDefaultPhysicalPartition().getIndex(remoteIdxId); long localIdxId = localIdxNameToId.get(localIdxName); remoteIdx.setIdForRestore(localIdxId); if (localIdxId != localTbl.getBaseIndexId()) { // not base table, reset - remotePart.deleteRollupIndex(remoteIdxId); - remotePart.createRollupIndex(remoteIdx); + remotePart.getDefaultPhysicalPartition().deleteRollupIndex(remoteIdxId); + remotePart.getDefaultPhysicalPartition().createRollupIndex(remoteIdx); } } @@ -1046,7 +1049,8 @@ protected void genFileMapping(OlapTable localTbl, Partition localPartition, Long protected void genFileMappingWithPartition(OlapTable localTbl, Partition localPartition, Long remoteTblId, BackupPartitionInfo backupPartInfo, boolean overwrite) { - for (MaterializedIndex localIdx : localPartition.getMaterializedIndices(IndexExtState.VISIBLE)) { + for (MaterializedIndex localIdx : localPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.VISIBLE)) { BackupIndexInfo backupIdxInfo = backupPartInfo.getIdx(localTbl.getIndexNameById(localIdx.getId())); Preconditions.checkState(backupIdxInfo.tablets.size() == localIdx.getTablets().size()); for (int i = 0; i < localIdx.getTablets().size(); i++) { @@ -1150,7 +1154,8 @@ protected void addRestoredPartitions(Database db, boolean modify) { } protected void modifyInvertedIndex(OlapTable restoreTbl, Partition restorePart) { - for (MaterializedIndex restoreIdx : restorePart.getMaterializedIndices(IndexExtState.VISIBLE)) { + for (MaterializedIndex restoreIdx : restorePart.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.VISIBLE)) { int schemaHash = restoreTbl.getSchemaHashByIndexId(restoreIdx.getId()); TabletMeta tabletMeta = new TabletMeta(dbId, restoreTbl.getId(), restorePart.getId(), restoreIdx.getId(), schemaHash, TStorageMedium.HDD); @@ -1663,7 +1668,8 @@ public void cancelInternal(boolean isReplay) { for (Table restoreTbl : restoredTbls) { LOG.info("remove restored table when cancelled: {}", restoreTbl.getName()); for (Partition part : restoreTbl.getPartitions()) { - for (MaterializedIndex idx : part.getMaterializedIndices(IndexExtState.VISIBLE)) { + for (MaterializedIndex idx : part.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.VISIBLE)) { for (Tablet tablet : idx.getTablets()) { globalStateMgr.getTabletInvertedIndex().deleteTablet(tablet.getId()); } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/CatalogRecycleBin.java b/fe/fe-core/src/main/java/com/starrocks/catalog/CatalogRecycleBin.java index 0c81586fe59593..496464fe538ffe 100644 --- 
a/fe/fe-core/src/main/java/com/starrocks/catalog/CatalogRecycleBin.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/CatalogRecycleBin.java @@ -51,6 +51,8 @@ import com.starrocks.common.io.Text; import com.starrocks.common.io.Writable; import com.starrocks.common.util.FrontendDaemon; +import com.starrocks.common.util.concurrent.lock.LockType; +import com.starrocks.common.util.concurrent.lock.Locker; import com.starrocks.persist.ImageWriter; import com.starrocks.persist.RecoverInfo; import com.starrocks.persist.gson.IForwardCompatibleObject; @@ -122,7 +124,11 @@ private void removeRecycleMarkers(Long id) { enableEraseLater.remove(id); } - public synchronized void recycleDatabase(Database db, Set tableNames) { + public synchronized void recycleDatabase(Database db, Set tableNames, boolean isForce) { + if (isForce) { + onEraseDatabase(db.getId()); + return; + } Preconditions.checkState(!idToDatabase.containsKey(db.getId())); // db should be empty. all tables are recycled before @@ -138,6 +144,13 @@ public synchronized void recycleDatabase(Database db, Set tableNames) { LOG.info("recycle db[{}-{}]", db.getId(), db.getOriginName()); } + public void onEraseDatabase(long dbId) { + // remove database transaction manager + GlobalStateMgr.getCurrentState().getGlobalTransactionMgr().removeDatabaseTransactionMgr(dbId); + // unbind db to storage volume + GlobalStateMgr.getCurrentState().getStorageVolumeMgr().unbindDbToStorageVolume(dbId); + } + public synchronized Database getDatabase(long dbId) { RecycleDatabaseInfo databaseInfo = idToDatabase.get(dbId); if (databaseInfo != null) { @@ -299,7 +312,7 @@ private synchronized boolean canEraseTable(RecycleTableInfo tableInfo, long curr } // database is force dropped, the table can not be recovered, erase it. - if (GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(tableInfo.getDbId()) == null) { + if (GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(tableInfo.getDbId()) == null) { return true; } return false; @@ -311,13 +324,14 @@ private synchronized boolean canErasePartition(RecyclePartitionInfo partitionInf } // database is force dropped, the partition can not be recovered, erase it. - Database database = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(partitionInfo.getDbId()); + Database database = GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .getDbIncludeRecycleBin(partitionInfo.getDbId()); if (database == null) { return true; } // table is force dropped, the partition can not be recovered, erase it. 
- if (GlobalStateMgr.getCurrentState().getLocalMetastore() + if (GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getTableIncludeRecycleBin(database, partitionInfo.getTableId()) == null) { return true; } @@ -358,7 +372,7 @@ protected synchronized void eraseDatabase(long currentTimeMs) { dbIter.remove(); removeRecycleMarkers(entry.getKey()); - GlobalStateMgr.getCurrentState().getLocalMetastore().onEraseDatabase(db.getId()); + onEraseDatabase(db.getId()); GlobalStateMgr.getCurrentState().getEditLog().logEraseDb(db.getId()); LOG.info("erase db[{}-{}] finished", db.getId(), db.getOriginName()); currentEraseOpCnt++; @@ -379,7 +393,7 @@ private synchronized void eraseDatabaseWithSameName(String dbName) { iterator.remove(); removeRecycleMarkers(entry.getKey()); - GlobalStateMgr.getCurrentState().getLocalMetastore().onEraseDatabase(db.getId()); + onEraseDatabase(db.getId()); LOG.info("erase database[{}-{}], because db with the same name db is recycled", db.getId(), dbName); } } @@ -389,7 +403,7 @@ public synchronized void replayEraseDatabase(long dbId) { idToDatabase.remove(dbId); idToRecycleTime.remove(dbId); - GlobalStateMgr.getCurrentState().getLocalMetastore().onEraseDatabase(dbId); + onEraseDatabase(dbId); LOG.info("replay erase db[{}] finished", dbId); } @@ -580,7 +594,7 @@ public synchronized void replayErasePartition(long partitionId) { Partition partition = partitionInfo.getPartition(); if (!isCheckpointThread()) { - GlobalStateMgr.getCurrentState().getLocalMetastore().onErasePartition(partition); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().onErasePartition(partition); } LOG.info("replay erase partition[{}-{}] finished", partitionId, partition.getName()); @@ -687,18 +701,28 @@ public synchronized boolean recoverTable(Database db, String tableName) { return true; } - public synchronized void replayRecoverTable(Database db, long tableId) { - // make sure to get db write lock - long dbId = db.getId(); - Map idToTableInfoDbLevel = idToTableInfo.row(dbId); - RecycleTableInfo tableInfo = idToTableInfoDbLevel.get(tableId); - Preconditions.checkState(tableInfo.getDbId() == db.getId()); - Table table = tableInfo.getTable(); - db.registerTableUnlocked(table); - nameToTableInfo.row(dbId).remove(table.getName()); - idToTableInfoDbLevel.remove(tableId); - idToRecycleTime.remove(tableInfo.getTable().getId()); - LOG.info("replay recover table[{}-{}] finished", tableId, tableInfo.getTable().getName()); + public synchronized void replayRecoverTable(RecoverInfo recoverInfo) { + long dbId = recoverInfo.getDbId(); + long tableId = recoverInfo.getTableId(); + + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); + Locker locker = new Locker(); + locker.lockDatabase(dbId, LockType.WRITE); + try { + // make sure to get db write lock + Map idToTableInfoDbLevel = idToTableInfo.row(dbId); + RecycleTableInfo tableInfo = idToTableInfoDbLevel.get(tableId); + Preconditions.checkState(tableInfo.getDbId() == db.getId()); + Table table = tableInfo.getTable(); + db.registerTableUnlocked(table); + nameToTableInfo.row(dbId).remove(table.getName()); + idToTableInfoDbLevel.remove(tableId); + idToRecycleTime.remove(tableInfo.getTable().getId()); + LOG.info("replay recover table[{}-{}] finished", tableId, tableInfo.getTable().getName()); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } public synchronized void recoverPartition(long dbId, OlapTable table, String partitionName) throws DdlException { @@ -783,6 +807,7 @@ public void 
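replayRecoverTable now takes the RecoverInfo edit-log record, resolves the database itself, and holds the database write lock for the whole replay instead of relying on the caller to have locked it. A minimal sketch of that lock discipline, assuming only the Locker calls that appear in this hunk; the action body is illustrative.

// Sketch of the lock shape used by the new replay path.
static void runUnderDbWriteLock(long dbId, Runnable action) {
    Locker locker = new Locker();
    locker.lockDatabase(dbId, LockType.WRITE);
    try {
        action.run(); // e.g. re-register the recovered table and clean the recycle maps
    } finally {
        locker.unLockDatabase(dbId, LockType.WRITE);
    }
}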
addTabletToInvertedIndex() { // no need to handle idToDatabase. Database is already empty before being put here TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); + // idToTable for (RecycleTableInfo tableInfo : idToTableInfo.values()) { Table table = tableInfo.getTable(); @@ -808,6 +833,7 @@ public void addTabletToInvertedIndex() { int schemaHash = olapTable.getSchemaHashByIndexId(indexId); TabletMeta tabletMeta = new TabletMeta(dbId, tableId, physicalPartitionId, indexId, schemaHash, medium, table.isCloudNativeTable()); + for (Tablet tablet : index.getTablets()) { long tabletId = tablet.getId(); invertedIndex.addTablet(tabletId, tabletMeta); diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/CatalogUtils.java b/fe/fe-core/src/main/java/com/starrocks/catalog/CatalogUtils.java index 14d5194d64e452..5119de1433d087 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/CatalogUtils.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/CatalogUtils.java @@ -424,7 +424,7 @@ public static int calAvgBucketNumOfRecentPartitions(OlapTable olapTable, int rec List partitions = (List) olapTable.getRecentPartitions(recentPartitionNum); boolean dataImported = true; for (Partition partition : partitions) { - if (partition.getVisibleVersion() == 1) { + if (partition.getDefaultPhysicalPartition().getVisibleVersion() == 1) { dataImported = false; break; } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/ColocateTableIndex.java b/fe/fe-core/src/main/java/com/starrocks/catalog/ColocateTableIndex.java index c587f98c7e9cb9..a4d4f404ff68d4 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/ColocateTableIndex.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/ColocateTableIndex.java @@ -1008,12 +1008,12 @@ protected void cleanupInvalidDbOrTable(GlobalStateMgr globalStateMgr) { for (Map.Entry entry : table2Group.entrySet()) { long dbId = entry.getValue().dbId; long tableId = entry.getKey(); - Database database = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database database = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (database == null) { LOG.warn("cannot find db {}, will remove invalid table {} from group {}", dbId, tableId, entry.getValue()); } else { - Table table = globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(database, tableId); + Table table = globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin(database, tableId); if (table != null) { // this is a valid table/database, do nothing continue; @@ -1064,8 +1064,8 @@ private void constructLakeGroups(GlobalStateMgr globalStateMgr) { long dbId = entry.getValue().dbId; long tableId = entry.getKey(); // database and table should be valid if reach here - Database database = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); - Table table = globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(database, tableId); + Database database = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId); + Table table = globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin(database, tableId); if (table.isCloudNativeTable()) { lakeGroups.add(entry.getValue()); } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/Database.java b/fe/fe-core/src/main/java/com/starrocks/catalog/Database.java index 275ed6e0cb29dc..4b05f2d0c1cfb6 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/Database.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/Database.java @@ 
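The CatalogUtils change above keeps the same heuristic and only moves where the version is read from: a partition whose default physical partition is still at the initial visible version (1) has never received a load. A hedged helper capturing that check; PARTITION_INIT_VERSION is the constant kept on Partition later in this patch.

// Sketch: "has this partition ever been loaded?" after the refactor.
static boolean hasImportedData(Partition partition) {
    return partition.getDefaultPhysicalPartition().getVisibleVersion()
            > Partition.PARTITION_INIT_VERSION;
}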
-334,7 +334,7 @@ public void dropTable(String tableName, boolean isSetIfExists, boolean isForce) } unprotectDropTable(table.getId(), isForce, false); DropInfo info = new DropInfo(id, table.getId(), -1L, isForce); - GlobalStateMgr.getCurrentState().getEditLog().logDropTable(info); + GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(info); } finally { locker.unLockDatabase(id, LockType.WRITE); } @@ -347,27 +347,6 @@ public void dropTable(String tableName, boolean isSetIfExists, boolean isForce) tableName, fullQualifiedName, table.getId(), table.getType(), isForce); } - public void dropTemporaryTable(long tableId, String tableName, boolean isSetIfExists, boolean isForce) throws DdlException { - Table table; - Locker locker = new Locker(); - locker.lockDatabase(id, LockType.WRITE); - try { - table = idToTable.get(tableId); - if (table == null) { - if (isSetIfExists) { - return; - } - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName); - } - unprotectDropTemporaryTable(tableId, isForce, false); - DropInfo info = new DropInfo(id, table.getId(), -1L, isForce); - GlobalStateMgr.getCurrentState().getEditLog().logDropTable(info); - } finally { - locker.unLockDatabase(id, LockType.WRITE); - } - } - - /** * Drop a table from this database. * diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/ExternalOlapTable.java b/fe/fe-core/src/main/java/com/starrocks/catalog/ExternalOlapTable.java index dbae6823389d4a..e9fc4c07725fd3 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/ExternalOlapTable.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/ExternalOlapTable.java @@ -471,12 +471,15 @@ private void updateMetaInternal(String dbName, TTableMeta meta, List(); + this.idToTablets = new TreeMap<>(); this.tablets = new ArrayList<>(); this.rowCount = 0; this.visibleTxnId = (this.state == IndexState.SHADOW) ? 
visibleTxnId : 0; @@ -227,7 +227,7 @@ public void setRowCount(long rowCount) { public long getDataSize() { long dataSize = 0; - for (Tablet tablet : getTablets()) { + for (Tablet tablet : GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(this)) { dataSize += tablet.getDataSize(false); } return dataSize; @@ -235,7 +235,7 @@ public long getDataSize() { public long getTabletMaxDataSize() { long maxDataSize = 0; - for (Tablet tablet : getTablets()) { + for (Tablet tablet : GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(this)) { maxDataSize = Math.max(maxDataSize, tablet.getDataSize(true)); } return maxDataSize; @@ -252,7 +252,7 @@ public long getReplicaCount() { } else { Preconditions.checkState(t instanceof LocalTablet); long replicaCount = 0; - for (Tablet tablet : getTablets()) { + for (Tablet tablet : GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(this)) { LocalTablet localTablet = (LocalTablet) tablet; replicaCount += localTablet.getImmutableReplicas().size(); } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/MaterializedView.java b/fe/fe-core/src/main/java/com/starrocks/catalog/MaterializedView.java index 1ebbe2c830fdbd..efbf28291b5fd1 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/MaterializedView.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/MaterializedView.java @@ -170,7 +170,7 @@ public static BasePartitionInfo fromExternalTable(com.starrocks.connector.Partit } public static BasePartitionInfo fromOlapTable(Partition partition) { - return new BasePartitionInfo(partition.getId(), partition.getVisibleVersion(), -1); + return new BasePartitionInfo(partition.getId(), partition.getDefaultPhysicalPartition().getVisibleVersion(), -1); } public long getId() { @@ -1940,7 +1940,7 @@ public Status doAfterRestore(MvRestoreContext mvRestoreContext) throws DdlExcept AlterMaterializedViewBaseTableInfosLog alterMaterializedViewBaseTableInfos = new AlterMaterializedViewBaseTableInfosLog(dbId, getId(), oldMvId, baseTableInfos, baseTableVisibleVersionMap); - GlobalStateMgr.getCurrentState().getEditLog().logAlterMvBaseTableInfos(alterMaterializedViewBaseTableInfos); + GlobalStateMgr.getCurrentState().getLocalMetastore().alterMvBaseTableInfos(alterMaterializedViewBaseTableInfos); } // rebuild mv tasks to be scheduled in TaskManager. 
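The MaterializedIndex hunks route every tablet scan through LocalMetastore.getAllTablets(index) instead of the index's own in-memory tablet list, so the aggregate statistics are computed against whatever the metastore currently tracks. A sketch of that aggregation shape, using only calls that appear above; the helper name is illustrative.

// Sketch: summing tablet data size with tablets resolved through LocalMetastore.
static long indexDataSize(MaterializedIndex index) {
    long dataSize = 0;
    for (Tablet tablet : GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(index)) {
        dataSize += tablet.getDataSize(false); // same flag value as getDataSize() above
    }
    return dataSize;
}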
diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/MetadataViewer.java b/fe/fe-core/src/main/java/com/starrocks/catalog/MetadataViewer.java index d72855d1d8cea3..3f68776186d179 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/MetadataViewer.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/MetadataViewer.java @@ -46,6 +46,7 @@ import com.starrocks.common.util.concurrent.lock.Locker; import com.starrocks.qe.ConnectContext; import com.starrocks.server.GlobalStateMgr; +import com.starrocks.server.LocalMetastore; import com.starrocks.server.RunMode; import com.starrocks.sql.ast.AdminShowReplicaDistributionStmt; import com.starrocks.sql.ast.AdminShowReplicaStatusStmt; @@ -72,6 +73,7 @@ private static List> getTabletStatus(String dbName, String tblName, GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); SystemInfoService infoService = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(); + LocalMetastore localMetastore = GlobalStateMgr.getCurrentState().getLocalMetastore(); Database db = globalStateMgr.getLocalMetastore().getDb(dbName); if (db == null) { @@ -109,7 +111,7 @@ private static List> getTabletStatus(String dbName, String tblName, for (MaterializedIndex index : physicalPartition.getMaterializedIndices(IndexExtState.VISIBLE)) { int schemaHash = olapTable.getSchemaHashByIndexId(index.getId()); - for (Tablet tablet : index.getTablets()) { + for (Tablet tablet : localMetastore.getAllTablets(index)) { long tabletId = tablet.getId(); int count = replicationNum; for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/OlapTable.java b/fe/fe-core/src/main/java/com/starrocks/catalog/OlapTable.java index 3adb93b7577b18..62290d326af82c 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/OlapTable.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/OlapTable.java @@ -90,11 +90,11 @@ import com.starrocks.lake.DataCacheInfo; import com.starrocks.lake.StarOSAgent; import com.starrocks.lake.StorageInfo; +import com.starrocks.meta.StarRocksMetadata; import com.starrocks.persist.ColocatePersistInfo; import com.starrocks.qe.ConnectContext; import com.starrocks.qe.OriginStatement; import com.starrocks.server.GlobalStateMgr; -import com.starrocks.server.LocalMetastore; import com.starrocks.server.RunMode; import com.starrocks.server.TemporaryTableMgr; import com.starrocks.sql.analyzer.AnalyzeState; @@ -195,7 +195,7 @@ public enum OlapTableState { * or ROLLUP). * The query plan which is generate during this state is invalid because the meta * during the creation of the logical plan and the physical plan might be inconsistent. 
- */ + */ UPDATING_META } @@ -552,11 +552,11 @@ public boolean isTemporaryTable() { return this.sessionId != null; } - public void checkAndSetName(String newName, boolean onlyCheck) throws DdlException { + public void checkAndSetName(String newName, boolean onlyCheck) { // check if rollup has same name for (String idxName : getIndexNameToId().keySet()) { if (idxName.equals(newName)) { - throw new DdlException("New name conflicts with rollup index name: " + idxName); + throw new SemanticException("New name conflicts with rollup index name: " + idxName); } } if (!onlyCheck) { @@ -680,10 +680,11 @@ public void setIndexMeta(long indexId, String indexName, List schema, in public boolean hasMaterializedView() { Optional partition = idToPartition.values().stream().findFirst(); - if (!partition.isPresent()) { + if (partition.isEmpty()) { return false; } else { - return partition.get().hasMaterializedView(); + PhysicalPartition physicalPartition = partition.get().getDefaultPhysicalPartition(); + return physicalPartition.hasMaterializedView(); } } @@ -944,11 +945,11 @@ public Status createTabletsForRestore(int tabletNum, MaterializedIndex index, Gl if (isColocate) { try { isColocate = GlobalStateMgr.getCurrentState().getColocateTableIndex() - .addTableToGroup(db, this, this.colocateGroup, false); + .addTableToGroup(db, this, this.colocateGroup, false); } catch (Exception e) { return new Status(ErrCode.COMMON_ERROR, - "check colocate restore failed, errmsg: " + e.getMessage() + - ", you can disable colocate restore by turn off Config.enable_colocate_restore"); + "check colocate restore failed, errmsg: " + e.getMessage() + + ", you can disable colocate restore by turn off Config.enable_colocate_restore"); } } @@ -966,14 +967,13 @@ public Status createTabletsForRestore(int tabletNum, MaterializedIndex index, Gl long newTabletId = globalStateMgr.getNextId(); LocalTablet newTablet = new LocalTablet(newTabletId); index.addTablet(newTablet, null /* tablet meta */, false/* update inverted index */); - // replicas List beIds; if (chooseBackendsArbitrary) { // This is the first colocate table in the group, or just a normal table, // randomly choose backends beIds = GlobalStateMgr.getCurrentState().getNodeMgr() - .getClusterInfo().getNodeSelector().seqChooseBackendIds(replicationNum, true, true, getLocation()); + .getClusterInfo().getNodeSelector().seqChooseBackendIds(replicationNum, true, true, getLocation()); backendsPerBucketSeq.add(beIds); } else { // get backends from existing backend sequence @@ -992,7 +992,7 @@ public Status createTabletsForRestore(int tabletNum, MaterializedIndex index, Gl newTablet.addReplica(replica, false/* update inverted index */); } Preconditions.checkState(beIds.size() == replicationNum, - beIds.size() + " vs. " + replicationNum); + beIds.size() + " vs. " + replicationNum); } // first colocate table in CG @@ -1236,8 +1236,8 @@ public Map> getRangePartitionMap() { /** * @return : table's partition name to list partition names. 
* eg: - * partition columns : (a, b, c) - * values : [[1, 2, 3], [4, 5, 6]] + * partition columns : (a, b, c) + * values : [[1, 2, 3], [4, 5, 6]] */ public Map getListPartitionItems() { Preconditions.checkState(partitionInfo instanceof ListPartitionInfo); @@ -1623,7 +1623,8 @@ public Collection getRecentPartitions(int recentPartitionNum) { Collections.sort(partitions, new Comparator() { @Override public int compare(Partition h1, Partition h2) { - return (int) (h2.getVisibleVersion() - h1.getVisibleVersion()); + return (int) (h2.getDefaultPhysicalPartition().getVisibleVersion() + - h1.getDefaultPhysicalPartition().getVisibleVersion()); } }); return partitions.subList(0, recentPartitionNum); @@ -1835,7 +1836,9 @@ public TTableDescriptor toThrift(List partitions) { public long getRowCount() { long rowCount = 0; for (Map.Entry entry : idToPartition.entrySet()) { - rowCount += entry.getValue().getBaseIndex().getRowCount(); + for (PhysicalPartition partition : entry.getValue().getSubPartitions()) { + rowCount += partition.getBaseIndex().getRowCount(); + } } return rowCount; } @@ -2251,7 +2254,7 @@ public List> getArbitraryTabletBucketsSeq() throws DdlException { List> backendsPerBucketSeq = Lists.newArrayList(); Optional optionalPartition = idToPartition.values().stream().findFirst(); if (optionalPartition.isPresent()) { - Partition partition = optionalPartition.get(); + PhysicalPartition partition = optionalPartition.get().getDefaultPhysicalPartition(); short replicationNum = partitionInfo.getReplicationNum(partition.getId()); MaterializedIndex baseIdx = partition.getBaseIndex(); for (Long tabletId : baseIdx.getTabletIdsInOrder()) { @@ -2906,7 +2909,7 @@ public Map buildBinlogAvailableVersion() { Collection partitions = getPartitions(); for (Partition partition : partitions) { result.put(TableProperty.BINLOG_PARTITION + partition.getId(), - String.valueOf(partition.getVisibleVersion())); + String.valueOf(partition.getDefaultPhysicalPartition().getVisibleVersion())); } return result; } @@ -3152,7 +3155,8 @@ public void removeTabletsFromInvertedIndex() { TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); Collection allPartitions = getAllPartitions(); for (Partition partition : allPartitions) { - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { for (Tablet tablet : index.getTablets()) { invertedIndex.deleteTablet(tablet.getId()); } @@ -3167,7 +3171,7 @@ public void onDrop(Database db, boolean force, boolean replay) { // in recycle bin, // which make things easier. 
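OlapTable.getRowCount() now has to visit every physical sub-partition, because after this refactor a logical partition may be backed by more than one PhysicalPartition and summing only the base index of the default one would undercount. A hedged sketch of the same aggregation expressed as a standalone helper.

// Sketch: table-level row count accumulated across all physical sub-partitions.
static long tableRowCount(OlapTable table) {
    long rowCount = 0;
    for (Partition partition : table.getPartitions()) {
        for (PhysicalPartition physical : partition.getSubPartitions()) {
            rowCount += physical.getBaseIndex().getRowCount();
        }
    }
    return rowCount;
}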
dropAllTempPartitions(); - LocalMetastore.inactiveRelatedMaterializedView(db, this, + StarRocksMetadata.inactiveRelatedMaterializedView(db, this, MaterializedViewExceptions.inactiveReasonForBaseTableNotExists(getName())); if (!replay && hasAutoIncrementColumn()) { sendDropAutoIncrementMapTask(); diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/Partition.java b/fe/fe-core/src/main/java/com/starrocks/catalog/Partition.java index b868ccdb3caeb8..f4082cbfe0b3af 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/Partition.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/Partition.java @@ -35,14 +35,11 @@ package com.starrocks.catalog; import com.google.common.base.Objects; -import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.gson.annotations.SerializedName; -import com.starrocks.catalog.MaterializedIndex.IndexExtState; -import com.starrocks.catalog.MaterializedIndex.IndexState; -import com.starrocks.common.FeConstants; import com.starrocks.persist.gson.GsonPostProcessable; +import com.starrocks.persist.gson.GsonUtils; import com.starrocks.server.GlobalStateMgr; import com.starrocks.transaction.TransactionType; import org.apache.logging.log4j.LogManager; @@ -50,15 +47,13 @@ import java.io.IOException; import java.util.Collection; -import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; /** * Internal representation of partition-related metadata. */ -public class Partition extends MetaObject implements PhysicalPartition, GsonPostProcessable { +public class Partition extends MetaObject implements GsonPostProcessable { private static final Logger LOG = LogManager.getLogger(Partition.class); public static final long PARTITION_INIT_VERSION = 1L; @@ -74,82 +69,28 @@ public enum PartitionState { @SerializedName(value = "id") private long id; - private long beforeRestoreId; - @SerializedName(value = "name") private String name; + @SerializedName(value = "state") private PartitionState state; + + @SerializedName(value = "dpid") + private long defaultPhysicalPartitionId; + @SerializedName(value = "idToSubPartition") - private Map idToSubPartition = Maps.newHashMap(); - private Map nameToSubPartition = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); + private Map idToSubPartition = Maps.newHashMap(); + private Map nameToSubPartition = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); @SerializedName(value = "distributionInfo") private DistributionInfo distributionInfo; - @SerializedName(value = "shardGroupId") - private long shardGroupId; - - /* Physical Partition Member */ - @SerializedName(value = "isImmutable") - private AtomicBoolean isImmutable = new AtomicBoolean(false); - - @SerializedName(value = "baseIndex") - private MaterializedIndex baseIndex; - /** - * Visible rollup indexes are indexes which are visible to user. - * User can do query on them, show them in related 'show' stmt. - */ - @SerializedName(value = "idToVisibleRollupIndex") - private Map idToVisibleRollupIndex = Maps.newHashMap(); - /** - * Shadow indexes are indexes which are not visible to user. - * Query will not run on these shadow indexes, and user can not see them neither. - * But load process will load data into these shadow indexes. 
- */ - @SerializedName(value = "idToShadowIndex") - private Map idToShadowIndex = Maps.newHashMap(); - - /** - * committed version(hash): after txn is committed, set committed version(hash) - * visible version(hash): after txn is published, set visible version - * next version(hash): next version is set after finished committing, it should equal to committed version + 1 - */ - - // not have committedVersion because committedVersion = nextVersion - 1 - @SerializedName(value = "visibleVersion") - private volatile long visibleVersion; - @SerializedName(value = "visibleVersionTime") - private volatile long visibleVersionTime; - @SerializedName(value = "nextVersion") - private volatile long nextVersion; - - /* - * in shared-nothing mode, data version is always equals to visible version - * in shared-data mode, compactions increase visible version but not data version - */ - @SerializedName(value = "dataVersion") - private volatile long dataVersion; - @SerializedName(value = "nextDataVersion") - private volatile long nextDataVersion; - - /* - * if the visible version and version epoch are unchanged, the data is unchanged - */ - @SerializedName(value = "versionEpoch") - private volatile long versionEpoch; - @SerializedName(value = "versionTxnType") - private volatile TransactionType versionTxnType; - - /** - * ID of the transaction that has committed current visible version. - * Just for tracing the txn log, no need to persist. - */ - private volatile long visibleTxnId = -1; - - private volatile long lastVacuumTime = 0; - - private volatile long minRetainVersion = 0; + public Partition(long id, String name, DistributionInfo distributionInfo) { + this.id = id; + this.name = name; + this.state = PartitionState.NORMAL; + this.distributionInfo = distributionInfo; + } private Partition() { } @@ -157,6 +98,12 @@ private Partition() { public Partition(long id, String name, MaterializedIndex baseIndex, DistributionInfo distributionInfo) { + this(id, name, baseIndex, distributionInfo, 0); + } + + public Partition(long id, String name, + MaterializedIndex baseIndex, + DistributionInfo distributionInfo, long shardGroupId) { this.id = id; this.name = name; this.state = PartitionState.NORMAL; @@ -171,15 +118,13 @@ public Partition(long id, String name, this.nextDataVersion = this.nextVersion; this.versionEpoch = this.nextVersionEpoch(); this.versionTxnType = TransactionType.TXN_NORMAL; - this.distributionInfo = distributionInfo; - } - - public Partition(long id, String name, - MaterializedIndex baseIndex, - DistributionInfo distributionInfo, long shardGroupId) { - this(id, name, baseIndex, distributionInfo); this.shardGroupId = shardGroupId; + + this.defaultPhysicalPartitionId = id; + PhysicalPartition physicalPartition = new PhysicalPartition(id, name, id, this.shardGroupId, baseIndex); + this.idToSubPartition.put(id, physicalPartition); + this.nameToSubPartition.put(name, physicalPartition); } public Partition shallowCopy() { @@ -201,77 +146,14 @@ public Partition shallowCopy() { partition.shardGroupId = this.shardGroupId; partition.idToSubPartition = Maps.newHashMap(this.idToSubPartition); partition.nameToSubPartition = Maps.newHashMap(this.nameToSubPartition); + partition.defaultPhysicalPartitionId = this.defaultPhysicalPartitionId; return partition; } - @Override - public void setIdForRestore(long id) { - this.beforeRestoreId = this.id; - this.id = id; - } - public long getId() { return this.id; } - @Override - public long getBeforeRestoreId() { - return beforeRestoreId; - } - - @Override - public void 
setImmutable(boolean isImmutable) { - this.isImmutable.set(isImmutable); - } - - @Override - public boolean isImmutable() { - return this.isImmutable.get(); - } - - public void addSubPartition(PhysicalPartition subPartition) { - if (subPartition instanceof PhysicalPartitionImpl) { - if (subPartition.getName() == null) { - subPartition.setName(generatePhysicalPartitionName(subPartition.getId())); - } - idToSubPartition.put(subPartition.getId(), (PhysicalPartitionImpl) subPartition); - nameToSubPartition.put(subPartition.getName(), (PhysicalPartitionImpl) subPartition); - } - } - - public void removeSubPartition(long id) { - PhysicalPartitionImpl subPartition = idToSubPartition.remove(id); - if (subPartition != null) { - nameToSubPartition.remove(subPartition.getName()); - } - } - - public Collection getSubPartitions() { - List subPartitions = idToSubPartition.values().stream().collect(Collectors.toList()); - subPartitions.add(this); - return subPartitions; - } - - public PhysicalPartition getSubPartition(long id) { - return this.id == id ? this : idToSubPartition.get(id); - } - - public PhysicalPartition getSubPartition(String name) { - return this.name.equals(name) ? this : nameToSubPartition.get(name); - } - - public long getParentId() { - return this.id; - } - - public void setParentId(long parentId) { - return; - } - - public long getShardGroupId() { - return this.shardGroupId; - } - public void setName(String newName) { this.name = newName; } @@ -284,58 +166,6 @@ public void setState(PartitionState state) { this.state = state; } - /* - * If a partition is overwritten by a restore job, we need to reset all version info to - * the restored partition version info) - */ - public void updateVersionForRestore(long visibleVersion) { - this.setVisibleVersion(visibleVersion); - this.nextVersion = this.visibleVersion + 1; - LOG.info("update partition {} version for restore: visible: {}, next: {}", - name, visibleVersion, nextVersion); - } - - public void updateVisibleVersion(long visibleVersion) { - updateVisibleVersion(visibleVersion, System.currentTimeMillis()); - } - - public void updateVisibleVersion(long visibleVersion, long visibleVersionTime) { - this.setVisibleVersion(visibleVersion, visibleVersionTime); - } - - public void updateVisibleVersion(long visibleVersion, long visibleVersionTime, long visibleTxnId) { - setVisibleVersion(visibleVersion, visibleVersionTime, visibleTxnId); - } - - public long getVisibleVersion() { - return visibleVersion; - } - - public long getVisibleVersionTime() { - return visibleVersionTime; - } - - // The method updateVisibleVersion is called when fe restart, the visibleVersionTime is updated - private void setVisibleVersion(long visibleVersion) { - this.visibleVersion = visibleVersion; - this.visibleVersionTime = System.currentTimeMillis(); - } - - public void setVisibleVersion(long visibleVersion, long visibleVersionTime) { - this.visibleVersion = visibleVersion; - this.visibleVersionTime = visibleVersionTime; - } - - public void setVisibleVersion(long visibleVersion, long visibleVersionTime, long visibleTxnId) { - this.visibleVersion = visibleVersion; - this.visibleVersionTime = visibleVersionTime; - this.visibleTxnId = visibleTxnId; - } - - public long getVisibleTxnId() { - return visibleTxnId; - } - public PartitionState getState() { return this.state; } @@ -348,129 +178,50 @@ public void setDistributionInfo(DistributionInfo distributionInfo) { this.distributionInfo = distributionInfo; } - public void createRollupIndex(MaterializedIndex mIndex) { - if 
(mIndex.getState().isVisible()) { - this.idToVisibleRollupIndex.put(mIndex.getId(), mIndex); - } else { - this.idToShadowIndex.put(mIndex.getId(), mIndex); - } + public String generatePhysicalPartitionName(long physicalParitionId) { + return this.name + '_' + physicalParitionId; } - public MaterializedIndex deleteRollupIndex(long indexId) { - if (this.idToVisibleRollupIndex.containsKey(indexId)) { - return idToVisibleRollupIndex.remove(indexId); - } else { - return idToShadowIndex.remove(indexId); + public void addSubPartition(PhysicalPartition subPartition) { + if (defaultPhysicalPartitionId == 0) { + defaultPhysicalPartitionId = subPartition.getId(); } + if (subPartition.getName() == null) { + subPartition.setName(generatePhysicalPartitionName(subPartition.getId())); + } + idToSubPartition.put(subPartition.getId(), subPartition); + nameToSubPartition.put(subPartition.getName(), subPartition); } - public void setBaseIndex(MaterializedIndex baseIndex) { - this.baseIndex = baseIndex; - } - - public MaterializedIndex getBaseIndex() { - return baseIndex; - } - - public long getNextVersion() { - return nextVersion; - } - - public void setNextVersion(long nextVersion) { - this.nextVersion = nextVersion; - } - - public long getCommittedVersion() { - return this.nextVersion - 1; - } - - public long getDataVersion() { - return dataVersion; - } - - public void setDataVersion(long dataVersion) { - this.dataVersion = dataVersion; - } - - public long getNextDataVersion() { - return nextDataVersion; - } - - public void setNextDataVersion(long nextDataVersion) { - this.nextDataVersion = nextDataVersion; - } - - public long getCommittedDataVersion() { - return this.nextDataVersion - 1; - } - - public long getVersionEpoch() { - return versionEpoch; - } - - public void setVersionEpoch(long versionEpoch) { - this.versionEpoch = versionEpoch; - } - - public long nextVersionEpoch() { - return GlobalStateMgr.getCurrentState().getGtidGenerator().nextGtid(); - } - - public TransactionType getVersionTxnType() { - return versionTxnType; + public void removeSubPartition(long id) { + PhysicalPartition subPartition = idToSubPartition.remove(id); + if (subPartition != null) { + nameToSubPartition.remove(subPartition.getName()); + } } - public void setVersionTxnType(TransactionType versionTxnType) { - this.versionTxnType = versionTxnType; + public Collection getSubPartitions() { + return Lists.newArrayList(idToSubPartition.values()); } - public MaterializedIndex getIndex(long indexId) { - if (baseIndex.getId() == indexId) { - return baseIndex; - } - if (idToVisibleRollupIndex.containsKey(indexId)) { - return idToVisibleRollupIndex.get(indexId); - } else { - return idToShadowIndex.get(indexId); - } + public PhysicalPartition getSubPartition(long id) { + return idToSubPartition.get(id); } - public List getMaterializedIndices(IndexExtState extState) { - int expectedSize = 1 + idToVisibleRollupIndex.size() + idToShadowIndex.size(); - List indices = Lists.newArrayListWithExpectedSize(expectedSize); - switch (extState) { - case ALL: - indices.add(baseIndex); - indices.addAll(idToVisibleRollupIndex.values()); - indices.addAll(idToShadowIndex.values()); - break; - case VISIBLE: - indices.add(baseIndex); - indices.addAll(idToVisibleRollupIndex.values()); - break; - case SHADOW: - indices.addAll(idToShadowIndex.values()); - default: - break; - } - return indices; + public PhysicalPartition getSubPartition(String name) { + return nameToSubPartition.get(name); } - @Override - public long getTabletMaxDataSize() { - long maxDataSize 
= 0; - for (MaterializedIndex mIndex : getMaterializedIndices(IndexExtState.VISIBLE)) { - maxDataSize = Math.max(maxDataSize, mIndex.getTabletMaxDataSize()); - } - return maxDataSize; + public PhysicalPartition getDefaultPhysicalPartition() { + return idToSubPartition.get(defaultPhysicalPartitionId); } - public long storageDataSize() { - long dataSize = 0; - for (MaterializedIndex mIndex : getMaterializedIndices(IndexExtState.VISIBLE)) { - dataSize += mIndex.getDataSize(); + public boolean hasData() { + boolean hasData = false; + for (PhysicalPartition subPartition : getSubPartitions()) { + hasData |= subPartition.hasStorageData(); } - return dataSize; + return hasData; } public long getDataSize() { @@ -481,29 +232,13 @@ public long getDataSize() { return dataSize; } - public long storageRowCount() { - long rowCount = 0; - for (MaterializedIndex mIndex : getMaterializedIndices(IndexExtState.VISIBLE)) { - rowCount += mIndex.getRowCount(); - } - return rowCount; - } - public long getRowCount() { long rowCount = 0; - for (PhysicalPartition subPartition : idToSubPartition.values()) { + for (PhysicalPartition subPartition : getSubPartitions()) { rowCount += subPartition.storageRowCount(); } - rowCount += this.storageRowCount(); - return rowCount; - } - public long storageReplicaCount() { - long replicaCount = 0; - for (MaterializedIndex mIndex : getMaterializedIndices(IndexExtState.VISIBLE)) { - replicaCount += mIndex.getReplicaCount(); - } - return replicaCount; + return rowCount; } public long getReplicaCount() { @@ -514,48 +249,14 @@ public long getReplicaCount() { return replicaCount; } - public boolean hasMaterializedView() { - return !idToVisibleRollupIndex.isEmpty(); - } - - public boolean hasStorageData() { - // The fe unit test need to check the selected index id without any data. - // So if set FeConstants.runningUnitTest, we can ensure that the number of partitions is not empty, - // And the test case can continue to execute the logic of 'select best roll up' - return ((visibleVersion != PARTITION_INIT_VERSION) - || FeConstants.runningUnitTest); - } - - public boolean hasData() { - boolean hasData = false; - for (PhysicalPartition subPartition : getSubPartitions()) { - hasData |= subPartition.hasStorageData(); - } - return hasData; - } - - public boolean isFirstLoad() { - return visibleVersion == PARTITION_INIT_VERSION + 1; - } + public void setIdForRestore(long id) { + PhysicalPartition physicalPartition = getDefaultPhysicalPartition(); + removeSubPartition(defaultPhysicalPartitionId); + physicalPartition.setIdForRestore(id); + addSubPartition(physicalPartition); - /* - * Change the index' state from SHADOW to NORMAL - * Also move it to idToVisibleRollupIndex if it is not the base index. 
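After the rewrite, Partition is a thin logical container: the index-carrying constructor immediately creates one PhysicalPartition that reuses the logical id and name and records it as the default, and further physical sub-partitions are registered through addSubPartition(), which assigns a generated "<name>_<id>" name when none is set. A hedged sketch of that registration; the new id is illustrative (in StarRocks it would come from the id generator).

// Sketch: attaching an additional physical sub-partition to a logical partition.
static PhysicalPartition attachSubPartition(Partition partition, long newPhysicalId,
                                            long shardGroupId, MaterializedIndex baseIndex) {
    PhysicalPartition sub = new PhysicalPartition(
            newPhysicalId, /* name */ null, partition.getId(), shardGroupId, baseIndex);
    partition.addSubPartition(sub); // a name is generated here if it was left null
    return sub;
}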
- */ - public boolean visualiseShadowIndex(long shadowIndexId, boolean isBaseIndex) { - MaterializedIndex shadowIdx = idToShadowIndex.remove(shadowIndexId); - if (shadowIdx == null) { - return false; - } - Preconditions.checkState(!idToVisibleRollupIndex.containsKey(shadowIndexId), shadowIndexId); - shadowIdx.setState(IndexState.NORMAL); - if (isBaseIndex) { - baseIndex = shadowIdx; - } else { - idToVisibleRollupIndex.put(shadowIndexId, shadowIdx); - } - LOG.info("visualise the shadow index: {}", shadowIndexId); - return true; + this.id = id; + this.defaultPhysicalPartitionId = id; } @Override @@ -573,11 +274,7 @@ public boolean equals(Object obj) { } Partition partition = (Partition) obj; - return (id == partition.id) - && (visibleVersion == partition.visibleVersion) - && (baseIndex.equals(partition.baseIndex) - && distributionInfo.equals(partition.distributionInfo)) - && Objects.equal(idToVisibleRollupIndex, partition.idToVisibleRollupIndex); + return id == partition.id; } @Override @@ -599,11 +296,11 @@ public String toString() { } buffer.append("visibleVersion: ").append(visibleVersion).append("; "); - buffer.append("committedVersion: ").append(getCommittedVersion()).append("; "); + buffer.append("committedVersion: ").append(this.nextVersion - 1).append("; "); buffer.append("nextVersion: ").append(nextVersion).append("; "); buffer.append("dataVersion: ").append(dataVersion).append("; "); - buffer.append("committedDataVersion: ").append(getCommittedDataVersion()).append("; "); + buffer.append("committedDataVersion: ").append(this.nextDataVersion - 1).append("; "); buffer.append("versionEpoch: ").append(versionEpoch).append("; "); buffer.append("versionTxnType: ").append(versionTxnType).append("; "); @@ -614,26 +311,6 @@ public String toString() { return buffer.toString(); } - public long getLastVacuumTime() { - return lastVacuumTime; - } - - public void setLastVacuumTime(long lastVacuumTime) { - this.lastVacuumTime = lastVacuumTime; - } - - public long getMinRetainVersion() { - return minRetainVersion; - } - - public void setMinRetainVersion(long minRetainVersion) { - this.minRetainVersion = minRetainVersion; - } - - public String generatePhysicalPartitionName(long physicalParitionId) { - return this.name + '_' + physicalParitionId; - } - @Override public void gsonPostProcess() throws IOException { if (dataVersion == 0) { @@ -649,11 +326,80 @@ public void gsonPostProcess() throws IOException { versionTxnType = TransactionType.TXN_NORMAL; } - for (PhysicalPartitionImpl subPartition : idToSubPartition.values()) { + for (PhysicalPartition subPartition : idToSubPartition.values()) { if (subPartition.getName() == null) { subPartition.setName(generatePhysicalPartitionName(subPartition.getId())); } nameToSubPartition.put(subPartition.getName(), subPartition); } + + if (defaultPhysicalPartitionId == 0) { + defaultPhysicalPartitionId = id; + String partitionJson = GsonUtils.GSON.toJson(this); + PhysicalPartition physicalPartition = GsonUtils.GSON.fromJson(partitionJson, PhysicalPartition.class); + physicalPartition.setParentId(id); + idToSubPartition.put(id, physicalPartition); + nameToSubPartition.put(name, physicalPartition); + } + } + + /**************************************PhysicalPartition **********************************************/ + + @SerializedName(value = "shardGroupId") + private long shardGroupId; + + /* Physical Partition Member */ + @SerializedName(value = "isImmutable") + private AtomicBoolean isImmutable = new AtomicBoolean(false); + + @SerializedName(value = "baseIndex") + 
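The gsonPostProcess() hook above is the upgrade path for images written before this refactor: if defaultPhysicalPartitionId is still 0, the partition's own serialized form (which still carries the physical fields kept at the bottom of the class) is round-tripped through GSON into a PhysicalPartition and registered as the default. A hedged sketch of that conversion step in isolation, mirroring the calls shown in the hunk.

// Sketch of the GSON round-trip used to synthesize the default physical partition
// from a pre-refactor Partition image.
static PhysicalPartition toPhysical(Partition legacyPartition) {
    String json = GsonUtils.GSON.toJson(legacyPartition);
    PhysicalPartition physical = GsonUtils.GSON.fromJson(json, PhysicalPartition.class);
    physical.setParentId(legacyPartition.getId());
    return physical;
}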
private MaterializedIndex baseIndex; + /** + * Visible rollup indexes are indexes which are visible to user. + * User can do query on them, show them in related 'show' stmt. + */ + @SerializedName(value = "idToVisibleRollupIndex") + private Map idToVisibleRollupIndex = Maps.newHashMap(); + /** + * Shadow indexes are indexes which are not visible to user. + * Query will not run on these shadow indexes, and user can not see them neither. + * But load process will load data into these shadow indexes. + */ + @SerializedName(value = "idToShadowIndex") + private Map idToShadowIndex = Maps.newHashMap(); + + /** + * committed version(hash): after txn is committed, set committed version(hash) + * visible version(hash): after txn is published, set visible version + * next version(hash): next version is set after finished committing, it should equal to committed version + 1 + */ + + // not have committedVersion because committedVersion = nextVersion - 1 + @SerializedName(value = "visibleVersion") + private volatile long visibleVersion; + @SerializedName(value = "visibleVersionTime") + private volatile long visibleVersionTime; + @SerializedName(value = "nextVersion") + private volatile long nextVersion; + + /* + * in shared-nothing mode, data version is always equals to visible version + * in shared-data mode, compactions increase visible version but not data version + */ + @SerializedName(value = "dataVersion") + private volatile long dataVersion; + @SerializedName(value = "nextDataVersion") + private volatile long nextDataVersion; + + /* + * if the visible version and version epoch are unchanged, the data is unchanged + */ + @SerializedName(value = "versionEpoch") + private volatile long versionEpoch; + @SerializedName(value = "versionTxnType") + private volatile TransactionType versionTxnType; + + public long nextVersionEpoch() { + return GlobalStateMgr.getCurrentState().getGtidGenerator().nextGtid(); } -} +} \ No newline at end of file diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/PhysicalPartition.java b/fe/fe-core/src/main/java/com/starrocks/catalog/PhysicalPartition.java index 8e524d9758c62e..abc56d11ba3502 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/PhysicalPartition.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/PhysicalPartition.java @@ -14,92 +14,472 @@ package com.starrocks.catalog; +import com.google.common.base.Objects; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.gson.annotations.SerializedName; import com.starrocks.catalog.MaterializedIndex.IndexExtState; +import com.starrocks.catalog.MaterializedIndex.IndexState; +import com.starrocks.common.FeConstants; +import com.starrocks.persist.gson.GsonPostProcessable; +import com.starrocks.server.GlobalStateMgr; import com.starrocks.transaction.TransactionType; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicBoolean; -/* - * PhysicalPartition is the interface that describes the physical storage of a partition. - * It includes version information and one or more MaterializedIndexes. - * Each MaterializedIndex contains multiple tablets. 
+/** + * Physical Partition implementation */ -public interface PhysicalPartition { - - // partition id which contains this physical partition - public long getParentId(); - public void setParentId(long parentId); - - // physical partition id - public long getId(); - public String getName(); - public void setName(String name); - public void setIdForRestore(long id); - public long getBeforeRestoreId(); - - public long getShardGroupId(); - - public void setImmutable(boolean isImmutable); - public boolean isImmutable(); - - // version interface - - public void updateVersionForRestore(long visibleVersion); - - public void updateVisibleVersion(long visibleVersion); - - public void updateVisibleVersion(long visibleVersion, long visibleVersionTime); - public void updateVisibleVersion(long visibleVersion, long visibleVersionTime, long visibleTxnId); - public long getVisibleVersion(); - public long getVisibleVersionTime(); - public void setVisibleVersion(long visibleVersion, long visibleVersionTime); - public long getNextVersion(); - public void setNextVersion(long nextVersion); - public long getCommittedVersion(); - public long getDataVersion(); - public void setDataVersion(long dataVersion); - public long getNextDataVersion(); - public void setNextDataVersion(long nextDataVersion); - public long getCommittedDataVersion(); - public long getVersionEpoch(); - public void setVersionEpoch(long versionEpoch); - public long nextVersionEpoch(); - public TransactionType getVersionTxnType(); - public void setVersionTxnType(TransactionType versionTxnType); - public long getVisibleTxnId(); - - // materialized index interface - - public void createRollupIndex(MaterializedIndex mIndex); - public MaterializedIndex deleteRollupIndex(long indexId); - public void setBaseIndex(MaterializedIndex baseIndex); - public MaterializedIndex getBaseIndex(); - public MaterializedIndex getIndex(long indexId); - public List getMaterializedIndices(IndexExtState extState); +public class PhysicalPartition extends MetaObject implements GsonPostProcessable { + private static final Logger LOG = LogManager.getLogger(PhysicalPartition.class); + + public static final long PARTITION_INIT_VERSION = 1L; + + @SerializedName(value = "id") + private long id; + + @SerializedName(value = "name") + private String name; + + private long beforeRestoreId; + + @SerializedName(value = "parentId") + private long parentId; + + @SerializedName(value = "shardGroupId") + private long shardGroupId; + + /* Physical Partition Member */ + @SerializedName(value = "isImmutable") + private AtomicBoolean isImmutable = new AtomicBoolean(false); + + @SerializedName(value = "baseIndex") + private MaterializedIndex baseIndex; + /** + * Visible rollup indexes are indexes which are visible to user. + * User can do query on them, show them in related 'show' stmt. + */ + @SerializedName(value = "idToVisibleRollupIndex") + private Map idToVisibleRollupIndex = Maps.newHashMap(); + /** + * Shadow indexes are indexes which are not visible to user. + * Query will not run on these shadow indexes, and user can not see them neither. + * But load process will load data into these shadow indexes. 
+ */ + @SerializedName(value = "idToShadowIndex") + private Map idToShadowIndex = Maps.newHashMap(); + + /** + * committed version(hash): after txn is committed, set committed version(hash) + * visible version(hash): after txn is published, set visible version + * next version(hash): next version is set after finished committing, it should equal to committed version + 1 + */ + + // not have committedVersion because committedVersion = nextVersion - 1 + @SerializedName(value = "visibleVersion") + private long visibleVersion; + @SerializedName(value = "visibleVersionTime") + private long visibleVersionTime; + @SerializedName(value = "nextVersion") + private long nextVersion; + + @SerializedName(value = "dataVersion") + private long dataVersion; + @SerializedName(value = "nextDataVersion") + private long nextDataVersion; + + @SerializedName(value = "versionEpoch") + private long versionEpoch; + @SerializedName(value = "versionTxnType") + private TransactionType versionTxnType; + /** + * ID of the transaction that has committed current visible version. + * Just for tracing the txn log, no need to persist. + */ + private long visibleTxnId = -1; + + private volatile long lastVacuumTime = 0; + + private volatile long minRetainVersion = 0; + + public PhysicalPartition(long id, String name, long parentId, long sharedGroupId, MaterializedIndex baseIndex) { + this.id = id; + this.name = name; + this.parentId = parentId; + this.baseIndex = baseIndex; + this.visibleVersion = PARTITION_INIT_VERSION; + this.visibleVersionTime = System.currentTimeMillis(); + this.nextVersion = this.visibleVersion + 1; + this.dataVersion = this.visibleVersion; + this.nextDataVersion = this.nextVersion; + this.versionEpoch = this.nextVersionEpoch(); + this.versionTxnType = TransactionType.TXN_NORMAL; + this.shardGroupId = sharedGroupId; + } + + public long getId() { + return this.id; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public void setIdForRestore(long id) { + this.beforeRestoreId = this.id; + this.id = id; + } + + public long getBeforeRestoreId() { + return this.beforeRestoreId; + } + + public long getParentId() { + return this.parentId; + } + + public void setParentId(long parentId) { + this.parentId = parentId; + } + + public long getShardGroupId() { + return this.shardGroupId; + } + + public void setImmutable(boolean isImmutable) { + this.isImmutable.set(isImmutable); + } + + public boolean isImmutable() { + return this.isImmutable.get(); + } + + public long getLastVacuumTime() { + return lastVacuumTime; + } + + public void setLastVacuumTime(long lastVacuumTime) { + this.lastVacuumTime = lastVacuumTime; + } + + public long getMinRetainVersion() { + return minRetainVersion; + } + + public void setMinRetainVersion(long minRetainVersion) { + this.minRetainVersion = minRetainVersion; + } + + /* + * If a partition is overwritten by a restore job, we need to reset all version info to + * the restored partition version info) + */ + + public void updateVersionForRestore(long visibleVersion) { + this.setVisibleVersion(visibleVersion, System.currentTimeMillis()); + this.nextVersion = this.visibleVersion + 1; + LOG.info("update partition {} version for restore: visible: {}, next: {}", + id, visibleVersion, nextVersion); + } + + public void updateVisibleVersion(long visibleVersion) { + updateVisibleVersion(visibleVersion, System.currentTimeMillis()); + } + + public void updateVisibleVersion(long visibleVersion, long visibleVersionTime) { + 
this.setVisibleVersion(visibleVersion, visibleVersionTime); + } + + public void updateVisibleVersion(long visibleVersion, long visibleVersionTime, long visibleTxnId) { + setVisibleVersion(visibleVersion, visibleVersionTime); + this.visibleTxnId = visibleTxnId; + } + + public long getVisibleTxnId() { + return visibleTxnId; + } + + public long getVisibleVersion() { + return visibleVersion; + } + + public long getVisibleVersionTime() { + return visibleVersionTime; + } + + public void setVisibleVersion(long visibleVersion, long visibleVersionTime) { + this.visibleVersion = visibleVersion; + this.visibleVersionTime = visibleVersionTime; + } + + public void createRollupIndex(MaterializedIndex mIndex) { + if (mIndex.getState().isVisible()) { + this.idToVisibleRollupIndex.put(mIndex.getId(), mIndex); + } else { + this.idToShadowIndex.put(mIndex.getId(), mIndex); + } + } + + public MaterializedIndex deleteRollupIndex(long indexId) { + if (this.idToVisibleRollupIndex.containsKey(indexId)) { + return idToVisibleRollupIndex.remove(indexId); + } else { + return idToShadowIndex.remove(indexId); + } + } + + public void setBaseIndex(MaterializedIndex baseIndex) { + this.baseIndex = baseIndex; + } + + public MaterializedIndex getBaseIndex() { + return baseIndex; + } + + public long getNextVersion() { + return nextVersion; + } + + public void setNextVersion(long nextVersion) { + this.nextVersion = nextVersion; + } + + public long getCommittedVersion() { + return this.nextVersion - 1; + } + + public long getDataVersion() { + return dataVersion; + } + + public void setDataVersion(long dataVersion) { + this.dataVersion = dataVersion; + } + + public long getNextDataVersion() { + return nextDataVersion; + } + + public void setNextDataVersion(long nextDataVersion) { + this.nextDataVersion = nextDataVersion; + } + + public long getCommittedDataVersion() { + return this.nextDataVersion - 1; + } + + public long getVersionEpoch() { + return versionEpoch; + } + + public void setVersionEpoch(long versionEpoch) { + this.versionEpoch = versionEpoch; + } + + public long nextVersionEpoch() { + return GlobalStateMgr.getCurrentState().getGtidGenerator().nextGtid(); + } + + public TransactionType getVersionTxnType() { + return versionTxnType; + } + + public void setVersionTxnType(TransactionType versionTxnType) { + this.versionTxnType = versionTxnType; + } + + public MaterializedIndex getIndex(long indexId) { + if (baseIndex.getId() == indexId) { + return baseIndex; + } + if (idToVisibleRollupIndex.containsKey(indexId)) { + return idToVisibleRollupIndex.get(indexId); + } else { + return idToShadowIndex.get(indexId); + } + } + + public List getMaterializedIndices(IndexExtState extState) { + int expectedSize = 1 + idToVisibleRollupIndex.size() + idToShadowIndex.size(); + List indices = Lists.newArrayListWithExpectedSize(expectedSize); + switch (extState) { + case ALL: + indices.add(baseIndex); + indices.addAll(idToVisibleRollupIndex.values()); + indices.addAll(idToShadowIndex.values()); + break; + case VISIBLE: + indices.add(baseIndex); + indices.addAll(idToVisibleRollupIndex.values()); + break; + case SHADOW: + indices.addAll(idToShadowIndex.values()); + default: + break; + } + return indices; + } + + public long getTabletMaxDataSize() { + long maxDataSize = 0; + for (MaterializedIndex mIndex : getMaterializedIndices(IndexExtState.VISIBLE)) { + maxDataSize = Math.max(maxDataSize, mIndex.getTabletMaxDataSize()); + } + return maxDataSize; + } + + public long storageDataSize() { + long dataSize = 0; + for (MaterializedIndex 
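The version bookkeeping carried over to PhysicalPartition keeps the same invariants described in the comments above: committedVersion is always nextVersion - 1, and publishing makes the committed version visible. The following is a simplified hedged sketch of a single commit-then-publish step expressed with these setters; the real transaction manager also updates data versions and replica state.

// Sketch: one transaction committed and then published against a physical partition.
static void commitAndPublish(PhysicalPartition physical, long txnId) {
    long committedVersion = physical.getNextVersion();   // version assigned to the committed txn
    physical.setNextVersion(committedVersion + 1);       // keeps committedVersion == nextVersion - 1
    physical.updateVisibleVersion(committedVersion, System.currentTimeMillis(), txnId);
}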
mIndex : getMaterializedIndices(IndexExtState.VISIBLE)) { + dataSize += mIndex.getDataSize(); + } + return dataSize; + } + + public long storageRowCount() { + long rowCount = 0; + for (MaterializedIndex mIndex : getMaterializedIndices(IndexExtState.VISIBLE)) { + rowCount += mIndex.getRowCount(); + } + return rowCount; + } + + public long storageReplicaCount() { + long replicaCount = 0; + for (MaterializedIndex mIndex : getMaterializedIndices(IndexExtState.VISIBLE)) { + replicaCount += mIndex.getReplicaCount(); + } + return replicaCount; + } + + public boolean hasMaterializedView() { + return !idToVisibleRollupIndex.isEmpty(); + } + + public boolean hasStorageData() { + // The fe unit test need to check the selected index id without any data. + // So if set FeConstants.runningUnitTest, we can ensure that the number of partitions is not empty, + // And the test case can continue to execute the logic of 'select best roll up' + return ((visibleVersion != PARTITION_INIT_VERSION) + || FeConstants.runningUnitTest); + } + + public boolean isFirstLoad() { + return visibleVersion == PARTITION_INIT_VERSION + 1; + } + /* * Change the index' state from SHADOW to NORMAL + * Also move it to idToVisibleRollupIndex if it is not the base index. */ - public boolean visualiseShadowIndex(long shadowIndexId, boolean isBaseIndex); - - // statistic interface - - // max data size of one tablet in this physical partition - public long getTabletMaxDataSize(); - // partition data size reported by be, but may be not accurate - public long storageDataSize(); - // partition row count reported by be, but may be not accurate - public long storageRowCount(); - // partition replica count, it's accurate - public long storageReplicaCount(); - // has data judge by fe version, it's accurate - public boolean hasStorageData(); - public boolean hasMaterializedView(); - public boolean isFirstLoad(); - - // for lake partition - public long getMinRetainVersion(); - public void setMinRetainVersion(long minRetainVersion); - public long getLastVacuumTime(); - public void setLastVacuumTime(long lastVacuumTime); + public boolean visualiseShadowIndex(long shadowIndexId, boolean isBaseIndex) { + MaterializedIndex shadowIdx = idToShadowIndex.remove(shadowIndexId); + if (shadowIdx == null) { + return false; + } + Preconditions.checkState(!idToVisibleRollupIndex.containsKey(shadowIndexId), shadowIndexId); + shadowIdx.setState(IndexState.NORMAL); + if (isBaseIndex) { + baseIndex = shadowIdx; + } else { + idToVisibleRollupIndex.put(shadowIndexId, shadowIdx); + } + LOG.info("visualise the shadow index: {}", shadowIndexId); + return true; + } + + public int hashCode() { + return Objects.hashCode(visibleVersion, baseIndex); + } + + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof PhysicalPartition)) { + return false; + } + + PhysicalPartition partition = (PhysicalPartition) obj; + if (idToVisibleRollupIndex != partition.idToVisibleRollupIndex) { + if (idToVisibleRollupIndex.size() != partition.idToVisibleRollupIndex.size()) { + return false; + } + for (Entry entry : idToVisibleRollupIndex.entrySet()) { + long key = entry.getKey(); + if (!partition.idToVisibleRollupIndex.containsKey(key)) { + return false; + } + if (!entry.getValue().equals(partition.idToVisibleRollupIndex.get(key))) { + return false; + } + } + } + + return (visibleVersion == partition.visibleVersion) + && (baseIndex.equals(partition.baseIndex)); + } + + public String toString() { + StringBuilder buffer = new StringBuilder(); + 
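visualiseShadowIndex() is the schema-change hand-off: the shadow index is removed from idToShadowIndex, flipped to NORMAL, and becomes either the new base index or a visible rollup. A hedged sketch of how a caller might drive it across a table after this refactor; the looping over all physical partitions is an assumption about the alter-job flow, not code from this patch.

// Sketch: promoting a completed shadow index on every physical partition of a table.
static void promoteShadowIndex(OlapTable table, long shadowIndexId, boolean isBaseIndex) {
    for (Partition partition : table.getPartitions()) {
        for (PhysicalPartition physical : partition.getSubPartitions()) {
            boolean promoted = physical.visualiseShadowIndex(shadowIndexId, isBaseIndex);
            // promoted == false means this physical partition had no such shadow index
        }
    }
}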
buffer.append("partitionId: ").append(id).append("; "); + buffer.append("partitionName: ").append(name).append("; "); + buffer.append("parentPartitionId: ").append(parentId).append("; "); + buffer.append("shardGroupId: ").append(shardGroupId).append("; "); + buffer.append("isImmutable: ").append(isImmutable()).append("; "); + + buffer.append("baseIndex: ").append(baseIndex.toString()).append("; "); + + int rollupCount = (idToVisibleRollupIndex != null) ? idToVisibleRollupIndex.size() : 0; + buffer.append("rollupCount: ").append(rollupCount).append("; "); + + if (idToVisibleRollupIndex != null) { + for (Map.Entry entry : idToVisibleRollupIndex.entrySet()) { + buffer.append("rollupIndex: ").append(entry.getValue().toString()).append("; "); + } + } + + buffer.append("visibleVersion: ").append(visibleVersion).append("; "); + buffer.append("visibleVersionTime: ").append(visibleVersionTime).append("; "); + buffer.append("committedVersion: ").append(getCommittedVersion()).append("; "); + + buffer.append("dataVersion: ").append(dataVersion).append("; "); + buffer.append("committedDataVersion: ").append(getCommittedDataVersion()).append("; "); + + buffer.append("versionEpoch: ").append(versionEpoch).append("; "); + buffer.append("versionTxnType: ").append(versionTxnType).append("; "); + + buffer.append("storageDataSize: ").append(storageDataSize()).append("; "); + buffer.append("storageRowCount: ").append(storageRowCount()).append("; "); + buffer.append("storageReplicaCount: ").append(storageReplicaCount()).append("; "); + + return buffer.toString(); + } + + public void gsonPostProcess() throws IOException { + if (dataVersion == 0) { + dataVersion = visibleVersion; + } + if (nextDataVersion == 0) { + nextDataVersion = nextVersion; + } + if (versionEpoch == 0) { + versionEpoch = nextVersionEpoch(); + } + if (versionTxnType == null) { + versionTxnType = TransactionType.TXN_NORMAL; + } + } } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/PhysicalPartitionImpl.java b/fe/fe-core/src/main/java/com/starrocks/catalog/PhysicalPartitionImpl.java deleted file mode 100644 index 0d69444053c9f8..00000000000000 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/PhysicalPartitionImpl.java +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright 2021-present StarRocks, Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package com.starrocks.catalog; - -import com.google.common.base.Objects; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.gson.annotations.SerializedName; -import com.starrocks.catalog.MaterializedIndex.IndexExtState; -import com.starrocks.catalog.MaterializedIndex.IndexState; -import com.starrocks.common.FeConstants; -import com.starrocks.persist.gson.GsonPostProcessable; -import com.starrocks.server.GlobalStateMgr; -import com.starrocks.transaction.TransactionType; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * Physical Partition implementation - */ -public class PhysicalPartitionImpl extends MetaObject implements PhysicalPartition, GsonPostProcessable { - private static final Logger LOG = LogManager.getLogger(PhysicalPartitionImpl.class); - - public static final long PARTITION_INIT_VERSION = 1L; - - @SerializedName(value = "id") - private long id; - - @SerializedName(value = "name") - private String name; - - private long beforeRestoreId; - - @SerializedName(value = "parentId") - private long parentId; - - @SerializedName(value = "shardGroupId") - private long shardGroupId; - - /* Physical Partition Member */ - @SerializedName(value = "isImmutable") - private AtomicBoolean isImmutable = new AtomicBoolean(false); - - @SerializedName(value = "baseIndex") - private MaterializedIndex baseIndex; - /** - * Visible rollup indexes are indexes which are visible to user. - * User can do query on them, show them in related 'show' stmt. - */ - @SerializedName(value = "idToVisibleRollupIndex") - private Map idToVisibleRollupIndex = Maps.newHashMap(); - /** - * Shadow indexes are indexes which are not visible to user. - * Query will not run on these shadow indexes, and user can not see them neither. - * But load process will load data into these shadow indexes. - */ - @SerializedName(value = "idToShadowIndex") - private Map idToShadowIndex = Maps.newHashMap(); - - /** - * committed version(hash): after txn is committed, set committed version(hash) - * visible version(hash): after txn is published, set visible version - * next version(hash): next version is set after finished committing, it should equal to committed version + 1 - */ - - // not have committedVersion because committedVersion = nextVersion - 1 - @SerializedName(value = "visibleVersion") - private long visibleVersion; - @SerializedName(value = "visibleVersionTime") - private long visibleVersionTime; - @SerializedName(value = "nextVersion") - private long nextVersion; - - @SerializedName(value = "dataVersion") - private long dataVersion; - @SerializedName(value = "nextDataVersion") - private long nextDataVersion; - - @SerializedName(value = "versionEpoch") - private long versionEpoch; - @SerializedName(value = "versionTxnType") - private TransactionType versionTxnType; - /** - * ID of the transaction that has committed current visible version. - * Just for tracing the txn log, no need to persist. 
- */ - private long visibleTxnId = -1; - - private volatile long lastVacuumTime = 0; - - private volatile long minRetainVersion = 0; - - public PhysicalPartitionImpl(long id, String name, long parentId, long sharedGroupId, MaterializedIndex baseIndex) { - this.id = id; - this.name = name; - this.parentId = parentId; - this.baseIndex = baseIndex; - this.visibleVersion = PARTITION_INIT_VERSION; - this.visibleVersionTime = System.currentTimeMillis(); - this.nextVersion = this.visibleVersion + 1; - this.dataVersion = this.visibleVersion; - this.nextDataVersion = this.nextVersion; - this.versionEpoch = this.nextVersionEpoch(); - this.versionTxnType = TransactionType.TXN_NORMAL; - this.shardGroupId = sharedGroupId; - } - - @Override - public long getId() { - return this.id; - } - - @Override - public String getName() { - return this.name; - } - - @Override - public void setName(String name) { - this.name = name; - } - - @Override - public void setIdForRestore(long id) { - this.beforeRestoreId = this.id; - this.id = id; - } - - @Override - public long getBeforeRestoreId() { - return this.beforeRestoreId; - } - - @Override - public long getParentId() { - return this.parentId; - } - - @Override - public void setParentId(long parentId) { - this.parentId = parentId; - } - - @Override - public long getShardGroupId() { - return this.shardGroupId; - } - - @Override - public void setImmutable(boolean isImmutable) { - this.isImmutable.set(isImmutable); - } - - @Override - public boolean isImmutable() { - return this.isImmutable.get(); - } - - @Override - public long getLastVacuumTime() { - return lastVacuumTime; - } - - @Override - public void setLastVacuumTime(long lastVacuumTime) { - this.lastVacuumTime = lastVacuumTime; - } - - @Override - public long getMinRetainVersion() { - return minRetainVersion; - } - - @Override - public void setMinRetainVersion(long minRetainVersion) { - this.minRetainVersion = minRetainVersion; - } - - /* - * If a partition is overwritten by a restore job, we need to reset all version info to - * the restored partition version info) - */ - @Override - public void updateVersionForRestore(long visibleVersion) { - this.setVisibleVersion(visibleVersion, System.currentTimeMillis()); - this.nextVersion = this.visibleVersion + 1; - LOG.info("update partition {} version for restore: visible: {}, next: {}", - id, visibleVersion, nextVersion); - } - - @Override - public void updateVisibleVersion(long visibleVersion) { - updateVisibleVersion(visibleVersion, System.currentTimeMillis()); - } - - @Override - public void updateVisibleVersion(long visibleVersion, long visibleVersionTime) { - this.setVisibleVersion(visibleVersion, visibleVersionTime); - } - - @Override - public void updateVisibleVersion(long visibleVersion, long visibleVersionTime, long visibleTxnId) { - setVisibleVersion(visibleVersion, visibleVersionTime); - this.visibleTxnId = visibleTxnId; - } - - @Override - public long getVisibleTxnId() { - return visibleTxnId; - } - - @Override - public long getVisibleVersion() { - return visibleVersion; - } - - @Override - public long getVisibleVersionTime() { - return visibleVersionTime; - } - - @Override - public void setVisibleVersion(long visibleVersion, long visibleVersionTime) { - this.visibleVersion = visibleVersion; - this.visibleVersionTime = visibleVersionTime; - } - - @Override - public void createRollupIndex(MaterializedIndex mIndex) { - if (mIndex.getState().isVisible()) { - this.idToVisibleRollupIndex.put(mIndex.getId(), mIndex); - } else { - 
this.idToShadowIndex.put(mIndex.getId(), mIndex); - } - } - - @Override - public MaterializedIndex deleteRollupIndex(long indexId) { - if (this.idToVisibleRollupIndex.containsKey(indexId)) { - return idToVisibleRollupIndex.remove(indexId); - } else { - return idToShadowIndex.remove(indexId); - } - } - - @Override - public void setBaseIndex(MaterializedIndex baseIndex) { - this.baseIndex = baseIndex; - } - - @Override - public MaterializedIndex getBaseIndex() { - return baseIndex; - } - - @Override - public long getNextVersion() { - return nextVersion; - } - - @Override - public void setNextVersion(long nextVersion) { - this.nextVersion = nextVersion; - } - - @Override - public long getCommittedVersion() { - return this.nextVersion - 1; - } - - @Override - public long getDataVersion() { - return dataVersion; - } - - @Override - public void setDataVersion(long dataVersion) { - this.dataVersion = dataVersion; - } - - @Override - public long getNextDataVersion() { - return nextDataVersion; - } - - @Override - public void setNextDataVersion(long nextDataVersion) { - this.nextDataVersion = nextDataVersion; - } - - @Override - public long getCommittedDataVersion() { - return this.nextDataVersion - 1; - } - - @Override - public long getVersionEpoch() { - return versionEpoch; - } - - @Override - public void setVersionEpoch(long versionEpoch) { - this.versionEpoch = versionEpoch; - } - - @Override - public long nextVersionEpoch() { - return GlobalStateMgr.getCurrentState().getGtidGenerator().nextGtid(); - } - - public TransactionType getVersionTxnType() { - return versionTxnType; - } - - public void setVersionTxnType(TransactionType versionTxnType) { - this.versionTxnType = versionTxnType; - } - - @Override - public MaterializedIndex getIndex(long indexId) { - if (baseIndex.getId() == indexId) { - return baseIndex; - } - if (idToVisibleRollupIndex.containsKey(indexId)) { - return idToVisibleRollupIndex.get(indexId); - } else { - return idToShadowIndex.get(indexId); - } - } - - @Override - public List getMaterializedIndices(IndexExtState extState) { - List indices = Lists.newArrayList(); - switch (extState) { - case ALL: - indices.add(baseIndex); - indices.addAll(idToVisibleRollupIndex.values()); - indices.addAll(idToShadowIndex.values()); - break; - case VISIBLE: - indices.add(baseIndex); - indices.addAll(idToVisibleRollupIndex.values()); - break; - case SHADOW: - indices.addAll(idToShadowIndex.values()); - default: - break; - } - return indices; - } - - @Override - public long getTabletMaxDataSize() { - long maxDataSize = 0; - for (MaterializedIndex mIndex : getMaterializedIndices(IndexExtState.VISIBLE)) { - maxDataSize = Math.max(maxDataSize, mIndex.getTabletMaxDataSize()); - } - return maxDataSize; - } - - @Override - public long storageDataSize() { - long dataSize = 0; - for (MaterializedIndex mIndex : getMaterializedIndices(IndexExtState.VISIBLE)) { - dataSize += mIndex.getDataSize(); - } - return dataSize; - } - - @Override - public long storageRowCount() { - long rowCount = 0; - for (MaterializedIndex mIndex : getMaterializedIndices(IndexExtState.VISIBLE)) { - rowCount += mIndex.getRowCount(); - } - return rowCount; - } - - @Override - public long storageReplicaCount() { - long replicaCount = 0; - for (MaterializedIndex mIndex : getMaterializedIndices(IndexExtState.VISIBLE)) { - replicaCount += mIndex.getReplicaCount(); - } - return replicaCount; - } - - @Override - public boolean hasMaterializedView() { - return !idToVisibleRollupIndex.isEmpty(); - } - - @Override - public boolean 
hasStorageData() { - // The fe unit test need to check the selected index id without any data. - // So if set FeConstants.runningUnitTest, we can ensure that the number of partitions is not empty, - // And the test case can continue to execute the logic of 'select best roll up' - return ((visibleVersion != PARTITION_INIT_VERSION) - || FeConstants.runningUnitTest); - } - - @Override - public boolean isFirstLoad() { - return visibleVersion == PARTITION_INIT_VERSION + 1; - } - - /* - * Change the index' state from SHADOW to NORMAL - * Also move it to idToVisibleRollupIndex if it is not the base index. - */ - @Override - public boolean visualiseShadowIndex(long shadowIndexId, boolean isBaseIndex) { - MaterializedIndex shadowIdx = idToShadowIndex.remove(shadowIndexId); - if (shadowIdx == null) { - return false; - } - Preconditions.checkState(!idToVisibleRollupIndex.containsKey(shadowIndexId), shadowIndexId); - shadowIdx.setState(IndexState.NORMAL); - if (isBaseIndex) { - baseIndex = shadowIdx; - } else { - idToVisibleRollupIndex.put(shadowIndexId, shadowIdx); - } - LOG.info("visualise the shadow index: {}", shadowIndexId); - return true; - } - - @Override - public int hashCode() { - return Objects.hashCode(visibleVersion, baseIndex); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof PhysicalPartitionImpl)) { - return false; - } - - PhysicalPartitionImpl partition = (PhysicalPartitionImpl) obj; - if (idToVisibleRollupIndex != partition.idToVisibleRollupIndex) { - if (idToVisibleRollupIndex.size() != partition.idToVisibleRollupIndex.size()) { - return false; - } - for (Entry entry : idToVisibleRollupIndex.entrySet()) { - long key = entry.getKey(); - if (!partition.idToVisibleRollupIndex.containsKey(key)) { - return false; - } - if (!entry.getValue().equals(partition.idToVisibleRollupIndex.get(key))) { - return false; - } - } - } - - return (visibleVersion == partition.visibleVersion) - && (baseIndex.equals(partition.baseIndex)); - } - - @Override - public String toString() { - StringBuilder buffer = new StringBuilder(); - buffer.append("partitionId: ").append(id).append("; "); - buffer.append("partitionName: ").append(name).append("; "); - buffer.append("parentPartitionId: ").append(parentId).append("; "); - buffer.append("shardGroupId: ").append(shardGroupId).append("; "); - buffer.append("isImmutable: ").append(isImmutable()).append("; "); - - buffer.append("baseIndex: ").append(baseIndex.toString()).append("; "); - - int rollupCount = (idToVisibleRollupIndex != null) ? 
idToVisibleRollupIndex.size() : 0; - buffer.append("rollupCount: ").append(rollupCount).append("; "); - - if (idToVisibleRollupIndex != null) { - for (Map.Entry entry : idToVisibleRollupIndex.entrySet()) { - buffer.append("rollupIndex: ").append(entry.getValue().toString()).append("; "); - } - } - - buffer.append("visibleVersion: ").append(visibleVersion).append("; "); - buffer.append("visibleVersionTime: ").append(visibleVersionTime).append("; "); - buffer.append("committedVersion: ").append(getCommittedVersion()).append("; "); - - buffer.append("dataVersion: ").append(dataVersion).append("; "); - buffer.append("committedDataVersion: ").append(getCommittedDataVersion()).append("; "); - - buffer.append("versionEpoch: ").append(versionEpoch).append("; "); - buffer.append("versionTxnType: ").append(versionTxnType).append("; "); - - buffer.append("storageDataSize: ").append(storageDataSize()).append("; "); - buffer.append("storageRowCount: ").append(storageRowCount()).append("; "); - buffer.append("storageReplicaCount: ").append(storageReplicaCount()).append("; "); - - return buffer.toString(); - } - - @Override - public void gsonPostProcess() throws IOException { - if (dataVersion == 0) { - dataVersion = visibleVersion; - } - if (nextDataVersion == 0) { - nextDataVersion = nextVersion; - } - if (versionEpoch == 0) { - versionEpoch = nextVersionEpoch(); - } - if (versionTxnType == null) { - versionTxnType = TransactionType.TXN_NORMAL; - } - } -} diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/RecyclePartitionInfo.java b/fe/fe-core/src/main/java/com/starrocks/catalog/RecyclePartitionInfo.java index ae8be23fbe2e69..fe3d4d7a92522f 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/RecyclePartitionInfo.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/RecyclePartitionInfo.java @@ -91,7 +91,7 @@ public void setRecoverable(boolean recoverable) { } public boolean delete() { - GlobalStateMgr.getCurrentState().getLocalMetastore().onErasePartition(partition); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().onErasePartition(partition); return true; } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/TabletInvertedIndex.java b/fe/fe-core/src/main/java/com/starrocks/catalog/TabletInvertedIndex.java index 0a8667e0931b6f..2f8584067b7c54 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/TabletInvertedIndex.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/TabletInvertedIndex.java @@ -57,6 +57,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; +import static com.starrocks.server.GlobalStateMgr.isCheckpointThread; + /* * this class stores an inverted index * key is tablet id. 
value is the related ids of this tablet @@ -432,11 +434,52 @@ public void clear() { } } + public void recreateTabletInvertIndex() { + if (isCheckpointThread()) { + return; + } + + // create inverted index + TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); + for (Database db : GlobalStateMgr.getCurrentState().getLocalMetastore().getFullNameToDb().values()) { + long dbId = db.getId(); + for (com.starrocks.catalog.Table table : db.getTables()) { + if (!table.isNativeTableOrMaterializedView()) { + continue; + } + + OlapTable olapTable = (OlapTable) table; + long tableId = olapTable.getId(); + for (PhysicalPartition partition : olapTable.getAllPhysicalPartitions()) { + long physicalPartitionId = partition.getId(); + TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty( + partition.getParentId()).getStorageMedium(); + for (MaterializedIndex index : partition + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + long indexId = index.getId(); + int schemaHash = olapTable.getSchemaHashByIndexId(indexId); + TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partition.getParentId(), physicalPartitionId, + indexId, schemaHash, medium, table.isCloudNativeTableOrMaterializedView()); + for (Tablet tablet : index.getTablets()) { + long tabletId = tablet.getId(); + invertedIndex.addTablet(tabletId, tabletMeta); + if (table.isOlapTableOrMaterializedView()) { + for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { + invertedIndex.addReplica(tabletId, replica); + } + } + } + } // end for indices + } // end for partitions + } // end for tables + } // end for dbs + } + @Override public Map estimateCount() { return ImmutableMap.of("TabletMeta", (long) tabletMetaMap.size(), - "TabletCount", getTabletCount(), - "ReplicateCount", getReplicaCount()); + "TabletCount", getTabletCount(), + "ReplicateCount", getReplicaCount()); } @Override diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/TabletMeta.java b/fe/fe-core/src/main/java/com/starrocks/catalog/TabletMeta.java index 9743b026d0cf79..b12df5527ea8e1 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/TabletMeta.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/TabletMeta.java @@ -44,6 +44,7 @@ public class TabletMeta { private final long partitionId; private final long physicalPartitionId; private final long indexId; + private final long tabletId = 0; private final int oldSchemaHash; private final int newSchemaHash; diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/TabletStatMgr.java b/fe/fe-core/src/main/java/com/starrocks/catalog/TabletStatMgr.java index ddaa5de0aef417..3dc482efbc27d1 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/TabletStatMgr.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/TabletStatMgr.java @@ -119,8 +119,12 @@ protected void runAfterCatalogReady() { Map indexRowCountMap = Maps.newHashMap(); try { OlapTable olapTable = (OlapTable) table; - for (Partition partition : olapTable.getAllPartitions()) { - for (PhysicalPartition physicalPartition : partition.getSubPartitions()) { + List partitionList = GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .getAllPartitions(db, olapTable); + for (Partition partition : partitionList) { + List physicalPartitionList = GlobalStateMgr.getCurrentState().getLocalMetastore() + .getAllPhysicalPartition(partition); + for (PhysicalPartition physicalPartition : physicalPartitionList) { long version = physicalPartition.getVisibleVersion(); for 
(MaterializedIndex index : physicalPartition.getMaterializedIndices( IndexExtState.VISIBLE)) { diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/TempPartitions.java b/fe/fe-core/src/main/java/com/starrocks/catalog/TempPartitions.java index 8a3e82335eba57..2a30b15b407417 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/TempPartitions.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/TempPartitions.java @@ -85,7 +85,8 @@ public void dropPartition(String partitionName, boolean needDropTablet) { nameToPartition.remove(partitionName); if (!GlobalStateMgr.isCheckpointThread() && needDropTablet) { TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); - for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.ALL)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.ALL)) { for (Tablet tablet : index.getTablets()) { invertedIndex.deleteTablet(tablet.getId()); } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/mv/MVTimelinessArbiter.java b/fe/fe-core/src/main/java/com/starrocks/catalog/mv/MVTimelinessArbiter.java index 8a784674e500f5..d796ad17ff3235 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/mv/MVTimelinessArbiter.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/mv/MVTimelinessArbiter.java @@ -199,7 +199,7 @@ protected void addEmptyPartitionsToRefresh(MvUpdateInfo mvUpdateInfo) { return; } mv.getRangePartitionMap().keySet().forEach(mvPartitionName -> { - if (!mv.getPartition(mvPartitionName).hasStorageData()) { + if (!mv.getPartition(mvPartitionName).getDefaultPhysicalPartition().hasStorageData()) { // add empty partitions mvUpdateInfo.addMvToRefreshPartitionNames(mvPartitionName); } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/mv/MVTimelinessNonPartitionArbiter.java b/fe/fe-core/src/main/java/com/starrocks/catalog/mv/MVTimelinessNonPartitionArbiter.java index 3b5375b58f5e06..ce7c5e75b1fe01 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/mv/MVTimelinessNonPartitionArbiter.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/mv/MVTimelinessNonPartitionArbiter.java @@ -83,7 +83,7 @@ protected MvUpdateInfo getMVTimelinessUpdateInfoInChecked() { @Override protected MvUpdateInfo getMVTimelinessUpdateInfoInLoose() { List partitions = Lists.newArrayList(mv.getPartitions()); - if (partitions.size() > 0 && partitions.get(0).getVisibleVersion() <= 1) { + if (partitions.size() > 0 && partitions.get(0).getDefaultPhysicalPartition().getVisibleVersion() <= 1) { // the mv is newly created, can not use it to rewrite query. 
return new MvUpdateInfo(MvUpdateInfo.MvToRefreshType.FULL); } diff --git a/fe/fe-core/src/main/java/com/starrocks/clone/ColocateTableBalancer.java b/fe/fe-core/src/main/java/com/starrocks/clone/ColocateTableBalancer.java index 6c2d3c00c0e21d..4234fe11030f3e 100644 --- a/fe/fe-core/src/main/java/com/starrocks/clone/ColocateTableBalancer.java +++ b/fe/fe-core/src/main/java/com/starrocks/clone/ColocateTableBalancer.java @@ -328,7 +328,7 @@ private boolean relocateAndBalancePerGroup() { Set toIgnoreGroupIds = new HashSet<>(); boolean isAnyGroupChanged = false; for (GroupId groupId : groupIds) { - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(groupId.dbId); + Database db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(groupId.dbId); if (db == null) { continue; } @@ -726,7 +726,7 @@ private ColocateMatchResult doMatchOneGroup(GroupId groupId, long lockTotalTime = 0; long waitTotalTimeMs = 0; List tableIds = colocateIndex.getAllTableIds(groupId); - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(groupId.dbId); + Database db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(groupId.dbId); if (db == null) { return new ColocateMatchResult(lockTotalTime, Status.UNKNOWN); } @@ -746,7 +746,7 @@ private ColocateMatchResult doMatchOneGroup(GroupId groupId, try { TABLE: for (Long tableId : tableIds) { - OlapTable olapTable = (OlapTable) globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(db, tableId); + OlapTable olapTable = (OlapTable) globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin(db, tableId); if (olapTable == null || !colocateIndex.isColocateTable(olapTable.getId())) { continue; } @@ -755,7 +755,7 @@ private ColocateMatchResult doMatchOneGroup(GroupId groupId, continue; } - for (Partition partition : globalStateMgr.getLocalMetastore().getPartitionsIncludeRecycleBin(olapTable)) { + for (Partition partition : globalStateMgr.getStarRocksMetadata().getPartitionsIncludeRecycleBin(olapTable)) { partitionChecked++; boolean isPartitionUrgent = @@ -771,28 +771,29 @@ private ColocateMatchResult doMatchOneGroup(GroupId groupId, locker.unLockDatabase(db.getId(), LockType.READ); locker.lockDatabase(db.getId(), LockType.READ); lockStart = System.nanoTime(); - if (globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(groupId.dbId) == null) { + if (globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(groupId.dbId) == null) { return new ColocateMatchResult(lockTotalTime, Status.UNKNOWN); } - if (globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(db, olapTable.getId()) == null) { + if (globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin(db, olapTable.getId()) == null) { continue TABLE; } - if (globalStateMgr.getLocalMetastore().getPartitionIncludeRecycleBin(olapTable, partition.getId()) == + if (globalStateMgr.getStarRocksMetadata().getPartitionIncludeRecycleBin(olapTable, partition.getId()) == null) { continue; } } short replicationNum = - globalStateMgr.getLocalMetastore().getReplicationNumIncludeRecycleBin(olapTable.getPartitionInfo(), - partition.getId()); + globalStateMgr.getStarRocksMetadata() + .getReplicationNumIncludeRecycleBin(olapTable.getPartitionInfo(), partition.getId()); if (replicationNum == (short) -1) { continue; } - long visibleVersion = partition.getVisibleVersion(); + long visibleVersion = partition.getDefaultPhysicalPartition().getVisibleVersion(); // Here we only get VISIBLE indexes. All other indexes are not queryable. 
// So it does not matter if tablets of other indexes are not matched. - for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.VISIBLE)) { Preconditions.checkState(backendBucketsSeq.size() == index.getTablets().size(), backendBucketsSeq.size() + " v.s. " + index.getTablets().size()); int idx = 0; diff --git a/fe/fe-core/src/main/java/com/starrocks/clone/DiskAndTabletLoadReBalancer.java b/fe/fe-core/src/main/java/com/starrocks/clone/DiskAndTabletLoadReBalancer.java index 66e90822f6f9bf..6523441e810df4 100644 --- a/fe/fe-core/src/main/java/com/starrocks/clone/DiskAndTabletLoadReBalancer.java +++ b/fe/fe-core/src/main/java/com/starrocks/clone/DiskAndTabletLoadReBalancer.java @@ -37,6 +37,7 @@ import com.starrocks.clone.BackendLoadStatistic.Classification; import com.starrocks.common.Config; import com.starrocks.common.Pair; +import com.starrocks.common.util.concurrent.lock.AutoCloseableLock; import com.starrocks.common.util.concurrent.lock.LockType; import com.starrocks.common.util.concurrent.lock.Locker; import com.starrocks.server.GlobalStateMgr; @@ -706,7 +707,7 @@ private List balanceBackendDisk(ClusterLoadStatistic clusterStat private OlapTable getOlapTableById(long dbId, long tblId) { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { return null; } @@ -714,7 +715,7 @@ private OlapTable getOlapTableById(long dbId, long tblId) { Locker locker = new Locker(); try { locker.lockDatabase(db.getId(), LockType.READ); - return (OlapTable) globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(db, tblId); + return (OlapTable) globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin(db, tblId); } finally { locker.unLockDatabase(db.getId(), LockType.READ); } @@ -1005,7 +1006,7 @@ private Map, Double> getPartitionAvgReplicaSize(long beId, private int getPartitionTabletNumOnBePath(long dbId, long tableId, long partitionId, long indexId, long beId, long pathHash) { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { return 0; } @@ -1013,12 +1014,12 @@ private int getPartitionTabletNumOnBePath(long dbId, long tableId, long partitio Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.READ); try { - OlapTable table = (OlapTable) globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(db, tableId); + OlapTable table = (OlapTable) globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin(db, tableId); if (table == null) { return 0; } - Partition partition = globalStateMgr.getLocalMetastore().getPartitionIncludeRecycleBin(table, partitionId); + Partition partition = globalStateMgr.getStarRocksMetadata().getPartitionIncludeRecycleBin(table, partitionId); if (partition == null) { return 0; } @@ -1341,7 +1342,7 @@ private boolean isDestBackendLocationMismatch(OlapTable olapTable, long partitionId, long tabletId) { short replicationFactor = - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getReplicationNumIncludeRecycleBin(olapTable.getPartitionInfo(), partitionId); if 
(replicationFactor == (short) -1) { return true; @@ -1451,14 +1452,14 @@ private List>> getPartitionTablets(Long dbId, Long tableId, List>> result = Lists.newArrayList(); GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { return result; } Locker locker = new Locker(); try { locker.lockDatabase(db.getId(), LockType.READ); - OlapTable table = (OlapTable) globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(db, tableId); + OlapTable table = (OlapTable) globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin(db, tableId); if (table == null) { return result; } @@ -1467,7 +1468,7 @@ private List>> getPartitionTablets(Long dbId, Long tableId, return result; } - Partition partition = globalStateMgr.getLocalMetastore().getPartitionIncludeRecycleBin(table, partitionId); + Partition partition = globalStateMgr.getStarRocksMetadata().getPartitionIncludeRecycleBin(table, partitionId); if (partition == null) { return result; } @@ -1538,31 +1539,25 @@ private List>> getPartitionTablets(Long dbId, Long tableId, private boolean isTabletUnhealthy(long dbId, OlapTable olapTable, Long tabletId, TabletMeta tabletMeta, List aliveBeIds) { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); - if (db == null) { - return false; - } - - Locker locker = new Locker(); - try { - locker.lockDatabase(db.getId(), LockType.READ); - Partition partition = globalStateMgr.getLocalMetastore() + try (AutoCloseableLock ignore = + new AutoCloseableLock(new Locker(), dbId, Lists.newArrayList(olapTable.getId()), LockType.READ)) { + Partition partition = globalStateMgr.getStarRocksMetadata() .getPartitionIncludeRecycleBin(olapTable, tabletMeta.getPartitionId()); if (partition == null) { return true; } - MaterializedIndex index = partition.getIndex(tabletMeta.getIndexId()); + MaterializedIndex index = partition.getDefaultPhysicalPartition().getIndex(tabletMeta.getIndexId()); if (index == null) { return true; } - LocalTablet tablet = (LocalTablet) index.getTablet(tabletId); + LocalTablet tablet = (LocalTablet) GlobalStateMgr.getCurrentState().getLocalMetastore().getTablet(index, tabletId); if (tablet == null) { return true; } - short replicaNum = globalStateMgr.getLocalMetastore() + short replicaNum = globalStateMgr.getStarRocksMetadata() .getReplicationNumIncludeRecycleBin(olapTable.getPartitionInfo(), partition.getId()); if (replicaNum == (short) -1) { return true; @@ -1572,15 +1567,13 @@ private boolean isTabletUnhealthy(long dbId, OlapTable olapTable, Long tabletId, TabletChecker.getTabletHealthStatusWithPriority( tablet, globalStateMgr.getNodeMgr().getClusterInfo(), - partition.getVisibleVersion(), + partition.getDefaultPhysicalPartition().getVisibleVersion(), replicaNum, aliveBeIds, olapTable.getLocation()); return statusPair.first != LocalTablet.TabletHealthStatus.LOCATION_MISMATCH && statusPair.first != LocalTablet.TabletHealthStatus.HEALTHY; - } finally { - locker.unLockDatabase(db.getId(), LockType.READ); } } @@ -1607,10 +1600,10 @@ private Map, PartitionStat> getPartitionStats(TStorageMedium me long start = System.nanoTime(); long lockTotalTime = 0; long lockStart; - List dbIds = globalStateMgr.getLocalMetastore().getDbIdsIncludeRecycleBin(); + List dbIds = globalStateMgr.getStarRocksMetadata().getDbIdsIncludeRecycleBin(); 
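[Reviewer aside, not part of the patch] The isTabletUnhealthy() rewrite above is the pattern this branch pushes everywhere: take the table lock through a try-with-resources AutoCloseableLock instead of manual lock/unlock, and resolve meta through the recycle-bin-aware accessors on getStarRocksMetadata(). A condensed sketch under those assumptions; the free-standing helper and its signature are hypothetical.

    import com.google.common.collect.Lists;
    import com.starrocks.catalog.OlapTable;
    import com.starrocks.catalog.Partition;
    import com.starrocks.common.util.concurrent.lock.AutoCloseableLock;
    import com.starrocks.common.util.concurrent.lock.LockType;
    import com.starrocks.common.util.concurrent.lock.Locker;
    import com.starrocks.server.GlobalStateMgr;

    public class TableReadLockSketch {
        // Returns the partition's visible version (or -1 if it is gone), holding a table-level
        // read lock for the duration of the lookup, the way isTabletUnhealthy() now does.
        static long visibleVersion(long dbId, OlapTable table, long partitionId) {
            try (AutoCloseableLock ignore = new AutoCloseableLock(
                    new Locker(), dbId, Lists.newArrayList(table.getId()), LockType.READ)) {
                Partition partition = GlobalStateMgr.getCurrentState().getStarRocksMetadata()
                        .getPartitionIncludeRecycleBin(table, partitionId);
                return partition == null
                        ? -1L
                        : partition.getDefaultPhysicalPartition().getVisibleVersion();
            }
        }
    }

Compared with the old code, the lock scope no longer depends on first resolving the Database object, which is why the early "db == null, return false" branch disappears in the hunk above.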
DATABASE: for (Long dbId : dbIds) { - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { continue; } @@ -1627,7 +1620,7 @@ private Map, PartitionStat> getPartitionStats(TStorageMedium me lockStart = System.nanoTime(); try { TABLE: - for (Table table : globalStateMgr.getLocalMetastore().getTablesIncludeRecycleBin(db)) { + for (Table table : globalStateMgr.getStarRocksMetadata().getTablesIncludeRecycleBin(db)) { // check table is olap table or colocate table if (!table.needSchedule(isLocalBalance)) { continue; @@ -1644,7 +1637,8 @@ private Map, PartitionStat> getPartitionStats(TStorageMedium me continue; } - for (Partition partition : globalStateMgr.getLocalMetastore().getAllPartitionsIncludeRecycleBin(olapTbl)) { + for (Partition partition : globalStateMgr.getStarRocksMetadata() + .getAllPartitionsIncludeRecycleBin(olapTbl)) { partitionChecked++; if (partitionChecked % partitionBatchNum == 0) { lockTotalTime += System.nanoTime() - lockStart; @@ -1654,14 +1648,14 @@ private Map, PartitionStat> getPartitionStats(TStorageMedium me locker.lockDatabase(db.getId(), LockType.READ); LOG.debug("balancer get lock again"); lockStart = System.nanoTime(); - if (globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId) == null) { + if (globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId) == null) { continue DATABASE; } - if (globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(db, olapTbl.getId()) == null) { + if (globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin(db, olapTbl.getId()) == null) { continue TABLE; } - if (globalStateMgr.getLocalMetastore().getPartitionIncludeRecycleBin(olapTbl, partition.getId()) == - null) { + if (globalStateMgr.getStarRocksMetadata() + .getPartitionIncludeRecycleBin(olapTbl, partition.getId()) == null) { continue; } } @@ -1671,7 +1665,7 @@ private Map, PartitionStat> getPartitionStats(TStorageMedium me continue; } - DataProperty dataProperty = globalStateMgr.getLocalMetastore() + DataProperty dataProperty = globalStateMgr.getStarRocksMetadata() .getDataPropertyIncludeRecycleBin(olapTbl.getPartitionInfo(), partition.getId()); if (dataProperty == null) { continue; @@ -1681,7 +1675,7 @@ private Map, PartitionStat> getPartitionStats(TStorageMedium me continue; } - int replicationFactor = globalStateMgr.getLocalMetastore() + int replicationFactor = globalStateMgr.getStarRocksMetadata() .getReplicationNumIncludeRecycleBin(olapTbl.getPartitionInfo(), partition.getId()); int replicaNum = partition.getDistributionInfo().getBucketNum() * replicationFactor; // replicaNum may be negative, cause getReplicationNumIncludeRecycleBin can return -1 @@ -1691,7 +1685,7 @@ private Map, PartitionStat> getPartitionStats(TStorageMedium me /* * Tablet in SHADOW index can not be repaired of balanced */ - for (MaterializedIndex idx : partition + for (MaterializedIndex idx : partition.getDefaultPhysicalPartition() .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { PartitionStat pStat = new PartitionStat(dbId, table.getId(), 0, replicaNum, replicationFactor); @@ -1773,9 +1767,9 @@ private static Map getBackendOrPathToReplicaNum(List beIds, private Map getPartitionReplicaCnt() { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); Map partitionReplicaCnt = new HashMap<>(); - List dbIds = globalStateMgr.getLocalMetastore().getDbIdsIncludeRecycleBin(); + List dbIds = 
globalStateMgr.getStarRocksMetadata().getDbIdsIncludeRecycleBin(); for (Long dbId : dbIds) { - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { continue; } @@ -1787,7 +1781,7 @@ private Map getPartitionReplicaCnt() { Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.READ); try { - for (Table table : globalStateMgr.getLocalMetastore().getTablesIncludeRecycleBin(db)) { + for (Table table : globalStateMgr.getStarRocksMetadata().getTablesIncludeRecycleBin(db)) { // check table is olap table or colocate table if (!table.needSchedule(false)) { continue; @@ -1798,11 +1792,12 @@ private Map getPartitionReplicaCnt() { } OlapTable olapTbl = (OlapTable) table; - for (Partition partition : globalStateMgr.getLocalMetastore().getAllPartitionsIncludeRecycleBin(olapTbl)) { + for (Partition partition : globalStateMgr.getStarRocksMetadata() + .getAllPartitionsIncludeRecycleBin(olapTbl)) { int replicaTotalCnt = partition.getDistributionInfo().getBucketNum() * - globalStateMgr.getLocalMetastore().getReplicationNumIncludeRecycleBin(olapTbl.getPartitionInfo(), - partition.getId()); + globalStateMgr.getStarRocksMetadata() + .getReplicationNumIncludeRecycleBin(olapTbl.getPartitionInfo(), partition.getId()); partitionReplicaCnt.put(partition.getId(), replicaTotalCnt); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/clone/DynamicPartitionScheduler.java b/fe/fe-core/src/main/java/com/starrocks/clone/DynamicPartitionScheduler.java index 3cc2ad92d54a07..9c5cf9cff37eb3 100644 --- a/fe/fe-core/src/main/java/com/starrocks/clone/DynamicPartitionScheduler.java +++ b/fe/fe-core/src/main/java/com/starrocks/clone/DynamicPartitionScheduler.java @@ -352,6 +352,10 @@ private ArrayList getDropPartitionClause(Database db, OlapT Range checkDropPartitionKey = idToRange.getValue(); RangeUtils.checkRangeIntersect(reservePartitionKeyRange, checkDropPartitionKey); if (checkDropPartitionKey.upperEndpoint().compareTo(reservePartitionKeyRange.lowerEndpoint()) <= 0) { + + Partition partition = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getPartition( + db, olapTable, checkDropPartitionId); + String dropPartitionName = olapTable.getPartition(checkDropPartitionId).getName(); dropPartitionClauses.add(new DropPartitionClause(false, dropPartitionName, false, true)); } @@ -456,7 +460,7 @@ public boolean executeDynamicPartitionForTable(Long dbId, Long tableId) { AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(olapTable); analyzer.analyze(ctx, dropPartitionClause); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropPartition(db, olapTable, dropPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropPartition(db, olapTable, dropPartitionClause); clearDropPartitionFailedMsg(tableName); } catch (DdlException e) { recordDropPartitionFailedMsg(db.getOriginName(), tableName, e.getMessage()); @@ -471,7 +475,7 @@ public boolean executeDynamicPartitionForTable(Long dbId, Long tableId) { AlterTableClauseAnalyzer alterTableClauseVisitor = new AlterTableClauseAnalyzer(olapTable); alterTableClauseVisitor.analyze(ctx, addPartitionClause); - GlobalStateMgr.getCurrentState().getLocalMetastore().addPartitions(ctx, + GlobalStateMgr.getCurrentState().getStarRocksMetadata().addPartitions(ctx, db, tableName, addPartitionClause); clearCreatePartitionFailedMsg(tableName); } catch (DdlException e) { @@ -551,7 +555,7 @@ private void scheduleTTLPartition() 
{ new Locker(), db.getId(), Lists.newArrayList(olapTable.getId()), LockType.WRITE)) { AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(olapTable); analyzer.analyze(new ConnectContext(), dropPartitionClause); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropPartition(db, olapTable, dropPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropPartition(db, olapTable, dropPartitionClause); clearDropPartitionFailedMsg(tableName); } catch (DdlException e) { recordDropPartitionFailedMsg(db.getOriginName(), tableName, e.getMessage()); diff --git a/fe/fe-core/src/main/java/com/starrocks/clone/TabletChecker.java b/fe/fe-core/src/main/java/com/starrocks/clone/TabletChecker.java index 622a90df3efa49..1ff14d5e81f966 100644 --- a/fe/fe-core/src/main/java/com/starrocks/clone/TabletChecker.java +++ b/fe/fe-core/src/main/java/com/starrocks/clone/TabletChecker.java @@ -267,10 +267,10 @@ private void doCheck(boolean isUrgent) { long lockTotalTime = 0; long waitTotalTime = 0; long lockStart; - List dbIds = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIdsIncludeRecycleBin(); + List dbIds = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIdsIncludeRecycleBin(); DATABASE: for (Long dbId : dbIds) { - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { continue; } @@ -289,7 +289,7 @@ private void doCheck(boolean isUrgent) { List aliveBeIdsInCluster = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackendIds(true); TABLE: - for (Table table : GlobalStateMgr.getCurrentState().getLocalMetastore().getTablesIncludeRecycleBin(db)) { + for (Table table : GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTablesIncludeRecycleBin(db)) { if (!table.needSchedule(false)) { continue; } @@ -303,7 +303,7 @@ private void doCheck(boolean isUrgent) { } OlapTable olapTbl = (OlapTable) table; - for (Partition partition : GlobalStateMgr.getCurrentState().getLocalMetastore() + for (Partition partition : GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getAllPartitionsIncludeRecycleBin(olapTbl)) { partitionChecked++; @@ -321,15 +321,15 @@ private void doCheck(boolean isUrgent) { locker.lockDatabase(db.getId(), LockType.READ); LOG.debug("checker get lock again"); lockStart = System.nanoTime(); - if (GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(dbId) == null) { + if (GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(dbId) == null) { continue DATABASE; } - if (GlobalStateMgr.getCurrentState().getLocalMetastore() + if (GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getTableIncludeRecycleBin(db, olapTbl.getId()) == null) { continue TABLE; } if (GlobalStateMgr.getCurrentState() - .getLocalMetastore().getPartitionIncludeRecycleBin(olapTbl, partition.getId()) == null) { + .getStarRocksMetadata().getPartitionIncludeRecycleBin(olapTbl, partition.getId()) == null) { continue; } } @@ -341,7 +341,7 @@ private void doCheck(boolean isUrgent) { } short replicaNum = GlobalStateMgr.getCurrentState() - .getLocalMetastore() + .getStarRocksMetadata() .getReplicationNumIncludeRecycleBin(olapTbl.getPartitionInfo(), partition.getId()); if (replicaNum == (short) -1) { continue; diff --git a/fe/fe-core/src/main/java/com/starrocks/clone/TabletSchedCtx.java b/fe/fe-core/src/main/java/com/starrocks/clone/TabletSchedCtx.java 
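[Reviewer aside, not part of the patch] The checker and balancer loops above drop and re-take the db lock every partitionBatchNum partitions and then re-validate that the db, table, and partition still exist before continuing. A condensed sketch of that re-check chain with the relocated accessors; the helper name and signature are illustrative.

    import com.starrocks.catalog.Database;
    import com.starrocks.catalog.OlapTable;
    import com.starrocks.server.GlobalStateMgr;

    public class RecheckAfterRelockSketch {
        // Every lookup goes through the *IncludeRecycleBin variants, which this branch moves
        // from LocalMetastore to StarRocksMetadata; a null at any step means the object was
        // dropped while the lock was released and the caller should skip it.
        static boolean stillExists(long dbId, long tableId, long partitionId) {
            Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(dbId);
            if (db == null) {
                return false;
            }
            OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata()
                    .getTableIncludeRecycleBin(db, tableId);
            if (table == null) {
                return false;
            }
            return GlobalStateMgr.getCurrentState().getStarRocksMetadata()
                    .getPartitionIncludeRecycleBin(table, partitionId) != null;
        }
    }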
index 31573532334c49..a68eba618deff2 100644 --- a/fe/fe-core/src/main/java/com/starrocks/clone/TabletSchedCtx.java +++ b/fe/fe-core/src/main/java/com/starrocks/clone/TabletSchedCtx.java @@ -64,6 +64,7 @@ import com.starrocks.privilege.AccessDeniedException; import com.starrocks.privilege.PrivilegeType; import com.starrocks.server.GlobalStateMgr; +import com.starrocks.server.LocalMetastore; import com.starrocks.sql.analyzer.Authorizer; import com.starrocks.sql.ast.UserIdentity; import com.starrocks.system.Backend; @@ -810,10 +811,6 @@ public void setDecommissionedReplica(Replica replica) { this.decommissionedReplicaPreviousState = replica.getState(); } - public boolean deleteReplica(Replica replica) { - return tablet.deleteReplicaByBackendId(replica.getBackendId()); - } - public CloneTask createCloneReplicaAndTask() throws SchedException { Backend srcBe = infoService.getBackend(srcReplica.getBackendId()); if (srcBe == null) { @@ -848,7 +845,7 @@ public CloneTask createCloneReplicaAndTask() throws SchedException { // if this is a balance task, or this is a repair task with REPLICA_MISSING/REPLICA_RELOCATING, // we create a new replica with state CLONE - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { throw new SchedException(Status.UNRECOVERABLE, "db " + dbId + " not exist"); } @@ -923,15 +920,15 @@ public CreateReplicaTask createEmptyReplicaAndTask() throws SchedException { tabletId, tablet.getSingleReplica(), tablet.getSingleReplica().getBackendId(), destBackendId); final GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { throw new SchedException(Status.UNRECOVERABLE, "db " + dbId + " does not exist"); } Locker locker = new Locker(); try { locker.lockDatabase(db.getId(), LockType.WRITE); - OlapTable olapTable = (OlapTable) globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin( - globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId), + OlapTable olapTable = (OlapTable) globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin( + globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId), tblId); if (olapTable == null) { throw new SchedException(Status.UNRECOVERABLE, "table " + tblId + " does not exist"); @@ -1010,6 +1007,7 @@ public void finishCloneTask(CloneTask cloneTask, TFinishTaskRequest request) Preconditions.checkArgument(cloneTask.getTaskVersion() == CloneTask.VERSION_2); setLastVisitedTime(System.currentTimeMillis()); GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); + LocalMetastore localMetastore = GlobalStateMgr.getCurrentState().getLocalMetastore(); // check if clone task success if (request.getTask_status().getStatus_code() != TStatusCode.OK) { @@ -1035,32 +1033,33 @@ public void finishCloneTask(CloneTask cloneTask, TFinishTaskRequest request) } // 1. 
check the tablet status first - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { throw new SchedException(Status.UNRECOVERABLE, "db does not exist"); } Locker locker = new Locker(); try { locker.lockDatabase(db.getId(), LockType.WRITE); - OlapTable olapTable = (OlapTable) globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(db, tblId); + OlapTable olapTable = (OlapTable) globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin(db, tblId); if (olapTable == null) { throw new SchedException(Status.UNRECOVERABLE, "tbl does not exist"); } - PhysicalPartition partition = globalStateMgr.getLocalMetastore() + PhysicalPartition partition = globalStateMgr.getStarRocksMetadata() .getPhysicalPartitionIncludeRecycleBin(olapTable, physicalPartitionId); if (partition == null) { throw new SchedException(Status.UNRECOVERABLE, "partition does not exist"); } short replicationNum = - globalStateMgr.getLocalMetastore() + globalStateMgr.getStarRocksMetadata() .getReplicationNumIncludeRecycleBin(olapTable.getPartitionInfo(), partitionId); if (replicationNum == (short) -1) { throw new SchedException(Status.UNRECOVERABLE, "invalid replication number"); } - MaterializedIndex index = partition.getIndex(indexId); + MaterializedIndex index = + GlobalStateMgr.getCurrentState().getLocalMetastore().getMaterializedIndex(partition, indexId); if (index == null) { throw new SchedException(Status.UNRECOVERABLE, "index does not exist"); } @@ -1071,7 +1070,7 @@ public void finishCloneTask(CloneTask cloneTask, TFinishTaskRequest request) + ", task's: " + schemaHash); } - LocalTablet tablet = (LocalTablet) index.getTablet(tabletId); + LocalTablet tablet = (LocalTablet) localMetastore.getTablet(index, tabletId); if (tablet == null) { throw new SchedException(Status.UNRECOVERABLE, "tablet does not exist"); } @@ -1184,11 +1183,11 @@ private String unprotectedFinishClone(TFinishTaskRequest request, PhysicalPartit if (replica.getState() == ReplicaState.CLONE) { replica.setState(ReplicaState.NORMAL); tablet.setLastFullCloneFinishedTimeMs(System.currentTimeMillis()); - GlobalStateMgr.getCurrentState().getEditLog().logAddReplica(info); + GlobalStateMgr.getCurrentState().getLocalMetastore().addReplica(info); } else { // if in VERSION_INCOMPLETE, replica is not newly created, thus the state is not CLONE // so, we keep it state unchanged, and log update replica - GlobalStateMgr.getCurrentState().getEditLog().logUpdateReplica(info); + GlobalStateMgr.getCurrentState().getLocalMetastore().updateReplica(info); } return String.format("version:%d min_readable_version:%d", reportedTablet.getVersion(), reportedTablet.getMin_readable_version()); diff --git a/fe/fe-core/src/main/java/com/starrocks/clone/TabletScheduler.java b/fe/fe-core/src/main/java/com/starrocks/clone/TabletScheduler.java index 283fe9204fca5c..e19c00fb99c4a4 100644 --- a/fe/fe-core/src/main/java/com/starrocks/clone/TabletScheduler.java +++ b/fe/fe-core/src/main/java/com/starrocks/clone/TabletScheduler.java @@ -85,6 +85,7 @@ import com.starrocks.task.CloneTask; import com.starrocks.task.CreateReplicaTask; import com.starrocks.task.DropReplicaTask; +import com.starrocks.task.TabletTaskExecutor; import com.starrocks.thrift.TFinishTaskRequest; import com.starrocks.thrift.TGetTabletScheduleRequest; import com.starrocks.thrift.TGetTabletScheduleResponse; @@ -691,7 +692,7 @@ private void scheduleTablet(TabletSchedCtx tabletCtx, AgentBatchTask 
batchTask) stat.counterTabletScheduled.incrementAndGet(); // check this tablet again - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(tabletCtx.getDbId()); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(tabletCtx.getDbId()); if (db == null) { throw new SchedException(Status.UNRECOVERABLE, "db does not exist"); } @@ -701,7 +702,7 @@ private void scheduleTablet(TabletSchedCtx tabletCtx, AgentBatchTask batchTask) locker.lockDatabase(db.getId(), LockType.READ); try { OlapTable tbl = (OlapTable) GlobalStateMgr.getCurrentState() - .getLocalMetastore().getTableIncludeRecycleBin(db, tabletCtx.getTblId()); + .getStarRocksMetadata().getTableIncludeRecycleBin(db, tabletCtx.getTblId()); if (tbl == null) { throw new SchedException(Status.UNRECOVERABLE, "tbl does not exist"); } @@ -714,24 +715,25 @@ private void scheduleTablet(TabletSchedCtx tabletCtx, AgentBatchTask batchTask) OlapTableState tableState = tbl.getState(); Partition partition = GlobalStateMgr.getCurrentState() - .getLocalMetastore().getPartitionIncludeRecycleBin(tbl, tabletCtx.getPartitionId()); + .getStarRocksMetadata().getPartitionIncludeRecycleBin(tbl, tabletCtx.getPartitionId()); if (partition == null) { throw new SchedException(Status.UNRECOVERABLE, "partition does not exist"); } short replicaNum = GlobalStateMgr.getCurrentState() - .getLocalMetastore().getReplicationNumIncludeRecycleBin(tbl.getPartitionInfo(), partition.getId()); + .getStarRocksMetadata().getReplicationNumIncludeRecycleBin(tbl.getPartitionInfo(), partition.getId()); if (replicaNum == (short) -1) { throw new SchedException(Status.UNRECOVERABLE, "invalid replication number"); } - DataProperty dataProperty = GlobalStateMgr.getCurrentState().getLocalMetastore() + DataProperty dataProperty = GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getDataPropertyIncludeRecycleBin(tbl.getPartitionInfo(), partition.getId()); if (dataProperty == null) { throw new SchedException(Status.UNRECOVERABLE, "partition data property not exist"); } - PhysicalPartition physicalPartition = partition.getSubPartition(tabletCtx.getPhysicalPartitionId()); + PhysicalPartition physicalPartition = GlobalStateMgr.getCurrentState().getLocalMetastore() + .getPhysicalPartition(partition, tabletCtx.getPhysicalPartitionId()); if (physicalPartition == null) { throw new SchedException(Status.UNRECOVERABLE, "physical partition " + tabletCtx.getPhysicalPartitionId() + "does not exist"); @@ -742,7 +744,8 @@ private void scheduleTablet(TabletSchedCtx tabletCtx, AgentBatchTask batchTask) throw new SchedException(Status.UNRECOVERABLE, "index does not exist"); } - LocalTablet tablet = (LocalTablet) idx.getTablet(tabletCtx.getTabletId()); + LocalTablet tablet = (LocalTablet) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTablet(idx, tabletCtx.getTabletId()); Preconditions.checkNotNull(tablet); if (isColocateTable) { @@ -1067,7 +1070,7 @@ private void handleReplicaRelocating(TabletSchedCtx tabletCtx, AgentBatchTask ba private void handleRedundantReplica(TabletSchedCtx tabletCtx, boolean force) throws SchedException { stat.counterReplicaRedundantErr.incrementAndGet(); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(tabletCtx.getDbId()); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(tabletCtx.getDbId()); if (db == null) { throw new SchedException(Status.UNRECOVERABLE, "db " + tabletCtx.getDbId() + " not exist"); } @@ 
-1301,7 +1304,7 @@ private boolean handleColocateRedundant(TabletSchedCtx tabletCtx) throws SchedEx Set backendSet = tabletCtx.getColocateBackendsSet(); Preconditions.checkNotNull(backendSet); stat.counterReplicaColocateRedundant.incrementAndGet(); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(tabletCtx.getDbId()); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(tabletCtx.getDbId()); if (db == null) { throw new SchedException(Status.UNRECOVERABLE, "db " + tabletCtx.getDbId() + " not exist"); } @@ -1372,13 +1375,14 @@ private void deleteReplicaInternal(TabletSchedCtx tabletCtx, Replica replica, St } } - String replicaInfos = tabletCtx.getTablet().getReplicaInfos(); - // delete this replica from globalStateMgr. - // it will also delete replica from tablet inverted index. - if (!tabletCtx.deleteReplica(replica)) { - LOG.warn("delete replica for tablet: {} failed backend {} not found replicas:{}", tabletCtx.getTabletId(), - replica.getBackendId(), replicaInfos); - } + // write edit log + ReplicaPersistInfo info = ReplicaPersistInfo.createForDelete(tabletCtx.getDbId(), + tabletCtx.getTblId(), + tabletCtx.getPhysicalPartitionId(), + tabletCtx.getIndexId(), + tabletCtx.getTabletId(), + replica.getBackendId()); + GlobalStateMgr.getCurrentState().getLocalMetastore().deleteReplica(info); if (force) { // send the replica deletion task. @@ -1386,32 +1390,13 @@ private void deleteReplicaInternal(TabletSchedCtx tabletCtx, Replica replica, St // NOTICE: only delete the replica from meta may not work. sometimes we can depend on tablet report // to delete these replicas, but in FORCE_REDUNDANT case, replica may be added to meta again in report // process. - sendDeleteReplicaTask(replica.getBackendId(), tabletCtx.getTabletId(), tabletCtx.getSchemaHash()); + DropReplicaTask task = new DropReplicaTask( + replica.getBackendId(), tabletCtx.getTabletId(), tabletCtx.getSchemaHash(), true); + TabletTaskExecutor.sendTask(task); } // NOTE: TabletScheduler is specific for LocalTablet, LakeTablet will never go here. GlobalStateMgr.getCurrentState().getTabletInvertedIndex() .markTabletForceDelete(tabletCtx.getTabletId(), replica.getBackendId()); - - // write edit log - ReplicaPersistInfo info = ReplicaPersistInfo.createForDelete(tabletCtx.getDbId(), - tabletCtx.getTblId(), - tabletCtx.getPhysicalPartitionId(), - tabletCtx.getIndexId(), - tabletCtx.getTabletId(), - replica.getBackendId()); - - GlobalStateMgr.getCurrentState().getEditLog().logDeleteReplica(info); - - LOG.info("delete replica. tablet id: {}, backend id: {}. 
reason: {}, force: {} replicas: {}", - tabletCtx.getTabletId(), replica.getBackendId(), reason, force, replicaInfos); - } - - private void sendDeleteReplicaTask(long backendId, long tabletId, int schemaHash) { - DropReplicaTask task = new DropReplicaTask(backendId, tabletId, schemaHash, true); - AgentBatchTask batchTask = new AgentBatchTask(); - batchTask.addTask(task); - AgentTaskExecutor.submit(batchTask); - LOG.info("send forceful replica delete task for tablet {} on backend {}", tabletId, backendId); } /** @@ -1872,7 +1857,7 @@ public void finishCreateReplicaTask(CreateReplicaTask task, TFinishTaskRequest r replica.getSchemaHash(), replica.getDataSize(), replica.getRowCount(), replica.getLastFailedVersion(), replica.getLastSuccessVersion(), replica.getMinReadableVersion()); - GlobalStateMgr.getCurrentState().getEditLog().logAddReplica(info); + GlobalStateMgr.getCurrentState().getLocalMetastore().addReplica(info); finalizeTabletCtx(tabletCtx, TabletSchedCtx.State.FINISHED, "create replica finished"); LOG.info("create replica for recovery successfully, tablet:{} backend:{}", tabletId, task.getBackendId()); } @@ -2075,19 +2060,20 @@ public TGetTabletScheduleResponse getTabletSchedule(TGetTabletScheduleRequest re // caller should hold db lock private void checkMetaExist(TabletSchedCtx ctx) throws SchedException { - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(ctx.getDbId()); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(ctx.getDbId()); if (db == null) { throw new SchedException(Status.UNRECOVERABLE, "db " + ctx.getDbId() + " dose not exist"); } OlapTable tbl = - (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTableIncludeRecycleBin(db, ctx.getTblId()); + (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .getTableIncludeRecycleBin(db, ctx.getTblId()); if (tbl == null) { throw new SchedException(Status.UNRECOVERABLE, "table " + ctx.getTblId() + " dose not exist"); } - Partition partition = - GlobalStateMgr.getCurrentState().getLocalMetastore().getPartitionIncludeRecycleBin(tbl, ctx.getPartitionId()); + Partition partition = GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .getPartitionIncludeRecycleBin(tbl, ctx.getPartitionId()); if (partition == null) { throw new SchedException(Status.UNRECOVERABLE, "partition " + ctx.getPartitionId() + " dose not exist"); } @@ -2103,7 +2089,8 @@ private void checkMetaExist(TabletSchedCtx ctx) throws SchedException { throw new SchedException(Status.UNRECOVERABLE, "materialized index " + ctx.getIndexId() + " dose not exist"); } - Tablet tablet = idx.getTablet(ctx.getTabletId()); + Tablet tablet = GlobalStateMgr.getCurrentState().getLocalMetastore().getTablet(idx, ctx.getTabletId()); + if (tablet == null) { throw new SchedException(Status.UNRECOVERABLE, "tablet " + ctx.getTabletId() + " dose not exist"); } diff --git a/fe/fe-core/src/main/java/com/starrocks/common/proc/LakeTabletsProcDir.java b/fe/fe-core/src/main/java/com/starrocks/common/proc/LakeTabletsProcDir.java index 5004e62b9d553f..2a3e9e83e56afe 100644 --- a/fe/fe-core/src/main/java/com/starrocks/common/proc/LakeTabletsProcDir.java +++ b/fe/fe-core/src/main/java/com/starrocks/common/proc/LakeTabletsProcDir.java @@ -30,6 +30,7 @@ import com.starrocks.lake.LakeTablet; import com.starrocks.monitor.unit.ByteSizeValue; import com.starrocks.qe.ConnectContext; +import com.starrocks.server.GlobalStateMgr; import java.util.Arrays; import java.util.Collections; @@ 
-135,7 +136,7 @@ public ProcNodeInterface lookup(String tabletIdStr) throws AnalysisException { Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.READ); try { - Tablet tablet = index.getTablet(tabletId); + Tablet tablet = GlobalStateMgr.getCurrentState().getLocalMetastore().getTablet(index, tabletId); if (tablet == null) { throw new AnalysisException("Can't find tablet id: " + tabletIdStr); } diff --git a/fe/fe-core/src/main/java/com/starrocks/common/proc/PartitionsProcDir.java b/fe/fe-core/src/main/java/com/starrocks/common/proc/PartitionsProcDir.java index b62ab7b874271b..6f99d4b1c049c6 100644 --- a/fe/fe-core/src/main/java/com/starrocks/common/proc/PartitionsProcDir.java +++ b/fe/fe-core/src/main/java/com/starrocks/common/proc/PartitionsProcDir.java @@ -439,9 +439,9 @@ public ProcNodeInterface lookup(String partitionIdOrName) throws AnalysisExcepti try { partition = table.getPhysicalPartition(Long.parseLong(partitionIdOrName)); } catch (NumberFormatException e) { - partition = table.getPartition(partitionIdOrName, false); + partition = table.getPartition(partitionIdOrName, false).getDefaultPhysicalPartition(); if (partition == null) { - partition = table.getPartition(partitionIdOrName, true); + partition = table.getPartition(partitionIdOrName, true).getDefaultPhysicalPartition(); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/com/starrocks/common/util/PropertyAnalyzer.java index dd8c55a5888cea..ae50a6d3fcd249 100644 --- a/fe/fe-core/src/main/java/com/starrocks/common/util/PropertyAnalyzer.java +++ b/fe/fe-core/src/main/java/com/starrocks/common/util/PropertyAnalyzer.java @@ -1564,7 +1564,7 @@ public static void analyzeMVProperties(Database db, StorageVolumeMgr svm = GlobalStateMgr.getCurrentState().getStorageVolumeMgr(); svm.bindTableToStorageVolume(volume, db.getId(), materializedView.getId()); String storageVolumeId = svm.getStorageVolumeIdOfTable(materializedView.getId()); - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .setLakeStorageInfo(db, materializedView, storageVolumeId, properties); } diff --git a/fe/fe-core/src/main/java/com/starrocks/common/util/concurrent/lock/AutoCloseableLock.java b/fe/fe-core/src/main/java/com/starrocks/common/util/concurrent/lock/AutoCloseableLock.java index 0498ea1f81aac6..e737ceab77a36a 100644 --- a/fe/fe-core/src/main/java/com/starrocks/common/util/concurrent/lock/AutoCloseableLock.java +++ b/fe/fe-core/src/main/java/com/starrocks/common/util/concurrent/lock/AutoCloseableLock.java @@ -22,6 +22,15 @@ public class AutoCloseableLock implements AutoCloseable { private final List tableList; private final LockType lockType; + public AutoCloseableLock(Locker locker, Long dbId, LockType lockType) { + this.locker = locker; + this.dbId = dbId; + this.tableList = new ArrayList<>(); + this.lockType = lockType; + + locker.lockTablesWithIntensiveDbLock(dbId, tableList, lockType); + } + public AutoCloseableLock(Locker locker, Long dbId, List tableList, LockType lockType) { this.locker = locker; this.dbId = dbId; @@ -31,6 +40,7 @@ public AutoCloseableLock(Locker locker, Long dbId, List tableList, LockTyp locker.lockTablesWithIntensiveDbLock(dbId, tableList, lockType); } + @Override public void close() { locker.unLockTablesWithIntensiveDbLock(dbId, tableList, lockType); diff --git a/fe/fe-core/src/main/java/com/starrocks/common/util/concurrent/lock/Locker.java 
b/fe/fe-core/src/main/java/com/starrocks/common/util/concurrent/lock/Locker.java index 2528874e1210b5..551df4a00bc6b9 100644 --- a/fe/fe-core/src/main/java/com/starrocks/common/util/concurrent/lock/Locker.java +++ b/fe/fe-core/src/main/java/com/starrocks/common/util/concurrent/lock/Locker.java @@ -133,6 +133,37 @@ public void lockDatabase(Long dbId, LockType lockType) { } } + public void lockDatabaseWithIntendLock(Database database, LockType lockType) { + if (Config.lock_manager_enabled) { + Preconditions.checkState(database != null); + try { + lock(database.getId(), lockType, 0); + } catch (LockException e) { + throw ErrorReportException.report(ErrorCode.ERR_LOCK_ERROR, e.getMessage()); + } + } else { + QueryableReentrantReadWriteLock rwLock = database.getRwLock(); + if (lockType == LockType.INTENTION_EXCLUSIVE) { + LockUtils.dbWriteLock(rwLock, database.getId(), database.getFullName(), database.getSlowLockLogStats()); + } else { + LockUtils.dbReadLock(rwLock, database.getId(), database.getFullName(), database.getSlowLockLogStats()); + } + } + } + + public void lockTable(Table table, LockType lockType) { + if (Config.lock_manager_enabled) { + Preconditions.checkState(table != null); + try { + lock(table.getId(), lockType, 0); + } catch (LockException e) { + throw ErrorReportException.report(ErrorCode.ERR_LOCK_ERROR, e.getMessage()); + } + } else { + // + } + } + /** * Before the new version of LockManager is fully enabled, it is used to be compatible with the original db lock logic. */ @@ -433,7 +464,7 @@ public void unLockTableWithIntensiveDbLock(Long dbId, Long tableId, LockType loc /** * Lock table with intensive db lock. * - * @param dbId db for intensive db lock + * @param dbId db for intensive db lock * @param tableId table to be locked * @param lockType lock type */ diff --git a/fe/fe-core/src/main/java/com/starrocks/connector/partitiontraits/OlapPartitionTraits.java b/fe/fe-core/src/main/java/com/starrocks/connector/partitiontraits/OlapPartitionTraits.java index 8810246b7daf8b..e51ecb07bcff2f 100644 --- a/fe/fe-core/src/main/java/com/starrocks/connector/partitiontraits/OlapPartitionTraits.java +++ b/fe/fe-core/src/main/java/com/starrocks/connector/partitiontraits/OlapPartitionTraits.java @@ -84,8 +84,9 @@ public Set getUpdatedPartitionNames(List baseTables, List baseTablePartitionInfos = Lists.newArrayList(); for (String p : baseTable.getVisiblePartitionNames()) { Partition partition = baseTable.getPartition(p); - baseTablePartitionInfos.add(String.format("%s:%s:%s:%s", p, partition.getId(), partition.getVisibleVersion(), - partition.getVisibleVersionTime())); + baseTablePartitionInfos.add(String.format("%s:%s:%s:%s", p, partition.getId(), + partition.getDefaultPhysicalPartition().getVisibleVersion(), + partition.getDefaultPhysicalPartition().getVisibleVersionTime())); } LOG.debug("baseTable: {}, baseTablePartitions:{}, mvBaseTableVisibleVersionMap: {}", baseTable.getName(), baseTablePartitionInfos, mvBaseTableVisibleVersionMap); @@ -96,7 +97,7 @@ public Set getUpdatedPartitionNames(List baseTables, for (String partitionName : baseTable.getVisiblePartitionNames()) { if (!mvBaseTableVisibleVersionMap.containsKey(partitionName)) { Partition partition = baseTable.getPartition(partitionName); - if (partition.getVisibleVersion() != 1) { + if (partition.getDefaultPhysicalPartition().getVisibleVersion() != 1) { result.add(partitionName); } } @@ -134,8 +135,9 @@ public Set getUpdatedPartitionNames(List baseTables, public static boolean isBaseTableChanged(Partition partition, 
MaterializedView.BasePartitionInfo mvRefreshedPartitionInfo) { return mvRefreshedPartitionInfo.getId() != partition.getId() - || partition.getVisibleVersion() != mvRefreshedPartitionInfo.getVersion() - || partition.getVisibleVersionTime() > mvRefreshedPartitionInfo.getLastRefreshTime(); + || partition.getDefaultPhysicalPartition().getVisibleVersion() != mvRefreshedPartitionInfo.getVersion() + || partition.getDefaultPhysicalPartition().getVisibleVersionTime() + > mvRefreshedPartitionInfo.getLastRefreshTime(); } public List getPartitionColumns() { diff --git a/fe/fe-core/src/main/java/com/starrocks/consistency/CheckConsistencyJob.java b/fe/fe-core/src/main/java/com/starrocks/consistency/CheckConsistencyJob.java index d7cc2f8357967b..0893fd2068c552 100644 --- a/fe/fe-core/src/main/java/com/starrocks/consistency/CheckConsistencyJob.java +++ b/fe/fe-core/src/main/java/com/starrocks/consistency/CheckConsistencyJob.java @@ -42,6 +42,7 @@ import com.starrocks.catalog.MaterializedIndex; import com.starrocks.catalog.OlapTable; import com.starrocks.catalog.Partition; +import com.starrocks.catalog.PhysicalPartition; import com.starrocks.catalog.Replica; import com.starrocks.catalog.Replica.ReplicaState; import com.starrocks.catalog.Table; @@ -53,7 +54,6 @@ import com.starrocks.common.util.concurrent.lock.Locker; import com.starrocks.journal.JournalTask; import com.starrocks.persist.ConsistencyCheckInfo; -import com.starrocks.persist.EditLog; import com.starrocks.server.GlobalStateMgr; import com.starrocks.task.AgentBatchTask; import com.starrocks.task.AgentTask; @@ -160,19 +160,21 @@ public boolean sendTasks() { return false; } - MaterializedIndex index = partition.getIndex(tabletMeta.getIndexId()); + PhysicalPartition physicalPartition = partition.getSubPartition(tabletMeta.getPhysicalPartitionId()); + + MaterializedIndex index = physicalPartition.getIndex(tabletMeta.getIndexId()); if (index == null) { LOG.debug("index[{}] does not exist", tabletMeta.getIndexId()); return false; } - tablet = (LocalTablet) index.getTablet(tabletId); + tablet = (LocalTablet) GlobalStateMgr.getCurrentState().getLocalMetastore().getTablet(index, tabletId); if (tablet == null) { LOG.debug("tablet[{}] does not exist", tabletId); return false; } - checkedVersion = partition.getVisibleVersion(); + checkedVersion = physicalPartition.getVisibleVersion(); checkedSchemaHash = olapTable.getSchemaHashByIndexId(tabletMeta.getIndexId()); int sentTaskReplicaNum = 0; @@ -278,7 +280,9 @@ public synchronized int tryFinishJob() { return -1; } - MaterializedIndex index = partition.getIndex(tabletMeta.getIndexId()); + PhysicalPartition physicalPartition = partition.getSubPartition(tabletMeta.getPhysicalPartitionId()); + + MaterializedIndex index = physicalPartition.getIndex(tabletMeta.getIndexId()); if (index == null) { LOG.warn("index[{}] does not exist", tabletMeta.getIndexId()); return -1; @@ -368,11 +372,10 @@ public synchronized int tryFinishJob() { ConsistencyCheckInfo info = new ConsistencyCheckInfo(db.getId(), table.getId(), partition.getId(), index.getId(), tabletId, lastCheckTime, checkedVersion, isConsistent); - journalTask = GlobalStateMgr.getCurrentState().getEditLog().logFinishConsistencyCheckNoWait(info); + + GlobalStateMgr.getCurrentState().getLocalMetastore().finishConsistencyCheck(info); } - // Wait for edit log write finish out of db lock. 
- EditLog.waitInfinity(journalTask); return 1; } diff --git a/fe/fe-core/src/main/java/com/starrocks/consistency/ConsistencyChecker.java b/fe/fe-core/src/main/java/com/starrocks/consistency/ConsistencyChecker.java index 8afdd1812b35d5..1316456e57e774 100644 --- a/fe/fe-core/src/main/java/com/starrocks/consistency/ConsistencyChecker.java +++ b/fe/fe-core/src/main/java/com/starrocks/consistency/ConsistencyChecker.java @@ -46,7 +46,6 @@ import com.starrocks.catalog.OlapTable.OlapTableState; import com.starrocks.catalog.Partition; import com.starrocks.catalog.PhysicalPartition; -import com.starrocks.catalog.PhysicalPartitionImpl; import com.starrocks.catalog.Table; import com.starrocks.catalog.Tablet; import com.starrocks.catalog.TabletInvertedIndex; @@ -54,11 +53,9 @@ import com.starrocks.common.Config; import com.starrocks.common.Pair; import com.starrocks.common.util.FrontendDaemon; -import com.starrocks.common.util.concurrent.lock.AutoCloseableLock; import com.starrocks.common.util.concurrent.lock.LockType; import com.starrocks.common.util.concurrent.lock.Locker; import com.starrocks.consistency.CheckConsistencyJob.JobState; -import com.starrocks.persist.ConsistencyCheckInfo; import com.starrocks.server.GlobalStateMgr; import com.starrocks.server.LocalMetastore; import com.starrocks.task.CheckConsistencyTask; @@ -264,7 +261,7 @@ private void checkTabletMetaConsistency(Map creatingTableIds) { if (!table.isCloudNativeTableOrMaterializedView()) { // validate tablet - Tablet tablet = index.getTablet(tabletId); + Tablet tablet = GlobalStateMgr.getCurrentState().getLocalMetastore().getTablet(index, tabletId); if (tablet == null) { deleteTabletByConsistencyChecker(tabletMeta, tabletId, backendId, "tablet " + dbId + "." + tableId + "." + @@ -507,11 +504,7 @@ protected List chooseTablets() { Partition.PARTITION_INIT_VERSION); continue; } - if (partition instanceof Partition) { - partitionQueue.add((Partition) partition); - } else if (partition instanceof PhysicalPartitionImpl) { - partitionQueue.add((PhysicalPartitionImpl) partition); - } + partitionQueue.add(partition); } while ((chosenOne = partitionQueue.poll()) != null) { @@ -589,48 +582,7 @@ public void handleFinishedConsistencyCheck(CheckConsistencyTask task, long check job.handleFinishedReplica(backendId, checksum); } - public void replayFinishConsistencyCheck(ConsistencyCheckInfo info, GlobalStateMgr globalStateMgr) { - Database db = globalStateMgr.getLocalMetastore().getDb(info.getDbId()); - if (db == null) { - LOG.warn("replay finish consistency check failed, db is null, info: {}", info); - return; - } - OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getId(), info.getTableId()); - if (table == null) { - LOG.warn("replay finish consistency check failed, table is null, info: {}", info); - return; - } - try (AutoCloseableLock ignore - = new AutoCloseableLock(new Locker(), db.getId(), Lists.newArrayList(table.getId()), LockType.WRITE)) { - Partition partition = table.getPartition(info.getPartitionId()); - if (partition == null) { - LOG.warn("replay finish consistency check failed, partition is null, info: {}", info); - return; - } - MaterializedIndex index = partition.getIndex(info.getIndexId()); - if (index == null) { - LOG.warn("replay finish consistency check failed, index is null, info: {}", info); - return; - } - LocalTablet tablet = (LocalTablet) index.getTablet(info.getTabletId()); - if (tablet == null) { - LOG.warn("replay finish consistency check failed, tablet is null, info: {}", 
info); - return; - } - - long lastCheckTime = info.getLastCheckTime(); - db.setLastCheckTime(lastCheckTime); - table.setLastCheckTime(lastCheckTime); - partition.setLastCheckTime(lastCheckTime); - index.setLastCheckTime(lastCheckTime); - tablet.setLastCheckTime(lastCheckTime); - tablet.setCheckedVersion(info.getCheckedVersion()); - - tablet.setIsConsistent(info.isConsistent()); - } - } // manually adding tablets to check public void addTabletsToCheck(List tabletIds) { diff --git a/fe/fe-core/src/main/java/com/starrocks/http/meta/ColocateMetaService.java b/fe/fe-core/src/main/java/com/starrocks/http/meta/ColocateMetaService.java index 23407e2a58e8de..b62b4776155be3 100644 --- a/fe/fe-core/src/main/java/com/starrocks/http/meta/ColocateMetaService.java +++ b/fe/fe-core/src/main/java/com/starrocks/http/meta/ColocateMetaService.java @@ -262,7 +262,7 @@ public void executeInLeaderWithAdmin(BaseRequest request, BaseResponse response) isJoin = false; } - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(groupId.dbId); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(groupId.dbId); if (db == null) { response.appendContent("Non-exist db"); writeResponse(request, response, HttpResponseStatus.BAD_REQUEST); @@ -271,7 +271,7 @@ public void executeInLeaderWithAdmin(BaseRequest request, BaseResponse response) Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.WRITE); try { - OlapTable table = (OlapTable) globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(db, tableId); + OlapTable table = (OlapTable) globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin(db, tableId); if (table == null) { response.appendContent("Non-exist table"); writeResponse(request, response, HttpResponseStatus.BAD_REQUEST); diff --git a/fe/fe-core/src/main/java/com/starrocks/http/meta/GlobalDictMetaService.java b/fe/fe-core/src/main/java/com/starrocks/http/meta/GlobalDictMetaService.java index 74c34acdf5987a..c29421642724ab 100644 --- a/fe/fe-core/src/main/java/com/starrocks/http/meta/GlobalDictMetaService.java +++ b/fe/fe-core/src/main/java/com/starrocks/http/meta/GlobalDictMetaService.java @@ -16,22 +16,33 @@ package com.starrocks.http.meta; import com.google.common.base.Strings; +import com.starrocks.catalog.Database; +import com.starrocks.catalog.OlapTable; +import com.starrocks.catalog.Table; import com.starrocks.common.DdlException; +import com.starrocks.common.util.PropertyAnalyzer; +import com.starrocks.common.util.concurrent.lock.LockType; +import com.starrocks.common.util.concurrent.lock.Locker; import com.starrocks.http.ActionController; import com.starrocks.http.BaseRequest; import com.starrocks.http.BaseResponse; import com.starrocks.http.IllegalArgException; import com.starrocks.http.rest.RestBaseAction; import com.starrocks.http.rest.RestBaseResult; +import com.starrocks.persist.ModifyTablePropertyOperationLog; import com.starrocks.privilege.AccessDeniedException; import com.starrocks.qe.ConnectContext; import com.starrocks.server.GlobalStateMgr; import com.starrocks.sql.ast.UserIdentity; +import com.starrocks.sql.optimizer.statistics.IDictManager; import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.codec.http.HttpResponseStatus; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import java.util.HashMap; +import java.util.Map; + /** * eg: * POST /api/global_dict/table/enable?db_name=test&table_name=test_basic&enable=false @@ -98,8 +109,7 @@ 
public void executeInLeaderWithAdmin(BaseRequest request, BaseResponse response)
                    return;
                }
                boolean isEnable = Boolean.parseBoolean(enableParam.trim());
-                GlobalStateMgr.getCurrentState().getLocalMetastore()
-                        .setHasForbiddenGlobalDict(dbName, tableName, isEnable);
+                setHasForbiddenGlobalDict(dbName, tableName, isEnable);
                response.appendContent(new RestBaseResult("apply success").toJson());
            } else {
                response.appendContent(new RestBaseResult("HTTP method is not allowed.").toJson());
@@ -108,5 +118,38 @@ public void executeInLeaderWithAdmin(BaseRequest request, BaseResponse response)
            }
            sendResult(request, response);
        }
+
+        public void setHasForbiddenGlobalDict(String dbName, String tableName, boolean isForbit) throws DdlException {
+            Map<String, String> property = new HashMap<>();
+            Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbName);
+            if (db == null) {
+                throw new DdlException("the DB " + dbName + " does not exist");
+            }
+            Locker locker = new Locker();
+            locker.lockDatabase(db.getId(), LockType.READ);
+            try {
+                Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), tableName);
+                if (table == null) {
+                    throw new DdlException("the DB " + dbName + " table: " + tableName + " does not exist");
+                }
+
+                if (table instanceof OlapTable) {
+                    OlapTable olapTable = (OlapTable) table;
+                    olapTable.setHasForbiddenGlobalDict(isForbit);
+                    if (isForbit) {
+                        property.put(PropertyAnalyzer.ENABLE_LOW_CARD_DICT_TYPE, PropertyAnalyzer.DISABLE_LOW_CARD_DICT);
+                        IDictManager.getInstance().disableGlobalDict(olapTable.getId());
+                    } else {
+                        property.put(PropertyAnalyzer.ENABLE_LOW_CARD_DICT_TYPE, PropertyAnalyzer.ABLE_LOW_CARD_DICT);
+                        IDictManager.getInstance().enableGlobalDict(olapTable.getId());
+                    }
+                    ModifyTablePropertyOperationLog info =
+                            new ModifyTablePropertyOperationLog(db.getId(), table.getId(), property);
+                    GlobalStateMgr.getCurrentState().getEditLog().logSetHasForbiddenGlobalDict(info);
+                }
+            } finally {
+                locker.unLockDatabase(db.getId(), LockType.READ);
+            }
+        }
    }
}
diff --git a/fe/fe-core/src/main/java/com/starrocks/http/rest/MigrationAction.java b/fe/fe-core/src/main/java/com/starrocks/http/rest/MigrationAction.java
index 0b27df6bd9fb0b..80aa8993cce3ff 100644
--- a/fe/fe-core/src/main/java/com/starrocks/http/rest/MigrationAction.java
+++ b/fe/fe-core/src/main/java/com/starrocks/http/rest/MigrationAction.java
@@ -41,6 +41,7 @@
 import com.starrocks.catalog.MaterializedIndex;
 import com.starrocks.catalog.OlapTable;
 import com.starrocks.catalog.Partition;
+import com.starrocks.catalog.PhysicalPartition;
 import com.starrocks.catalog.Replica;
 import com.starrocks.catalog.Table;
 import com.starrocks.catalog.Table.TableType;
@@ -118,19 +119,21 @@ protected void executeWithoutPassword(BaseRequest request, BaseResponse response
            OlapTable olapTable = (OlapTable) table;
            for (Partition partition : olapTable.getPartitions()) {
-                String partitionName = partition.getName();
-                MaterializedIndex baseIndex = partition.getBaseIndex();
-                for (Tablet tablet : baseIndex.getTablets()) {
-                    List row = Lists.newArrayList();
-                    row.add(tableName);
-                    row.add(partitionName);
-                    row.add(tablet.getId());
-                    row.add(olapTable.getSchemaHashByIndexId(baseIndex.getId()));
-                    if (CollectionUtils.isNotEmpty(((LocalTablet) tablet).getImmutableReplicas())) {
-                        Replica replica = ((LocalTablet) tablet).getImmutableReplicas().get(0);
-                        row.add(replica.getBackendId());
+                for (PhysicalPartition physicalPartition : partition.getSubPartitions()) {
+                    String partitionName = physicalPartition.getName();
+
MaterializedIndex baseIndex = physicalPartition.getBaseIndex(); + for (Tablet tablet : baseIndex.getTablets()) { + List row = Lists.newArrayList(); + row.add(tableName); + row.add(partitionName); + row.add(tablet.getId()); + row.add(olapTable.getSchemaHashByIndexId(baseIndex.getId())); + if (CollectionUtils.isNotEmpty(((LocalTablet) tablet).getImmutableReplicas())) { + Replica replica = ((LocalTablet) tablet).getImmutableReplicas().get(0); + row.add(replica.getBackendId()); + } + rows.add(row); } - rows.add(row); } } } else { @@ -144,19 +147,21 @@ protected void executeWithoutPassword(BaseRequest request, BaseResponse response tableName = table.getName(); for (Partition partition : olapTable.getPartitions()) { - String partitionName = partition.getName(); - MaterializedIndex baseIndex = partition.getBaseIndex(); - for (Tablet tablet : baseIndex.getTablets()) { - List row = Lists.newArrayList(); - row.add(tableName); - row.add(partitionName); - row.add(tablet.getId()); - row.add(olapTable.getSchemaHashByIndexId(baseIndex.getId())); - if (CollectionUtils.isNotEmpty(((LocalTablet) tablet).getImmutableReplicas())) { - Replica replica = ((LocalTablet) tablet).getImmutableReplicas().get(0); - row.add(replica.getBackendId()); + for (PhysicalPartition physicalPartition : partition.getSubPartitions()) { + String partitionName = physicalPartition.getName(); + MaterializedIndex baseIndex = physicalPartition.getBaseIndex(); + for (Tablet tablet : baseIndex.getTablets()) { + List row = Lists.newArrayList(); + row.add(tableName); + row.add(partitionName); + row.add(tablet.getId()); + row.add(olapTable.getSchemaHashByIndexId(baseIndex.getId())); + if (CollectionUtils.isNotEmpty(((LocalTablet) tablet).getImmutableReplicas())) { + Replica replica = ((LocalTablet) tablet).getImmutableReplicas().get(0); + row.add(replica.getBackendId()); + } + rows.add(row); } - rows.add(row); } } } diff --git a/fe/fe-core/src/main/java/com/starrocks/http/rest/v2/vo/PartitionInfoView.java b/fe/fe-core/src/main/java/com/starrocks/http/rest/v2/vo/PartitionInfoView.java index 316695f3289cf5..01717ec1a5fb7f 100644 --- a/fe/fe-core/src/main/java/com/starrocks/http/rest/v2/vo/PartitionInfoView.java +++ b/fe/fe-core/src/main/java/com/starrocks/http/rest/v2/vo/PartitionInfoView.java @@ -21,6 +21,7 @@ import com.starrocks.catalog.Partition; import com.starrocks.catalog.PartitionInfo; import com.starrocks.catalog.PartitionType; +import com.starrocks.catalog.PhysicalPartition; import com.starrocks.catalog.RangePartitionInfo; import com.starrocks.catalog.Table; import com.starrocks.catalog.Tablet; @@ -140,9 +141,11 @@ public static PartitionView createFrom(PartitionInfo partitionInfo, Partition pa pvo.setDistributionType(distributionInfo.getTypeStr()); }); - pvo.setVisibleVersion(partition.getVisibleVersion()); - pvo.setVisibleVersionTime(partition.getVisibleVersionTime()); - pvo.setNextVersion(partition.getNextVersion()); + PhysicalPartition physicalPartition = partition.getDefaultPhysicalPartition(); + + pvo.setVisibleVersion(physicalPartition.getVisibleVersion()); + pvo.setVisibleVersionTime(physicalPartition.getVisibleVersionTime()); + pvo.setNextVersion(physicalPartition.getNextVersion()); PartitionType partitionType = partitionInfo.getType(); switch (partitionType) { @@ -166,7 +169,7 @@ public static PartitionView createFrom(PartitionInfo partitionInfo, Partition pa // TODO add more type support in the future } - List allIndices = partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL); + List allIndices = 
physicalPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL); if (CollectionUtils.isNotEmpty(allIndices)) { MaterializedIndex materializedIndex = allIndices.get(0); List tablets = materializedIndex.getTablets(); diff --git a/fe/fe-core/src/main/java/com/starrocks/journal/JournalEntity.java b/fe/fe-core/src/main/java/com/starrocks/journal/JournalEntity.java index ba3412bd0c546e..07326873a57dda 100644 --- a/fe/fe-core/src/main/java/com/starrocks/journal/JournalEntity.java +++ b/fe/fe-core/src/main/java/com/starrocks/journal/JournalEntity.java @@ -47,7 +47,6 @@ import com.starrocks.catalog.FunctionSearchDesc; import com.starrocks.catalog.MetaVersion; import com.starrocks.catalog.Resource; -import com.starrocks.cluster.Cluster; import com.starrocks.common.Config; import com.starrocks.common.io.Text; import com.starrocks.common.io.Writable; @@ -55,12 +54,9 @@ import com.starrocks.ha.LeaderInfo; import com.starrocks.journal.bdbje.Timestamp; import com.starrocks.leader.Checkpoint; -import com.starrocks.load.DeleteInfo; import com.starrocks.load.ExportJob; -import com.starrocks.load.LoadErrorHub; import com.starrocks.load.MultiDeleteInfo; import com.starrocks.load.loadv2.LoadJob; -import com.starrocks.load.loadv2.LoadJob.LoadJobStateUpdateInfo; import com.starrocks.load.loadv2.LoadJobFinalOperation; import com.starrocks.load.routineload.RoutineLoadJob; import com.starrocks.load.streamload.StreamLoadTask; @@ -70,11 +66,9 @@ import com.starrocks.persist.AlterLoadJobOperationLog; import com.starrocks.persist.AlterMaterializedViewBaseTableInfosLog; import com.starrocks.persist.AlterMaterializedViewStatusLog; -import com.starrocks.persist.AlterRoutineLoadJobOperationLog; import com.starrocks.persist.AlterUserInfo; import com.starrocks.persist.AlterViewInfo; import com.starrocks.persist.AutoIncrementInfo; -import com.starrocks.persist.BackendIdsUpdateInfo; import com.starrocks.persist.BackendTabletsInfo; import com.starrocks.persist.BatchDeleteReplicaInfo; import com.starrocks.persist.BatchDropInfo; @@ -106,7 +100,6 @@ import com.starrocks.persist.DropStorageVolumeLog; import com.starrocks.persist.GlobalVarPersistInfo; import com.starrocks.persist.HbPackage; -import com.starrocks.persist.ImpersonatePrivInfo; import com.starrocks.persist.InsertOverwriteStateChangeInfo; import com.starrocks.persist.ModifyPartitionInfo; import com.starrocks.persist.ModifyTableColumnOperationLog; @@ -127,7 +120,6 @@ import com.starrocks.persist.RoutineLoadOperation; import com.starrocks.persist.SetDefaultStorageVolumeLog; import com.starrocks.persist.SetReplicaStatusOperationLog; -import com.starrocks.persist.ShardInfo; import com.starrocks.persist.SwapTableOperationLog; import com.starrocks.persist.TableAddOrDropColumnsInfo; import com.starrocks.persist.TableInfo; @@ -137,12 +129,10 @@ import com.starrocks.persist.UserPrivilegeCollectionInfo; import com.starrocks.persist.gson.GsonUtils; import com.starrocks.plugin.PluginInfo; -import com.starrocks.qe.SessionVariable; import com.starrocks.scheduler.Task; import com.starrocks.scheduler.mv.MVEpoch; import com.starrocks.scheduler.mv.MVMaintenanceJob; import com.starrocks.scheduler.persist.ArchiveTaskRunsLog; -import com.starrocks.scheduler.persist.DropTaskRunsLog; import com.starrocks.scheduler.persist.DropTasksLog; import com.starrocks.scheduler.persist.TaskRunPeriodStatusChange; import com.starrocks.scheduler.persist.TaskRunStatus; @@ -212,9 +202,7 @@ public void readFields(DataInput in) throws IOException { switch (opCode) { case 
OperationType.OP_SAVE_NEXTID: case OperationType.OP_ERASE_DB: - case OperationType.OP_ERASE_TABLE: case OperationType.OP_ERASE_PARTITION: - case OperationType.OP_META_VERSION: case OperationType.OP_DROP_ALL_BROKER: case OperationType.OP_DROP_REPOSITORY: { data = new Text(); @@ -244,13 +232,6 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_ALTER_DB: - case OperationType.OP_RENAME_DB: { - data = new DatabaseInfo(); - ((DatabaseInfo) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_ALTER_DB_V2: case OperationType.OP_RENAME_DB_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), DatabaseInfo.class); @@ -262,13 +243,6 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_DROP_TABLE: - case OperationType.OP_DROP_ROLLUP: { - data = new DropInfo(); - ((DropInfo) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_DROP_TABLE_V2: case OperationType.OP_DROP_ROLLUP_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), DropInfo.class); @@ -315,12 +289,6 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_MODIFY_PARTITION: { - data = new ModifyPartitionInfo(); - ((ModifyPartitionInfo) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_MODIFY_PARTITION_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), ModifyPartitionInfo.class); isRead = true; @@ -331,14 +299,6 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_RECOVER_DB: - case OperationType.OP_RECOVER_TABLE: - case OperationType.OP_RECOVER_PARTITION: { - data = new RecoverInfo(); - ((RecoverInfo) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_RECOVER_DB_V2: case OperationType.OP_RECOVER_TABLE_V2: case OperationType.OP_RECOVER_PARTITION_V2: { @@ -351,14 +311,6 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_RENAME_TABLE: - case OperationType.OP_RENAME_ROLLUP: - case OperationType.OP_RENAME_PARTITION: { - data = new TableInfo(); - ((TableInfo) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_RENAME_TABLE_V2: case OperationType.OP_RENAME_ROLLUP_V2: case OperationType.OP_RENAME_PARTITION_V2: { @@ -396,21 +348,11 @@ public void readFields(DataInput in) throws IOException { data = AlterMaterializedViewBaseTableInfosLog.read(in); isRead = true; break; - case OperationType.OP_BACKUP_JOB: { - data = AbstractJob.read(in); - isRead = true; - break; - } case OperationType.OP_BACKUP_JOB_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), AbstractJob.class); isRead = true; break; } - case OperationType.OP_RESTORE_JOB: { - data = AbstractJob.read(in); - isRead = true; - break; - } case OperationType.OP_RESTORE_JOB_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), AbstractJob.class); isRead = true; @@ -427,43 +369,21 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_EXPORT_CREATE: - data = new ExportJob(); - ((ExportJob) data).readFields(in); - isRead = true; - break; case OperationType.OP_EXPORT_CREATE_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), ExportJob.class); isRead = true; break; } - case OperationType.OP_EXPORT_UPDATE_STATE: - data = new ExportJob.StateTransfer(); - ((ExportJob.StateTransfer) data).readFields(in); - isRead = true; - break; - case 
OperationType.OP_EXPORT_UPDATE_INFO: - data = ExportJob.ExportUpdateInfo.read(in); - isRead = true; - break; case OperationType.OP_EXPORT_UPDATE_INFO_V2: data = GsonUtils.GSON.fromJson(Text.readString(in), ExportJob.ExportUpdateInfo.class); isRead = true; break; - case OperationType.OP_FINISH_DELETE: - data = new DeleteInfo(); - ((DeleteInfo) data).readFields(in); - isRead = true; - break; case OperationType.OP_FINISH_MULTI_DELETE: { data = MultiDeleteInfo.read(in); isRead = true; break; } - case OperationType.OP_ADD_REPLICA: - case OperationType.OP_UPDATE_REPLICA: - case OperationType.OP_DELETE_REPLICA: - case OperationType.OP_CLEAR_ROLLUP_INFO: { + case OperationType.OP_ADD_REPLICA: { data = ReplicaPersistInfo.read(in); isRead = true; break; @@ -480,14 +400,6 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_ADD_BACKEND: - case OperationType.OP_DROP_BACKEND: - case OperationType.OP_BACKEND_STATE_CHANGE: { - data = new Backend(); - ((Backend) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_ADD_BACKEND_V2: case OperationType.OP_DROP_BACKEND_V2: case OperationType.OP_BACKEND_STATE_CHANGE_V2: { @@ -505,15 +417,6 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_ADD_FRONTEND: - case OperationType.OP_ADD_FIRST_FRONTEND: - case OperationType.OP_UPDATE_FRONTEND: - case OperationType.OP_REMOVE_FRONTEND: { - data = new Frontend(); - ((Frontend) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_ADD_FRONTEND_V2: case OperationType.OP_ADD_FIRST_FRONTEND_V2: case OperationType.OP_UPDATE_FRONTEND_V2: @@ -522,39 +425,11 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_SET_LOAD_ERROR_HUB: { - data = new LoadErrorHub.Param(); - ((LoadErrorHub.Param) data).readFields(in); - isRead = true; - break; - } - case OperationType.OP_NEW_DROP_USER: { - data = UserIdentity.read(in); - isRead = true; - break; - } - case OperationType.OP_UPDATE_USER_PROPERTY: { - data = UserPropertyInfo.read(in); - isRead = true; - break; - } - case OperationType.OP_LEADER_INFO_CHANGE: { - data = new LeaderInfo(); - ((LeaderInfo) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_LEADER_INFO_CHANGE_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), LeaderInfo.class); isRead = true; break; } - case OperationType.OP_TIMESTAMP: { - data = new Timestamp(); - ((Timestamp) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_TIMESTAMP_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), Timestamp.class); isRead = true; @@ -565,36 +440,12 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_GLOBAL_VARIABLE: { - data = new SessionVariable(); - ((SessionVariable) data).readFields(in); - isRead = true; - break; - } - case OperationType.OP_CREATE_CLUSTER: { - data = Cluster.read(in); - isRead = true; - break; - } - case OperationType.OP_ADD_BROKER: - case OperationType.OP_DROP_BROKER: { - data = new BrokerMgr.ModifyBrokerInfo(); - ((BrokerMgr.ModifyBrokerInfo) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_ADD_BROKER_V2: case OperationType.OP_DROP_BROKER_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), BrokerMgr.ModifyBrokerInfo.class); isRead = true; break; } - case OperationType.OP_UPDATE_CLUSTER_AND_BACKENDS: { - data = new BackendIdsUpdateInfo(); - ((BackendIdsUpdateInfo) 
data).readFields(in); - isRead = true; - break; - } case OperationType.OP_UPSERT_TRANSACTION_STATE_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), TransactionState.class); isRead = true; @@ -605,11 +456,6 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_CREATE_REPOSITORY: { - data = Repository.read(in); - isRead = true; - break; - } case OperationType.OP_CREATE_REPOSITORY_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), Repository.class); isRead = true; @@ -620,16 +466,6 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_COLOCATE_ADD_TABLE: - case OperationType.OP_COLOCATE_REMOVE_TABLE: - case OperationType.OP_COLOCATE_BACKENDS_PER_BUCKETSEQ: - case OperationType.OP_COLOCATE_MARK_UNSTABLE: - case OperationType.OP_COLOCATE_MARK_STABLE: { - data = new ColocatePersistInfo(); - ((ColocatePersistInfo) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_COLOCATE_ADD_TABLE_V2: case OperationType.OP_COLOCATE_BACKENDS_PER_BUCKETSEQ_V2: case OperationType.OP_COLOCATE_MARK_UNSTABLE_V2: @@ -638,110 +474,58 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_MODIFY_TABLE_COLOCATE: { - data = new TablePropertyInfo(); - ((TablePropertyInfo) data).readFields(in); - isRead = true; - break; - } case OperationType.OP_MODIFY_TABLE_COLOCATE_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), TablePropertyInfo.class); isRead = true; break; } - case OperationType.OP_HEARTBEAT: { - data = HbPackage.read(in); - isRead = true; - break; - } case OperationType.OP_HEARTBEAT_V2: { data = HbPackage.readV2(in); isRead = true; break; } - case OperationType.OP_ADD_FUNCTION: { - data = Function.read(in); - isRead = true; - break; - } case OperationType.OP_ADD_FUNCTION_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), Function.class); isRead = true; break; } - case OperationType.OP_DROP_FUNCTION: { - data = FunctionSearchDesc.read(in); - isRead = true; - break; - } case OperationType.OP_DROP_FUNCTION_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), FunctionSearchDesc.class); isRead = true; break; } - case OperationType.OP_BACKEND_TABLETS_INFO: { - data = BackendTabletsInfo.read(in); - isRead = true; - break; - } case OperationType.OP_BACKEND_TABLETS_INFO_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), BackendTabletsInfo.class); isRead = true; break; } - case OperationType.OP_CREATE_ROUTINE_LOAD_JOB: { - data = RoutineLoadJob.read(in); - isRead = true; - break; - } case OperationType.OP_CREATE_ROUTINE_LOAD_JOB_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), RoutineLoadJob.class); isRead = true; break; } - case OperationType.OP_CHANGE_ROUTINE_LOAD_JOB: - case OperationType.OP_REMOVE_ROUTINE_LOAD_JOB: { - data = RoutineLoadOperation.read(in); - isRead = true; - break; - } case OperationType.OP_CHANGE_ROUTINE_LOAD_JOB_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), RoutineLoadOperation.class); isRead = true; break; } - case OperationType.OP_CREATE_STREAM_LOAD_TASK: { - data = StreamLoadTask.read(in); - isRead = true; - break; - } case OperationType.OP_CREATE_STREAM_LOAD_TASK_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), StreamLoadTask.class); isRead = true; break; } - case OperationType.OP_CREATE_LOAD_JOB: { - data = com.starrocks.load.loadv2.LoadJob.read(in); - isRead = true; - break; - } case OperationType.OP_CREATE_LOAD_JOB_V2: { data = 
GsonUtils.GSON.fromJson(Text.readString(in), LoadJob.class); isRead = true; break; } - case OperationType.OP_END_LOAD_JOB: { - data = LoadJobFinalOperation.read(in); - isRead = true; - break; - } case OperationType.OP_END_LOAD_JOB_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), LoadJobFinalOperation.class); isRead = true; break; } case OperationType.OP_UPDATE_LOAD_JOB: { - data = LoadJobStateUpdateInfo.read(in); + data = LoadJob.LoadJobStateUpdateInfo.read(in); isRead = true; break; } @@ -782,21 +566,11 @@ public void readFields(DataInput in) throws IOException { data = TaskRunPeriodStatusChange.read(in); isRead = true; break; - case OperationType.OP_DROP_TASK_RUNS: - data = DropTaskRunsLog.read(in); - isRead = true; - break; case OperationType.OP_ARCHIVE_TASK_RUNS: { data = ArchiveTaskRunsLog.read(in); isRead = true; break; } - case OperationType.OP_CREATE_SMALL_FILE: - case OperationType.OP_DROP_SMALL_FILE: { - data = SmallFile.read(in); - isRead = true; - break; - } case OperationType.OP_CREATE_SMALL_FILE_V2: case OperationType.OP_DROP_SMALL_FILE_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), SmallFile.class); @@ -808,21 +582,11 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_BATCH_ADD_ROLLUP: { - data = BatchAlterJobPersistInfo.read(in); - isRead = true; - break; - } case OperationType.OP_BATCH_ADD_ROLLUP_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), BatchAlterJobPersistInfo.class); isRead = true; break; } - case OperationType.OP_MODIFY_DISTRIBUTION_TYPE: { - data = TableInfo.read(in); - isRead = true; - break; - } case OperationType.OP_MODIFY_DISTRIBUTION_TYPE_V2: { data = GsonUtils.GSON.fromJson(Text.readString(in), TableInfo.class); isRead = true; @@ -872,11 +636,6 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_ALTER_ROUTINE_LOAD_JOB: { - data = AlterRoutineLoadJobOperationLog.read(in); - isRead = true; - break; - } case OperationType.OP_ALTER_LOAD_JOB: { data = AlterLoadJobOperationLog.read(in); isRead = true; @@ -977,16 +736,6 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_GRANT_IMPERSONATE: { - data = ImpersonatePrivInfo.read(in); - isRead = true; - break; - } - case OperationType.OP_REVOKE_IMPERSONATE: { - data = ImpersonatePrivInfo.read(in); - isRead = true; - break; - } case OperationType.OP_CREATE_CATALOG: { data = Catalog.read(in); isRead = true; @@ -1012,16 +761,6 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_ADD_UNUSED_SHARD: { // Deprecated - data = ShardInfo.read(in); - isRead = true; - break; - } - case OperationType.OP_DELETE_UNUSED_SHARD: { // Deprecated - data = ShardInfo.read(in); - isRead = true; - break; - } case OperationType.OP_STARMGR: { data = StarMgrJournal.read(in); isRead = true; @@ -1037,21 +776,11 @@ public void readFields(DataInput in) throws IOException { isRead = true; break; } - case OperationType.OP_UPDATE_USER_PROP_V2: { - data = UserPropertyInfo.read(in); - isRead = true; - break; - } case OperationType.OP_UPDATE_USER_PROP_V3: { data = GsonUtils.GSON.fromJson(Text.readString(in), UserPropertyInfo.class); isRead = true; break; } - case OperationType.OP_DROP_USER_V2: { - data = UserIdentity.read(in); - isRead = true; - break; - } case OperationType.OP_DROP_USER_V3: { data = GsonUtils.GSON.fromJson(Text.readString(in), UserIdentity.class); isRead = true; diff --git 
a/fe/fe-core/src/main/java/com/starrocks/journal/JournalWriter.java b/fe/fe-core/src/main/java/com/starrocks/journal/JournalWriter.java index fde313bf0fcf26..5cc332f9eefe81 100644 --- a/fe/fe-core/src/main/java/com/starrocks/journal/JournalWriter.java +++ b/fe/fe-core/src/main/java/com/starrocks/journal/JournalWriter.java @@ -16,12 +16,19 @@ package com.starrocks.journal; import com.starrocks.common.Config; +import com.starrocks.common.io.Text; +import com.starrocks.common.io.Writable; import com.starrocks.common.util.Daemon; import com.starrocks.common.util.Util; import com.starrocks.metric.MetricRepo; +import com.starrocks.persist.DatabaseInfo; +import com.starrocks.persist.OperationType; +import com.starrocks.persist.gson.GsonUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; import java.util.ArrayList; import java.util.List; import java.util.concurrent.BlockingQueue; @@ -105,6 +112,20 @@ protected void writeOneBatch() throws InterruptedException { long nextJournalId = nextVisibleJournalId; initBatch(); + JournalEntity journalEntity = new JournalEntity(); + DataInputStream in = new DataInputStream(new ByteArrayInputStream(currentJournal.getBuffer().getData())); + try { + journalEntity.readFields(in); + if (journalEntity.getOpCode() == OperationType.OP_RENAME_DB_V2) { + Writable data = GsonUtils.GSON.fromJson(Text.readString(in), DatabaseInfo.class); + DatabaseInfo databaseInfo = (DatabaseInfo) data; + + //databaseInfo.transaction; + } + } catch (Exception e) { + + } + try { this.journal.batchWriteBegin(); diff --git a/fe/fe-core/src/main/java/com/starrocks/lake/LakeTable.java b/fe/fe-core/src/main/java/com/starrocks/lake/LakeTable.java index 27995d46cb83c0..b343bfdfb884e8 100644 --- a/fe/fe-core/src/main/java/com/starrocks/lake/LakeTable.java +++ b/fe/fe-core/src/main/java/com/starrocks/lake/LakeTable.java @@ -204,7 +204,7 @@ public List> getArbitraryTabletBucketsSeq() throws DdlException { public List getShardGroupIds() { List shardGroupIds = new ArrayList<>(); for (Partition p : getAllPartitions()) { - shardGroupIds.add(p.getShardGroupId()); + shardGroupIds.add(p.getDefaultPhysicalPartition().getShardGroupId()); } return shardGroupIds; } diff --git a/fe/fe-core/src/main/java/com/starrocks/lake/RecycleLakeListPartitionInfo.java b/fe/fe-core/src/main/java/com/starrocks/lake/RecycleLakeListPartitionInfo.java index 29cfcfaa456360..6222915a49edbf 100644 --- a/fe/fe-core/src/main/java/com/starrocks/lake/RecycleLakeListPartitionInfo.java +++ b/fe/fe-core/src/main/java/com/starrocks/lake/RecycleLakeListPartitionInfo.java @@ -39,7 +39,7 @@ public boolean delete() { WarehouseManager manager = GlobalStateMgr.getCurrentState().getWarehouseMgr(); Warehouse warehouse = manager.getBackgroundWarehouse(); if (LakeTableHelper.removePartitionDirectory(partition, warehouse.getId())) { - GlobalStateMgr.getCurrentState().getLocalMetastore().onErasePartition(partition); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().onErasePartition(partition); return true; } else { return false; diff --git a/fe/fe-core/src/main/java/com/starrocks/lake/RecycleLakeRangePartitionInfo.java b/fe/fe-core/src/main/java/com/starrocks/lake/RecycleLakeRangePartitionInfo.java index c5a5e43e65efa0..4bda846114f573 100644 --- a/fe/fe-core/src/main/java/com/starrocks/lake/RecycleLakeRangePartitionInfo.java +++ b/fe/fe-core/src/main/java/com/starrocks/lake/RecycleLakeRangePartitionInfo.java @@ -42,7 +42,7 @@ public 
boolean delete() { WarehouseManager manager = GlobalStateMgr.getCurrentState().getWarehouseMgr(); Warehouse warehouse = manager.getBackgroundWarehouse(); if (LakeTableHelper.removePartitionDirectory(partition, warehouse.getId())) { - GlobalStateMgr.getCurrentState().getLocalMetastore().onErasePartition(partition); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().onErasePartition(partition); return true; } else { return false; diff --git a/fe/fe-core/src/main/java/com/starrocks/lake/RecycleLakeUnPartitionInfo.java b/fe/fe-core/src/main/java/com/starrocks/lake/RecycleLakeUnPartitionInfo.java index 1366381271f19b..ccd9eaa50b55f7 100644 --- a/fe/fe-core/src/main/java/com/starrocks/lake/RecycleLakeUnPartitionInfo.java +++ b/fe/fe-core/src/main/java/com/starrocks/lake/RecycleLakeUnPartitionInfo.java @@ -41,7 +41,7 @@ public boolean delete() { WarehouseManager manager = GlobalStateMgr.getCurrentState().getWarehouseMgr(); Warehouse warehouse = manager.getBackgroundWarehouse(); if (LakeTableHelper.removePartitionDirectory(partition, warehouse.getId())) { - GlobalStateMgr.getCurrentState().getLocalMetastore().onErasePartition(partition); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().onErasePartition(partition); return true; } else { return false; diff --git a/fe/fe-core/src/main/java/com/starrocks/lake/StarMgrMetaSyncer.java b/fe/fe-core/src/main/java/com/starrocks/lake/StarMgrMetaSyncer.java index 9d57c9fe14dc08..d90331c42ea693 100644 --- a/fe/fe-core/src/main/java/com/starrocks/lake/StarMgrMetaSyncer.java +++ b/fe/fe-core/src/main/java/com/starrocks/lake/StarMgrMetaSyncer.java @@ -63,9 +63,9 @@ public StarMgrMetaSyncer() { private List getAllPartitionShardGroupId() { List groupIds = new ArrayList<>(); - List dbIds = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIdsIncludeRecycleBin(); + List dbIds = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIdsIncludeRecycleBin(); for (Long dbId : dbIds) { - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { continue; } @@ -76,9 +76,9 @@ private List getAllPartitionShardGroupId() { Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.READ); try { - for (Table table : GlobalStateMgr.getCurrentState().getLocalMetastore().getTablesIncludeRecycleBin(db)) { + for (Table table : GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTablesIncludeRecycleBin(db)) { if (table.isCloudNativeTableOrMaterializedView()) { - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getAllPartitionsIncludeRecycleBin((OlapTable) table) .stream() .map(Partition::getSubPartitions) @@ -296,7 +296,7 @@ public boolean syncTableMetaInternal(Database db, OlapTable table, boolean force if (GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), table.getId()) == null) { return false; // table might be dropped } - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getAllPartitionsIncludeRecycleBin(table) .stream() .map(Partition::getSubPartitions) diff --git a/fe/fe-core/src/main/java/com/starrocks/lake/backup/LakeRestoreJob.java b/fe/fe-core/src/main/java/com/starrocks/lake/backup/LakeRestoreJob.java index fde1e0fb051ca5..efe4457e627110 100644 --- a/fe/fe-core/src/main/java/com/starrocks/lake/backup/LakeRestoreJob.java +++ 
b/fe/fe-core/src/main/java/com/starrocks/lake/backup/LakeRestoreJob.java @@ -99,7 +99,8 @@ protected void createReplicas(OlapTable localTbl, Partition restorePart) { @Override protected void genFileMapping(OlapTable localTbl, Partition localPartition, Long remoteTblId, BackupJobInfo.BackupPartitionInfo backupPartInfo, boolean overwrite) { - for (MaterializedIndex localIdx : localPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { + for (MaterializedIndex localIdx : localPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { BackupIndexInfo backupIdxInfo = backupPartInfo.getIdx(localTbl.getIndexNameById(localIdx.getId())); Preconditions.checkState(backupIdxInfo.tablets.size() == localIdx.getTablets().size()); for (int i = 0; i < localIdx.getTablets().size(); i++) { @@ -125,9 +126,9 @@ protected void prepareAndSendSnapshotTasks(Database db) { LakeTablet tablet = null; try { OlapTable tbl = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getId(), idChain.getTblId()); + .getTable(db.getId(), idChain.getTblId()); Partition part = tbl.getPartition(idChain.getPartId()); - MaterializedIndex index = part.getIndex(idChain.getIdxId()); + MaterializedIndex index = part.getDefaultPhysicalPartition().getIndex(idChain.getIdxId()); tablet = (LakeTablet) index.getTablet(idChain.getTabletId()); Long computeNodeId = GlobalStateMgr.getCurrentState().getWarehouseMgr() .getComputeNodeId(WarehouseManager.DEFAULT_WAREHOUSE_NAME, tablet); @@ -156,7 +157,7 @@ protected void prepareDownloadTasks(List beSnapshotInfos, Database request.restoreInfos = Lists.newArrayList(); for (SnapshotInfo info : beSnapshotInfos) { OlapTable tbl = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getId(), info.getTblId()); + .getTable(db.getId(), info.getTblId()); if (tbl == null) { status = new Status(Status.ErrCode.NOT_FOUND, "restored table " + info.getTblId() + " does not exist"); @@ -171,7 +172,7 @@ protected void prepareDownloadTasks(List beSnapshotInfos, Database return; } - MaterializedIndex idx = part.getIndex(info.getIndexId()); + MaterializedIndex idx = part.getDefaultPhysicalPartition().getIndex(info.getIndexId()); if (idx == null) { status = new Status(Status.ErrCode.NOT_FOUND, "index " + info.getIndexId() + " does not exist in partion " + part.getName() @@ -277,7 +278,8 @@ public static LakeRestoreJob read(DataInput in) throws IOException { @Override protected void modifyInvertedIndex(OlapTable restoreTbl, Partition restorePart) { - for (MaterializedIndex restoredIdx : restorePart.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { + for (MaterializedIndex restoredIdx : restorePart.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { MaterializedIndexMeta indexMeta = restoreTbl.getIndexMetaByIndexId(restoredIdx.getId()); TStorageMedium medium = restoreTbl.getPartitionInfo().getDataProperty(restorePart.getId()).getStorageMedium(); TabletMeta tabletMeta = new TabletMeta(dbId, restoreTbl.getId(), restorePart.getId(), @@ -292,7 +294,7 @@ protected void modifyInvertedIndex(OlapTable restoreTbl, Partition restorePart) protected void addRestoredPartitions(Database db, boolean modify) { for (Pair entry : restoredPartitions) { OlapTable localTbl = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), entry.first); + .getTable(db.getFullName(), entry.first); Partition restorePart = 
entry.second; OlapTable remoteTbl = (OlapTable) backupMeta.getTable(entry.first); RangePartitionInfo localPartitionInfo = (RangePartitionInfo) localTbl.getPartitionInfo(); diff --git a/fe/fe-core/src/main/java/com/starrocks/leader/LeaderImpl.java b/fe/fe-core/src/main/java/com/starrocks/leader/LeaderImpl.java index b15f2845805ae8..eb33b3b69d1dfa 100644 --- a/fe/fe-core/src/main/java/com/starrocks/leader/LeaderImpl.java +++ b/fe/fe-core/src/main/java/com/starrocks/leader/LeaderImpl.java @@ -951,13 +951,16 @@ public TGetTableMetaResponse getTableMeta(TGetTableMetaRequest request) { TBasePartitionDesc basePartitionDesc = new TBasePartitionDesc(); // fill partition meta info for (Partition partition : olapTable.getAllPartitions()) { + + PhysicalPartition physicalPartition = partition.getDefaultPhysicalPartition(); + TPartitionMeta partitionMeta = new TPartitionMeta(); partitionMeta.setPartition_id(partition.getId()); partitionMeta.setPartition_name(partition.getName()); partitionMeta.setState(partition.getState().name()); - partitionMeta.setVisible_version(partition.getVisibleVersion()); - partitionMeta.setVisible_time(partition.getVisibleVersionTime()); - partitionMeta.setNext_version(partition.getNextVersion()); + partitionMeta.setVisible_version(physicalPartition.getVisibleVersion()); + partitionMeta.setVisible_time(physicalPartition.getVisibleVersionTime()); + partitionMeta.setNext_version(physicalPartition.getNextVersion()); partitionMeta.setIs_temp(olapTable.getPartition(partition.getName(), true) != null); tableMeta.addToPartitions(partitionMeta); short replicaNum = partitionInfo.getReplicationNum(partition.getId()); @@ -1031,7 +1034,8 @@ public TGetTableMetaResponse getTableMeta(TGetTableMetaRequest request) { } for (Partition partition : olapTable.getAllPartitions()) { - List indexes = partition.getMaterializedIndices(IndexExtState.ALL); + List indexes = + partition.getDefaultPhysicalPartition().getMaterializedIndices(IndexExtState.ALL); for (MaterializedIndex index : indexes) { TIndexMeta indexMeta = new TIndexMeta(); indexMeta.setIndex_id(index.getId()); diff --git a/fe/fe-core/src/main/java/com/starrocks/leader/ReportHandler.java b/fe/fe-core/src/main/java/com/starrocks/leader/ReportHandler.java index 24c4f0b6b08332..4360876386ef6b 100644 --- a/fe/fe-core/src/main/java/com/starrocks/leader/ReportHandler.java +++ b/fe/fe-core/src/main/java/com/starrocks/leader/ReportHandler.java @@ -38,10 +38,12 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.HashBasedTable; +import com.google.common.collect.HashMultimap; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ListMultimap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; +import com.google.common.collect.Multimap; import com.google.common.collect.Queues; import com.google.common.collect.Sets; import com.google.common.collect.Table; @@ -49,6 +51,7 @@ import com.starrocks.catalog.ColocateTableIndex; import com.starrocks.catalog.Column; import com.starrocks.catalog.ColumnId; +import com.starrocks.catalog.DataProperty; import com.starrocks.catalog.Database; import com.starrocks.catalog.DiskInfo; import com.starrocks.catalog.KeysType; @@ -59,6 +62,7 @@ import com.starrocks.catalog.MaterializedIndexMeta; import com.starrocks.catalog.OlapTable; import com.starrocks.catalog.Partition; +import com.starrocks.catalog.PartitionInfo; import com.starrocks.catalog.PhysicalPartition; import 
com.starrocks.catalog.Replica; import com.starrocks.catalog.Replica.ReplicaState; @@ -79,11 +83,13 @@ import com.starrocks.common.util.concurrent.lock.Locker; import com.starrocks.datacache.DataCacheMetrics; import com.starrocks.memory.MemoryTrackable; +import com.starrocks.meta.StarRocksMetadata; import com.starrocks.metric.GaugeMetric; import com.starrocks.metric.Metric.MetricUnit; import com.starrocks.metric.MetricRepo; import com.starrocks.persist.BackendTabletsInfo; import com.starrocks.persist.BatchDeleteReplicaInfo; +import com.starrocks.persist.ModifyPartitionInfo; import com.starrocks.persist.ReplicaPersistInfo; import com.starrocks.server.GlobalStateMgr; import com.starrocks.server.RunMode; @@ -135,6 +141,7 @@ import org.apache.thrift.TException; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -142,6 +149,7 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; public class ReportHandler extends Daemon implements MemoryTrackable { @@ -459,8 +467,7 @@ private static void tabletReport(long backendId, Map backendTable backendId, backendTablets.size(), backendReportVersion); // storage medium map - HashMap storageMediumMap = - GlobalStateMgr.getCurrentState().getLocalMetastore().getPartitionIdToStorageMediumMap(); + HashMap storageMediumMap = getPartitionIdToStorageMediumMap(); // db id -> tablet id ListMultimap tabletSyncMap = ArrayListMultimap.create(); @@ -739,6 +746,123 @@ public static void tabletReport(long backendId, Map backendTablet tabletMigrationMap.size(), transactionsToClear.size(), transactionsToPublish.size(), (end - start)); } + public static HashMap getPartitionIdToStorageMediumMap() { + StarRocksMetadata starRocksMetadata = GlobalStateMgr.getCurrentState().getStarRocksMetadata(); + HashMap storageMediumMap = new HashMap<>(); + + // record partition which need to change storage medium + // dbId -> (tableId -> partitionId) + HashMap> changedPartitionsMap = new HashMap<>(); + long currentTimeMs = System.currentTimeMillis(); + List dbIds = starRocksMetadata.getDbIds(); + + for (long dbId : dbIds) { + Database db = starRocksMetadata.getDb(dbId); + if (db == null) { + LOG.warn("db {} does not exist while doing backend report", dbId); + continue; + } + + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.READ); + try { + for (com.starrocks.catalog.Table table : db.getTables()) { + if (!table.isOlapTableOrMaterializedView()) { + continue; + } + + long tableId = table.getId(); + OlapTable olapTable = (OlapTable) table; + PartitionInfo partitionInfo = olapTable.getPartitionInfo(); + for (Partition partition : olapTable.getAllPartitions()) { + long partitionId = partition.getId(); + DataProperty dataProperty = partitionInfo.getDataProperty(partition.getId()); + Preconditions.checkNotNull(dataProperty, + partition.getName() + ", pId:" + partitionId + ", db: " + dbId + ", tbl: " + tableId); + // only normal state table can migrate. + // PRIMARY_KEYS table does not support local migration. + if (dataProperty.getStorageMedium() == TStorageMedium.SSD + && dataProperty.getCooldownTimeMs() < currentTimeMs + && olapTable.getState() == OlapTable.OlapTableState.NORMAL) { + // expire. change to HDD. 
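+                            // the SSD cooldown for this partition has expired, but only the db READ lock
+                            // is held in this pass, so defer the actual change to the WRITE-locked pass below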
+ // record and change when holding write lock + Multimap multimap = changedPartitionsMap.get(dbId); + if (multimap == null) { + multimap = HashMultimap.create(); + changedPartitionsMap.put(dbId, multimap); + } + multimap.put(tableId, partitionId); + } else { + storageMediumMap.put(partitionId, dataProperty.getStorageMedium()); + } + } // end for partitions + } // end for tables + } finally { + locker.unLockDatabase(db.getId(), LockType.READ); + } + } // end for dbs + + // handle data property changed + for (Long dbId : changedPartitionsMap.keySet()) { + Database db = starRocksMetadata.getDb(dbId); + if (db == null) { + LOG.warn("db {} does not exist while checking backend storage medium", dbId); + continue; + } + Multimap tableIdToPartitionIds = changedPartitionsMap.get(dbId); + + // use try lock to avoid blocking a long time. + // if block too long, backend report rpc will timeout. + Locker locker = new Locker(); + if (!locker.tryLockDatabase(db.getId(), LockType.WRITE, Database.TRY_LOCK_TIMEOUT_MS, TimeUnit.MILLISECONDS)) { + LOG.warn("try get db {}-{} write lock but failed when checking backend storage medium", + db.getFullName(), dbId); + continue; + } + Preconditions.checkState(locker.isDbWriteLockHeldByCurrentThread(db)); + try { + for (Long tableId : tableIdToPartitionIds.keySet()) { + com.starrocks.catalog.Table table = starRocksMetadata.getTable(db.getId(), tableId); + if (table == null) { + continue; + } + OlapTable olapTable = (OlapTable) table; + PartitionInfo partitionInfo = olapTable.getPartitionInfo(); + + Collection partitionIds = tableIdToPartitionIds.get(tableId); + for (Long partitionId : partitionIds) { + Partition partition = olapTable.getPartition(partitionId); + if (partition == null) { + continue; + } + DataProperty dataProperty = partitionInfo.getDataProperty(partition.getId()); + if (dataProperty.getStorageMedium() == TStorageMedium.SSD + && dataProperty.getCooldownTimeMs() < currentTimeMs) { + // expire. change to HDD. 
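+                        // re-checked under the db WRITE lock: switch the partition to HDD and persist
+                        // the change to followers via a ModifyPartitionInfo edit log entry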
+ DataProperty hdd = new DataProperty(TStorageMedium.HDD); + partitionInfo.setDataProperty(partition.getId(), hdd); + storageMediumMap.put(partitionId, TStorageMedium.HDD); + LOG.debug("partition[{}-{}-{}] storage medium changed from SSD to HDD", + dbId, tableId, partitionId); + + // log + ModifyPartitionInfo info = + new ModifyPartitionInfo(db.getId(), olapTable.getId(), + partition.getId(), + hdd, + (short) -1, + partitionInfo.getIsInMemory(partition.getId())); + GlobalStateMgr.getCurrentState().getEditLog().logModifyPartition(info); + } + } // end for partitions + } // end for tables + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } // end for dbs + return storageMediumMap; + } + private static boolean needSync(Replica replicaInFe, TTabletInfo backendTabletInfo) { if (backendTabletInfo.isSetUsed() && !backendTabletInfo.isUsed()) { // tablet is bad, do not sync @@ -914,7 +1038,7 @@ private static void sync(Map backendTablets, ListMultimap backendTablets, ListMultimap backendTablets, ListMultimap backendTablets, ListMultimap tabletDeleteFromMeta List replicaPersistInfoList = new ArrayList<>(); DB_TRAVERSE: for (Long dbId : tabletDeleteFromMeta.keySet()) { - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { continue; } @@ -1094,7 +1220,7 @@ private static void deleteFromMeta(ListMultimap tabletDeleteFromMeta long currentTime = System.currentTimeMillis(); if (currentTime - lockStartTime > MAX_DB_WLOCK_HOLDING_TIME_MS) { locker.unLockDatabase(db.getId(), LockType.WRITE); - db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); + db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { continue DB_TRAVERSE; } @@ -1113,25 +1239,25 @@ private static void deleteFromMeta(ListMultimap tabletDeleteFromMeta LOG.debug("delete tablet {} in partition {} of table {} in db {} from meta. 
backend[{}]", tabletId, partitionId, tableId, dbId, backendId); - OlapTable olapTable = (OlapTable) globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(db, tableId); + OlapTable olapTable = (OlapTable) globalStateMgr.getStarRocksMetadata() + .getTableIncludeRecycleBin(db, tableId); if (olapTable == null) { continue; } - if (globalStateMgr.getLocalMetastore() + if (globalStateMgr.getStarRocksMetadata() .getPartitionIncludeRecycleBin(olapTable, tabletMeta.getPartitionId()) == null) { continue; } - PhysicalPartition partition = globalStateMgr.getLocalMetastore() + PhysicalPartition partition = globalStateMgr.getStarRocksMetadata() .getPhysicalPartitionIncludeRecycleBin(olapTable, partitionId); if (partition == null) { continue; } - short replicationNum = - globalStateMgr.getLocalMetastore().getReplicationNumIncludeRecycleBin(olapTable.getPartitionInfo(), - tabletMeta.getPartitionId()); + short replicationNum = globalStateMgr.getStarRocksMetadata() + .getReplicationNumIncludeRecycleBin(olapTable.getPartitionInfo(), tabletMeta.getPartitionId()); if (replicationNum == (short) -1) { continue; } @@ -1147,7 +1273,8 @@ private static void deleteFromMeta(ListMultimap tabletDeleteFromMeta continue; } - LocalTablet tablet = (LocalTablet) index.getTablet(tabletId); + LocalTablet tablet = (LocalTablet) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTablet(index, tabletId); if (tablet == null) { continue; } @@ -1233,7 +1360,7 @@ private static void deleteFromMeta(ListMultimap tabletDeleteFromMeta createReplicaBatchTask.addTask(task); } else { // just set this replica as bad - if (replica.setBad(true)) { + if (replica.isBad()) { LOG.warn("tablet {} has only one replica {} on backend {}" + " and it is lost, set it as bad", tabletId, replica.getId(), backendId); @@ -1243,8 +1370,7 @@ private static void deleteFromMeta(ListMultimap tabletDeleteFromMeta dbId, tableId, partitionId, indexId, tabletId, backendId, replica.getId()); tabletsInfo.addReplicaInfo(replicaPersistInfo); - GlobalStateMgr.getCurrentState().getEditLog() - .logBackendTabletsInfo(tabletsInfo); + GlobalStateMgr.getCurrentState().getLocalMetastore().backendTabletsInfo(tabletsInfo); } } } @@ -1286,8 +1412,8 @@ private static void deleteFromMeta(ListMultimap tabletDeleteFromMeta if (deleteTablets.size() > 0) { // no need to be protected by db lock, if the related meta is dropped, the replay code will ignore that tablet - GlobalStateMgr.getCurrentState().getEditLog() - .logBatchDeleteReplica(new BatchDeleteReplicaInfo(backendId, deleteTablets, replicaPersistInfoList)); + GlobalStateMgr.getCurrentState().getLocalMetastore() + .batchDeleteReplicaInfo(new BatchDeleteReplicaInfo(backendId, deleteTablets, replicaPersistInfoList)); } if (Config.recover_with_empty_tablet && createReplicaBatchTask.getTaskNum() > 0) { @@ -1610,7 +1736,8 @@ private static void handleRecoverTablet(ListMultimap tabletRecoveryM int schemaHash = olapTable.getSchemaHashByIndexId(indexId); - LocalTablet tablet = (LocalTablet) index.getTablet(tabletId); + LocalTablet tablet = (LocalTablet) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTablet(index, tabletId); if (tablet == null) { continue; } @@ -1635,7 +1762,7 @@ private static void handleRecoverTablet(ListMultimap tabletRecoveryM if (!backendTabletsInfo.isEmpty()) { // need to write edit log the sync the bad info to other FEs - GlobalStateMgr.getCurrentState().getEditLog().logBackendTabletsInfo(backendTabletsInfo); + 
GlobalStateMgr.getCurrentState().getLocalMetastore().backendTabletsInfo(backendTabletsInfo); } } @@ -2052,28 +2179,28 @@ private static void addReplica(long tabletId, TTabletInfo backendTabletInfo, lon long dataSize = backendTabletInfo.getData_size(); long rowCount = backendTabletInfo.getRow_count(); - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(dbId); if (db == null) { throw new MetaNotFoundException("db[" + dbId + "] does not exist"); } - OlapTable olapTable = (OlapTable) globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(db, tableId); + OlapTable olapTable = (OlapTable) globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin(db, tableId); if (olapTable == null) { throw new MetaNotFoundException("table[" + tableId + "] does not exist"); } Locker locker = new Locker(); locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.WRITE); try { - if (globalStateMgr.getLocalMetastore().getPartitionIncludeRecycleBin(olapTable, partitionId) == null) { + if (globalStateMgr.getStarRocksMetadata().getPartitionIncludeRecycleBin(olapTable, partitionId) == null) { throw new MetaNotFoundException("partition[" + partitionId + "] does not exist"); } short replicationNum = - globalStateMgr.getLocalMetastore() + globalStateMgr.getStarRocksMetadata() .getReplicationNumIncludeRecycleBin(olapTable.getPartitionInfo(), partitionId); if (replicationNum == (short) -1) { throw new MetaNotFoundException("invalid replication number of partition [" + partitionId + "]"); } - PhysicalPartition partition = globalStateMgr.getLocalMetastore() + PhysicalPartition partition = globalStateMgr.getStarRocksMetadata() .getPhysicalPartitionIncludeRecycleBin(olapTable, physicalPartitionId); if (partition == null) { throw new MetaNotFoundException("physical partition[" + physicalPartitionId + "] does not exist"); @@ -2084,7 +2211,8 @@ private static void addReplica(long tabletId, TTabletInfo backendTabletInfo, lon throw new MetaNotFoundException("index[" + indexId + "] does not exist"); } - LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(tabletId); + LocalTablet tablet = (LocalTablet) GlobalStateMgr.getCurrentState() + .getLocalMetastore().getTablet(materializedIndex, tabletId); if (tablet == null) { throw new MetaNotFoundException("tablet[" + tabletId + "] does not exist"); } @@ -2148,18 +2276,13 @@ private static void addReplica(long tabletId, TTabletInfo backendTabletInfo, lon } long replicaId = GlobalStateMgr.getCurrentState().getNextId(); - Replica replica = new Replica(replicaId, backendId, version, schemaHash, - dataSize, rowCount, ReplicaState.NORMAL, - lastFailedVersion, version); - tablet.addReplica(replica); // write edit log ReplicaPersistInfo info = ReplicaPersistInfo.createForAdd(dbId, tableId, physicalPartitionId, indexId, tabletId, backendId, replicaId, version, schemaHash, dataSize, rowCount, lastFailedVersion, version, minReadableVersion); - - GlobalStateMgr.getCurrentState().getEditLog().logAddReplica(info); + GlobalStateMgr.getCurrentState().getLocalMetastore().addReplica(info); LOG.info("add replica[{}-{}] to globalStateMgr. 
backend:[{}] replicas: {}", tabletId, replicaId, backendId, tablet.getReplicaInfos()); diff --git a/fe/fe-core/src/main/java/com/starrocks/listener/LoadJobMVListener.java b/fe/fe-core/src/main/java/com/starrocks/listener/LoadJobMVListener.java index 36547a351eccee..25706e471c26fc 100644 --- a/fe/fe-core/src/main/java/com/starrocks/listener/LoadJobMVListener.java +++ b/fe/fe-core/src/main/java/com/starrocks/listener/LoadJobMVListener.java @@ -153,7 +153,7 @@ private void doTriggerToRefreshRelatedMVs(Database db, Table table) throws DdlEx LOG.info("Trigger auto materialized view refresh because of base table {} has changed, " + "db:{}, mv:{}", table.getName(), mvDb.getFullName(), materializedView.getName()); - GlobalStateMgr.getCurrentState().getLocalMetastore().refreshMaterializedView( + GlobalStateMgr.getCurrentState().getStarRocksMetadata().refreshMaterializedView( mvDb.getFullName(), materializedView.getName(), false, null, Constants.TaskRunPriority.NORMAL.value(), true, false); } diff --git a/fe/fe-core/src/main/java/com/starrocks/load/DeleteMgr.java b/fe/fe-core/src/main/java/com/starrocks/load/DeleteMgr.java index 8c00d46737b1d7..6115bcf3326ecd 100644 --- a/fe/fe-core/src/main/java/com/starrocks/load/DeleteMgr.java +++ b/fe/fe-core/src/main/java/com/starrocks/load/DeleteMgr.java @@ -627,7 +627,8 @@ private boolean checkDelete(OlapTable table, List partitions, List> indexIdToSchema = table.getIndexIdToSchema(); Partition partition = partitions.get(0); - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { if (table.getBaseIndexId() == index.getId()) { continue; } diff --git a/fe/fe-core/src/main/java/com/starrocks/load/ExportJob.java b/fe/fe-core/src/main/java/com/starrocks/load/ExportJob.java index 11fda3a296c472..9a302284fb3cad 100644 --- a/fe/fe-core/src/main/java/com/starrocks/load/ExportJob.java +++ b/fe/fe-core/src/main/java/com/starrocks/load/ExportJob.java @@ -364,7 +364,7 @@ private void genTaskFragments(List fragments, List scanN if (tabletMeta.isLakeTablet()) { Partition partition = exportTable.getPartition(tabletMeta.getPartitionId()); if (partition != null) { - MaterializedIndex index = partition.getIndex(tabletMeta.getIndexId()); + MaterializedIndex index = partition.getDefaultPhysicalPartition().getIndex(tabletMeta.getIndexId()); if (index != null) { Tablet tablet = index.getTablet(tabletId); if (tablet != null) { diff --git a/fe/fe-core/src/main/java/com/starrocks/load/InsertOverwriteJobRunner.java b/fe/fe-core/src/main/java/com/starrocks/load/InsertOverwriteJobRunner.java index d40c4565ffaffb..31c56f1f8cbef5 100644 --- a/fe/fe-core/src/main/java/com/starrocks/load/InsertOverwriteJobRunner.java +++ b/fe/fe-core/src/main/java/com/starrocks/load/InsertOverwriteJobRunner.java @@ -297,7 +297,7 @@ private void createPartitionByValue(InsertStmt insertStmt) { try { AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(olapTable); analyzer.analyze(context, addPartitionClause); - state.getLocalMetastore().addPartitions(context, db, olapTable.getName(), addPartitionClause); + state.getStarRocksMetadata().addPartitions(context, db, olapTable.getName(), addPartitionClause); } catch (Exception ex) { LOG.warn(ex.getMessage(), ex); throw new RuntimeException(ex); @@ -389,7 +389,8 @@ private void gc(boolean isReplay) { Partition partition = targetTable.getPartition(pid); if 
(partition != null) { - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition(). + getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { // hash set is able to deduplicate the elements sourceTablets.addAll(index.getTablets()); } @@ -438,7 +439,8 @@ private void doCommit(boolean isReplay) { Set sourceTablets = Sets.newHashSet(); sourcePartitionNames.forEach(name -> { Partition partition = targetTable.getPartition(name); - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { sourceTablets.addAll(index.getTablets()); } }); diff --git a/fe/fe-core/src/main/java/com/starrocks/load/PartitionUtils.java b/fe/fe-core/src/main/java/com/starrocks/load/PartitionUtils.java index fdeec97534be43..9ef06c1fa65c97 100644 --- a/fe/fe-core/src/main/java/com/starrocks/load/PartitionUtils.java +++ b/fe/fe-core/src/main/java/com/starrocks/load/PartitionUtils.java @@ -56,7 +56,7 @@ public static void createAndAddTempPartitionsForTable(Database db, OlapTable tar List tmpPartitionIds, DistributionDesc distributionDesc, long warehouseId) throws DdlException { - List newTempPartitions = GlobalStateMgr.getCurrentState().getLocalMetastore() + List newTempPartitions = GlobalStateMgr.getCurrentState().getStarRocksMetadata() .createTempPartitionsFromPartitions(db, targetTable, postfix, sourcePartitionIds, tmpPartitionIds, distributionDesc, warehouseId); Locker locker = new Locker(); @@ -135,7 +135,7 @@ public static void createAndAddTempPartitionsForTable(Database db, OlapTable tar } AddPartitionsInfoV2 infos = new AddPartitionsInfoV2(partitionInfoV2List); - GlobalStateMgr.getCurrentState().getEditLog().logAddPartitions(infos); + GlobalStateMgr.getCurrentState().getLocalMetastore().addPartition(infos); success = true; } finally { @@ -153,7 +153,8 @@ public static void createAndAddTempPartitionsForTable(Database db, OlapTable tar public static void clearTabletsFromInvertedIndex(List partitions) { TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); for (Partition partition : partitions) { - for (MaterializedIndex materializedIndex : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (MaterializedIndex materializedIndex : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { for (Tablet tablet : materializedIndex.getTablets()) { invertedIndex.deleteTablet(tablet.getId()); } diff --git a/fe/fe-core/src/main/java/com/starrocks/load/loadv2/LoadsHistorySyncer.java b/fe/fe-core/src/main/java/com/starrocks/load/loadv2/LoadsHistorySyncer.java index 3f7cbad5634911..a65263c48e0e43 100644 --- a/fe/fe-core/src/main/java/com/starrocks/load/loadv2/LoadsHistorySyncer.java +++ b/fe/fe-core/src/main/java/com/starrocks/load/loadv2/LoadsHistorySyncer.java @@ -98,7 +98,7 @@ public static void createTable() throws UserException { public static boolean correctTable() { int numBackends = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getTotalBackendNumber(); - int replica = GlobalStateMgr.getCurrentState().getLocalMetastore() + int replica = GlobalStateMgr.getCurrentState().getStarRocksMetadata() .mayGetTable(LOADS_HISTORY_DB_NAME, LOADS_HISTORY_TABLE_NAME) .map(tbl -> ((OlapTable) 
tbl).getPartitionInfo().getMinReplicationNum()) .orElse((short) 1); diff --git a/fe/fe-core/src/main/java/com/starrocks/load/loadv2/SparkLoadJob.java b/fe/fe-core/src/main/java/com/starrocks/load/loadv2/SparkLoadJob.java index d9bbfc59054209..33e609fb407ed3 100644 --- a/fe/fe-core/src/main/java/com/starrocks/load/loadv2/SparkLoadJob.java +++ b/fe/fe-core/src/main/java/com/starrocks/load/loadv2/SparkLoadJob.java @@ -493,7 +493,7 @@ private Set submitPushTasks() throws UserException { for (Map.Entry> entry : tableToLoadPartitions.entrySet()) { long tableId = entry.getKey(); OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getId(), tableId); + .getTable(db.getId(), tableId); if (table == null) { LOG.warn("table does not exist. id: {}", tableId); continue; @@ -506,12 +506,13 @@ private Set submitPushTasks() throws UserException { LOG.warn("partition does not exist. id: {}", partitionId); continue; } - long partitionVersion = partition.getVisibleVersion(); + long partitionVersion = partition.getDefaultPhysicalPartition().getVisibleVersion(); hasLoadPartitions = true; int quorumReplicaNum = table.getPartitionInfo().getQuorumNum(partitionId, table.writeQuorum()); - List indexes = partition.getMaterializedIndices(IndexExtState.ALL); + List indexes = partition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.ALL); for (MaterializedIndex index : indexes) { long indexId = index.getId(); int schemaHash = indexToSchemaHash.get(indexId); diff --git a/fe/fe-core/src/main/java/com/starrocks/load/pipe/Pipe.java b/fe/fe-core/src/main/java/com/starrocks/load/pipe/Pipe.java index 834a1fd5433509..5ff1a3ce2b5751 100644 --- a/fe/fe-core/src/main/java/com/starrocks/load/pipe/Pipe.java +++ b/fe/fe-core/src/main/java/com/starrocks/load/pipe/Pipe.java @@ -314,7 +314,7 @@ private void buildNewTasks() { long taskId = GlobalStateMgr.getCurrentState().getNextId(); PipeId pipeId = getPipeId(); String uniqueName = PipeTaskDesc.genUniqueTaskName(getName(), taskId, 0); - String dbName = GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(pipeId.getDbId()) + String dbName = GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetDb(pipeId.getDbId()) .map(Database::getOriginName) .orElseThrow(() -> ErrorReport.buildSemanticException(ErrorCode.ERR_BAD_DB_ERROR)); String sqlTask = FilePipeSource.buildInsertSql(this, piece, uniqueName); diff --git a/fe/fe-core/src/main/java/com/starrocks/load/pipe/PipeManager.java b/fe/fe-core/src/main/java/com/starrocks/load/pipe/PipeManager.java index f17b8cc9f25e50..52e74aaedf74a3 100644 --- a/fe/fe-core/src/main/java/com/starrocks/load/pipe/PipeManager.java +++ b/fe/fe-core/src/main/java/com/starrocks/load/pipe/PipeManager.java @@ -184,7 +184,7 @@ protected void updatePipe(Pipe pipe) { } private Pair resolvePipeNameUnlock(PipeName name) { - long dbId = GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(name.getDbName()) + long dbId = GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetDb(name.getDbName()) .map(Database::getId) .orElseThrow(() -> ErrorReport.buildSemanticException(ErrorCode.ERR_NO_DB_ERROR)); return Pair.create(dbId, name.getPipeName()); diff --git a/fe/fe-core/src/main/java/com/starrocks/load/pipe/filelist/RepoCreator.java b/fe/fe-core/src/main/java/com/starrocks/load/pipe/filelist/RepoCreator.java index aea6240a646a2d..4ee369cf69c42d 100644 --- a/fe/fe-core/src/main/java/com/starrocks/load/pipe/filelist/RepoCreator.java +++ 
b/fe/fe-core/src/main/java/com/starrocks/load/pipe/filelist/RepoCreator.java @@ -71,7 +71,7 @@ public static void createTable() throws UserException { public static boolean correctTable() { int numBackends = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getTotalBackendNumber(); int replica = GlobalStateMgr.getCurrentState() - .getLocalMetastore().mayGetTable(FileListTableRepo.FILE_LIST_DB_NAME, FileListTableRepo.FILE_LIST_TABLE_NAME) + .getStarRocksMetadata().mayGetTable(FileListTableRepo.FILE_LIST_DB_NAME, FileListTableRepo.FILE_LIST_TABLE_NAME) .map(tbl -> ((OlapTable) tbl).getPartitionInfo().getMinReplicationNum()) .orElse((short) 1); if (numBackends < 3) { diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/BDBDatabase.java b/fe/fe-core/src/main/java/com/starrocks/meta/BDBDatabase.java new file mode 100644 index 00000000000000..2d56f26eee81b4 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/meta/BDBDatabase.java @@ -0,0 +1,33 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package com.starrocks.meta; + +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.Environment; + +public class BDBDatabase { + public static Database openDatabase(Environment environment, String db) { + DatabaseConfig databaseConfig = new DatabaseConfig(); + databaseConfig.setTransactional(true); + databaseConfig.setAllowCreate(true); + databaseConfig.setReadOnly(false); + databaseConfig.setSortedDuplicatesVoid(false); + return environment.openDatabase(null, db, databaseConfig); + } + + public static void truncateDatabase(Environment environment, String db) { + environment.truncateDatabase(null, db, false); + } +} diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/BDBTransaction.java b/fe/fe-core/src/main/java/com/starrocks/meta/BDBTransaction.java new file mode 100644 index 00000000000000..1d548f59fd456c --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/meta/BDBTransaction.java @@ -0,0 +1,33 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package com.starrocks.meta;
+
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+
+import java.util.concurrent.TimeUnit;
+
+public class BDBTransaction {
+    public static Transaction startTransaction(Environment environment) {
+        TransactionConfig transactionConfig = new TransactionConfig();
+        transactionConfig.setTxnTimeout(500, TimeUnit.SECONDS);
+        transactionConfig.setDurability(new Durability(Durability.SyncPolicy.SYNC,
+                Durability.SyncPolicy.SYNC, Durability.ReplicaAckPolicy.ALL));
+
+        return environment.beginTransaction(null, transactionConfig);
+    }
+}
+
diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/ByteCoder.java b/fe/fe-core/src/main/java/com/starrocks/meta/ByteCoder.java
new file mode 100644
index 00000000000000..cf580fb51038d4
--- /dev/null
+++ b/fe/fe-core/src/main/java/com/starrocks/meta/ByteCoder.java
@@ -0,0 +1,36 @@
+// Copyright 2021-present StarRocks, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.starrocks.meta;
+
+import com.google.common.base.Joiner;
+import org.apache.hadoop.util.Lists;
+
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+
+public class ByteCoder {
+    public static byte[] encode(List<String> params) {
+        return Joiner.on("/").join(params).getBytes(StandardCharsets.UTF_8);
+    }
+
+    public static byte[] encode(String... params) {
+        return Joiner.on("/").join(params).getBytes(StandardCharsets.UTF_8);
+    }
+
+    public static List<String> decode(byte[] params) {
+        String s = new String(params, StandardCharsets.UTF_8);
+        return Lists.newArrayList(s.split("/"));
+    }
+}
diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/EditLogCommitter.java b/fe/fe-core/src/main/java/com/starrocks/meta/EditLogCommitter.java
new file mode 100644
index 00000000000000..53dd2b6b98b460
--- /dev/null
+++ b/fe/fe-core/src/main/java/com/starrocks/meta/EditLogCommitter.java
@@ -0,0 +1,27 @@
+// Copyright 2021-present StarRocks, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.starrocks.meta; + +import com.google.common.collect.Lists; +import com.starrocks.persist.OperationType; + +import java.util.List; + +public class EditLogCommitter { + public List ops = Lists.newArrayList( + OperationType.OP_CREATE_TABLE_V2 + ); + + +} diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/LocalMetastoreInterface.java b/fe/fe-core/src/main/java/com/starrocks/meta/LocalMetastoreInterface.java new file mode 100644 index 00000000000000..cc6c969a430347 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/meta/LocalMetastoreInterface.java @@ -0,0 +1,191 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.starrocks.meta; + +import com.starrocks.catalog.Database; +import com.starrocks.catalog.LocalTablet; +import com.starrocks.catalog.MaterializedIndex; +import com.starrocks.catalog.OlapTable; +import com.starrocks.catalog.Partition; +import com.starrocks.catalog.PartitionInfo; +import com.starrocks.catalog.PhysicalPartition; +import com.starrocks.catalog.Replica; +import com.starrocks.catalog.Table; +import com.starrocks.catalog.Tablet; +import com.starrocks.common.DdlException; +import com.starrocks.persist.AddPartitionsInfoV2; +import com.starrocks.persist.AddSubPartitionsInfoV2; +import com.starrocks.persist.AlterMaterializedViewBaseTableInfosLog; +import com.starrocks.persist.AlterMaterializedViewStatusLog; +import com.starrocks.persist.AlterViewInfo; +import com.starrocks.persist.BackendTabletsInfo; +import com.starrocks.persist.BatchDeleteReplicaInfo; +import com.starrocks.persist.BatchDropInfo; +import com.starrocks.persist.ChangeMaterializedViewRefreshSchemeLog; +import com.starrocks.persist.ColumnRenameInfo; +import com.starrocks.persist.ConsistencyCheckInfo; +import com.starrocks.persist.CreateTableInfo; +import com.starrocks.persist.DatabaseInfo; +import com.starrocks.persist.DropInfo; +import com.starrocks.persist.DropPartitionInfo; +import com.starrocks.persist.DropPartitionsInfo; +import com.starrocks.persist.ModifyPartitionInfo; +import com.starrocks.persist.ModifyTablePropertyOperationLog; +import com.starrocks.persist.PartitionVersionRecoveryInfo; +import com.starrocks.persist.RenameMaterializedViewLog; +import com.starrocks.persist.ReplacePartitionOperationLog; +import com.starrocks.persist.ReplicaPersistInfo; +import com.starrocks.persist.SetReplicaStatusOperationLog; +import com.starrocks.persist.SwapTableOperationLog; +import com.starrocks.persist.TableInfo; +import com.starrocks.persist.TruncateTableInfo; +import com.starrocks.sql.ast.PartitionDesc; +import com.starrocks.thrift.TTabletMetaType; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +public interface LocalMetastoreInterface { + void createDb(Database db, String storageVolumeId); + + void dropDb(Database db, boolean isForceDrop); + + void recoverDatabase(Database db); + + void alterDatabaseQuota(DatabaseInfo dbInfo); + + 
void renameDatabase(String dbName, String newDbName);
+
+    List<String> listDbNames();
+
+    ConcurrentHashMap<Long, Database> getIdToDb();
+
+    List<Long> getDbIds();
+
+    ConcurrentHashMap<String, Database> getFullNameToDb();
+
+    Database getDb(String name);
+
+    Database getDb(long dbId);
+
+    void createTable(CreateTableInfo createTableInfo);
+
+    void dropTable(DropInfo dropInfo);
+
+    void renameTable(TableInfo tableInfo);
+
+    void truncateTable(TruncateTableInfo info);
+
+    void swapTable(SwapTableOperationLog log);
+
+    void updateTableMeta(Database db, String tableName, Map<String, String> properties, TTabletMetaType metaType)
+            throws DdlException;
+
+    void alterTable(ModifyTablePropertyOperationLog log);
+
+    void renameColumn(ColumnRenameInfo columnRenameInfo);
+
+    List<String> listTableNames(String dbName);
+
+    List<Table> getTables(Long dbId);
+
+    Table getTable(String dbName, String tblName);
+
+    Table getTable(Long dbId, Long tableId);
+
+    void modifyViewDef(AlterViewInfo alterViewInfo);
+
+    void renameMaterializedView(RenameMaterializedViewLog log);
+
+    void alterMvBaseTableInfos(AlterMaterializedViewBaseTableInfosLog log);
+
+    void alterMvStatus(AlterMaterializedViewStatusLog log);
+
+    void alterMaterializedViewProperties(ModifyTablePropertyOperationLog log);
+
+    void changeMaterializedRefreshScheme(ChangeMaterializedViewRefreshSchemeLog log);
+
+    void addPartitionLog(Database db, OlapTable olapTable, List<PartitionDesc> partitionDescs,
+                         boolean isTempPartition, PartitionInfo partitionInfo,
+                         List<Partition> partitionList, Set<String> existPartitionNameSet) throws DdlException;
+
+    void addPartition(AddPartitionsInfoV2 addPartitionsInfo);
+
+    void dropPartition(DropPartitionInfo dropPartitionInfo);
+
+    void dropPartitions(DropPartitionsInfo dropPartitionsInfo);
+
+    void renamePartition(TableInfo tableInfo);
+
+    void replaceTempPartition(ReplacePartitionOperationLog info);
+
+    void modifyPartition(ModifyPartitionInfo info);
+
+    void setPartitionVersion(PartitionVersionRecoveryInfo info);
+
+    void addSubPartitionLog(AddSubPartitionsInfoV2 addSubPartitionsInfo);
+
+    List<PhysicalPartition> getAllPhysicalPartition(Partition partition);
+
+    PhysicalPartition getPhysicalPartition(Partition partition, Long physicalPartitionId);
+
+    void addPhysicalPartition(Partition partition, PhysicalPartition physicalPartition);
+
+    void dropPhysicalPartition(Partition partition, Long physicalPartitionId);
+
+    List<MaterializedIndex> getMaterializedIndices(PhysicalPartition physicalPartition,
+                                                   MaterializedIndex.IndexExtState indexExtState);
+
+    MaterializedIndex getMaterializedIndex(PhysicalPartition physicalPartition, Long mIndexId);
+
+    void addMaterializedIndex(PhysicalPartition physicalPartition, MaterializedIndex materializedIndex);
+
+    void dropMaterializedIndex(PhysicalPartition physicalPartition, Long mIndexId);
+
+    void dropRollup(DropInfo dropInfo);
+
+    void batchDropRollup(BatchDropInfo batchDropInfo);
+
+    void renameRollup(TableInfo tableInfo);
+
+    List<Tablet> getAllTablets(MaterializedIndex materializedIndex);
+
+    List<Long> getAllTabletIDs(MaterializedIndex materializedIndex);
+
+    Tablet getTablet(MaterializedIndex materializedIndex, Long tabletId);
+
+    //void addTablet(MaterializedIndex materializedIndex, Tablet tablet, TabletMeta tabletMeta);
+
+    List<Replica> getAllReplicas(Tablet tablet);
+
+    Replica getReplica(LocalTablet tablet, Long replicaId);
+
+    void addReplica(ReplicaPersistInfo replicaPersistInfo);
+
+    void deleteReplica(ReplicaPersistInfo replicaPersistInfo);
+
+    void batchDeleteReplicaInfo(BatchDeleteReplicaInfo replicaPersistInfo);
+
+    void updateReplica(ReplicaPersistInfo replicaPersistInfo);
+
+    void
setReplicaStatus(SetReplicaStatusOperationLog log); + + void backendTabletsInfo(BackendTabletsInfo backendTabletsInfo); + + void finishConsistencyCheck(ConsistencyCheckInfo info); +} + diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/LocalMetastoreV2.java b/fe/fe-core/src/main/java/com/starrocks/meta/LocalMetastoreV2.java new file mode 100644 index 00000000000000..297e3c14c11634 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/meta/LocalMetastoreV2.java @@ -0,0 +1,468 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package com.starrocks.meta; + +import com.sleepycat.je.Transaction; +import com.starrocks.catalog.Database; +import com.starrocks.catalog.LocalTablet; +import com.starrocks.catalog.MaterializedIndex; +import com.starrocks.catalog.OlapTable; +import com.starrocks.catalog.Partition; +import com.starrocks.catalog.PartitionInfo; +import com.starrocks.catalog.PhysicalPartition; +import com.starrocks.catalog.Replica; +import com.starrocks.catalog.Table; +import com.starrocks.catalog.TableProperty; +import com.starrocks.catalog.Tablet; +import com.starrocks.common.DdlException; +import com.starrocks.persist.AddPartitionsInfoV2; +import com.starrocks.persist.AddSubPartitionsInfoV2; +import com.starrocks.persist.AlterMaterializedViewBaseTableInfosLog; +import com.starrocks.persist.AlterMaterializedViewStatusLog; +import com.starrocks.persist.AlterViewInfo; +import com.starrocks.persist.BackendTabletsInfo; +import com.starrocks.persist.BatchDeleteReplicaInfo; +import com.starrocks.persist.BatchDropInfo; +import com.starrocks.persist.ChangeMaterializedViewRefreshSchemeLog; +import com.starrocks.persist.ColumnRenameInfo; +import com.starrocks.persist.ConsistencyCheckInfo; +import com.starrocks.persist.CreateDbInfo; +import com.starrocks.persist.CreateTableInfo; +import com.starrocks.persist.DatabaseInfo; +import com.starrocks.persist.DropInfo; +import com.starrocks.persist.DropPartitionInfo; +import com.starrocks.persist.DropPartitionsInfo; +import com.starrocks.persist.ModifyPartitionInfo; +import com.starrocks.persist.ModifyTablePropertyOperationLog; +import com.starrocks.persist.OperationType; +import com.starrocks.persist.PartitionVersionRecoveryInfo; +import com.starrocks.persist.RenameMaterializedViewLog; +import com.starrocks.persist.ReplacePartitionOperationLog; +import com.starrocks.persist.ReplicaPersistInfo; +import com.starrocks.persist.SetReplicaStatusOperationLog; +import com.starrocks.persist.SwapTableOperationLog; +import com.starrocks.persist.TableInfo; +import com.starrocks.persist.TruncateTableInfo; +import com.starrocks.persist.gson.GsonUtils; +import com.starrocks.server.GlobalStateMgr; +import com.starrocks.sql.ast.PartitionDesc; +import com.starrocks.thrift.TTabletMetaType; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +public class LocalMetastoreV2 implements LocalMetastoreInterface { + @Override + 
public void createDb(Database db, String storageVolumeId) { + MetadataHandler metadataHandler = GlobalStateMgr.getCurrentState().getMetadataHandler(); + Transaction transaction = metadataHandler.starTransaction(); + metadataHandler.put(transaction, + ByteCoder.encode("meta object", "db", String.valueOf(db.getId())), + GsonUtils.GSON.toJson(db, Database.class)); + metadataHandler.put(transaction, + ByteCoder.encode("meta name", "db", db.getFullName()), + String.valueOf(db.getId())); + metadataHandler.put(transaction, + ByteCoder.encode("meta id", "instance", String.valueOf(db.getId())), + ""); + + CreateDbInfo createDbInfo = new CreateDbInfo(db.getId(), db.getFullName()); + createDbInfo.setStorageVolumeId(storageVolumeId); + createDbInfo.setTransaction(transaction); + GlobalStateMgr.getCurrentState().getEditLog().logJsonObject(OperationType.OP_CREATE_DB_V2, createDbInfo); + } + + @Override + public void dropDb(Database db, boolean isForceDrop) { + + } + + @Override + public void recoverDatabase(Database db) { + + } + + @Override + public void alterDatabaseQuota(DatabaseInfo dbInfo) { + + } + + @Override + public void renameDatabase(String dbName, String newDbName) { + + } + + @Override + public List listDbNames() { + return null; + } + + @Override + public ConcurrentHashMap getIdToDb() { + return null; + } + + @Override + public List getDbIds() { + return null; + } + + @Override + public ConcurrentHashMap getFullNameToDb() { + return null; + } + + @Override + public Database getDb(String name) { + MetadataHandler metadataHandler = GlobalStateMgr.getCurrentState().getMetadataHandler(); + Transaction transaction = metadataHandler.starTransaction(); + Long databaseId = metadataHandler.get(transaction, ByteCoder.encode("meta name", "db", name), Long.class); + + String value = metadataHandler.get(transaction, + ByteCoder.encode("meta object", "db", String.valueOf(databaseId)), String.class); + + Database database = GsonUtils.GSON.fromJson(value, Database.class); + return database; + } + + @Override + public Database getDb(long dbId) { + return null; + } + + @Override + public void createTable(CreateTableInfo createTableInfo) { + + } + + @Override + public void dropTable(DropInfo dropInfo) { + + } + + @Override + public void renameTable(TableInfo tableInfo) { + + } + + @Override + public void truncateTable(TruncateTableInfo info) { + + } + + @Override + public void swapTable(SwapTableOperationLog log) { + + } + + @Override + public void alterTable(ModifyTablePropertyOperationLog log) { + + } + + @Override + public void renameColumn(ColumnRenameInfo columnRenameInfo) { + + } + + @Override + public void updateTableMeta(Database db, String tableName, Map properties, TTabletMetaType metaType) { + MetadataHandler metadataHandler = GlobalStateMgr.getCurrentState().getMetadataHandler(); + Transaction transaction = metadataHandler.starTransaction(); + + long tabletId = 0; + String olapTableJson = metadataHandler.get(transaction, ByteCoder.encode(String.valueOf(tabletId)), String.class); + OlapTable table = GsonUtils.GSON.fromJson(olapTableJson, OlapTable.class); + TableProperty tableProperty = table.getTableProperty(); + if (tableProperty == null) { + tableProperty = new TableProperty(properties); + table.setTableProperty(tableProperty); + } else { + tableProperty.modifyTableProperties(properties); + } + + metadataHandler.put(transaction, + ByteCoder.encode("meta object", "table", String.valueOf(table.getId())), + GsonUtils.GSON.toJson(table, OlapTable.class)); + + ModifyTablePropertyOperationLog info = 
+ new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); + info.setTransaction(transaction); + + switch (metaType) { + case INMEMORY: + GlobalStateMgr.getCurrentState().getEditLog().logModifyInMemory(info); + break; + case WRITE_QUORUM: + GlobalStateMgr.getCurrentState().getEditLog().logModifyWriteQuorum(info); + break; + } + } + + /* + @Override + public void modifyTableProperty(Database db, OlapTable table, Map properties, short operationType) { + MetadataHandler metadataHandler = GlobalStateMgr.getCurrentState().getMetadataHandler(); + Transaction transaction = metadataHandler.starTransaction(); + + String olapTableJson = metadataHandler.get(transaction, ByteCoder.encode(String.valueOf(table.getId())), String.class); + OlapTable olapTable = GsonUtils.GSON.fromJson(olapTableJson, OlapTable.class); + TableProperty tableProperty = olapTable.getTableProperty(); + if (tableProperty == null) { + tableProperty = new TableProperty(properties); + olapTable.setTableProperty(tableProperty); + } else { + tableProperty.modifyTableProperties(properties); + } + + metadataHandler.put(transaction, + ByteCoder.encode("meta object", "table", String.valueOf(table.getId())), + GsonUtils.GSON.toJson(table, OlapTable.class)); + + ModifyTablePropertyOperationLog info = + new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); + info.setTransaction(transaction); + + switch (operationType) { + case OperationType.OP_MODIFY_IN_MEMORY: + tableProperty.buildInMemory(); + GlobalStateMgr.getCurrentState().getEditLog().logModifyInMemory(info); + break; + case OperationType.OP_MODIFY_WRITE_QUORUM: + GlobalStateMgr.getCurrentState().getEditLog().logModifyWriteQuorum(info); + break; + } + } + + */ + + @Override + public List listTableNames(String dbName) { + return null; + } + + @Override + public List
getTables(Long dbId) { + return null; + } + + @Override + public Table getTable(String dbName, String tblName) { + return null; + } + + @Override + public Table getTable(Long dbId, Long tableId) { + return null; + } + + @Override + public void modifyViewDef(AlterViewInfo alterViewInfo) { + + } + + @Override + public void renameMaterializedView(RenameMaterializedViewLog log) { + + } + + @Override + public void alterMvBaseTableInfos(AlterMaterializedViewBaseTableInfosLog log) { + + } + + @Override + public void alterMvStatus(AlterMaterializedViewStatusLog log) { + + } + + @Override + public void alterMaterializedViewProperties(ModifyTablePropertyOperationLog log) { + + } + + @Override + public void changeMaterializedRefreshScheme(ChangeMaterializedViewRefreshSchemeLog log) { + + } + + @Override + public void addPartitionLog(Database db, + OlapTable olapTable, + List partitionDescs, + boolean isTempPartition, + PartitionInfo partitionInfo, + List partitionList, + Set existPartitionNameSet) throws DdlException { + + } + + @Override + public void addPartition(AddPartitionsInfoV2 addPartitionsInfo) { + + } + + @Override + public void dropPartition(DropPartitionInfo dropPartitionInfo) { + + } + + @Override + public void dropPartitions(DropPartitionsInfo dropPartitionsInfo) { + + } + + @Override + public void renamePartition(TableInfo tableInfo) { + + } + + @Override + public void replaceTempPartition(ReplacePartitionOperationLog info) { + + } + + @Override + public void modifyPartition(ModifyPartitionInfo info) { + + } + + @Override + public void setPartitionVersion(PartitionVersionRecoveryInfo info) { + + } + + @Override + public void addSubPartitionLog(AddSubPartitionsInfoV2 addSubPartitionsInfo) { + + } + + @Override + public List getAllPhysicalPartition(Partition partition) { + return null; + } + + @Override + public PhysicalPartition getPhysicalPartition(Partition partition, Long physicalPartitionId) { + return null; + } + + @Override + public void addPhysicalPartition(Partition partition, PhysicalPartition physicalPartition) { + + } + + @Override + public void dropPhysicalPartition(Partition partition, Long physicalPartitionId) { + + } + + @Override + public List getMaterializedIndices(PhysicalPartition physicalPartition, + MaterializedIndex.IndexExtState indexExtState) { + return null; + } + + @Override + public MaterializedIndex getMaterializedIndex(PhysicalPartition physicalPartition, Long mIndexId) { + return null; + } + + @Override + public void addMaterializedIndex(PhysicalPartition physicalPartition, MaterializedIndex materializedIndex) { + + } + + @Override + public void dropMaterializedIndex(PhysicalPartition physicalPartition, Long mIndexId) { + + } + + @Override + public void dropRollup(DropInfo dropInfo) { + + } + + @Override + public void batchDropRollup(BatchDropInfo batchDropInfo) { + + } + + @Override + public void renameRollup(TableInfo tableInfo) { + + } + + @Override + public List getAllTablets(MaterializedIndex materializedIndex) { + return null; + } + + @Override + public List getAllTabletIDs(MaterializedIndex materializedIndex) { + return null; + } + + @Override + public Tablet getTablet(MaterializedIndex materializedIndex, Long tabletId) { + return null; + } + + @Override + public List getAllReplicas(Tablet tablet) { + return null; + } + + @Override + public Replica getReplica(LocalTablet tablet, Long replicaId) { + return null; + } + + @Override + public void addReplica(ReplicaPersistInfo replicaPersistInfo) { + + } + + @Override + public void 
deleteReplica(ReplicaPersistInfo replicaPersistInfo) { + + } + + @Override + public void batchDeleteReplicaInfo(BatchDeleteReplicaInfo replicaPersistInfo) { + + } + + @Override + public void updateReplica(ReplicaPersistInfo replicaPersistInfo) { + + } + + @Override + public void setReplicaStatus(SetReplicaStatusOperationLog log) { + + } + + @Override + public void backendTabletsInfo(BackendTabletsInfo backendTabletsInfo) { + + } + + @Override + public void finishConsistencyCheck(ConsistencyCheckInfo info) { + + } +} diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/MetadataHandler.java b/fe/fe-core/src/main/java/com/starrocks/meta/MetadataHandler.java new file mode 100644 index 00000000000000..fc287e02cae64d --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/meta/MetadataHandler.java @@ -0,0 +1,139 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package com.starrocks.meta; + +import com.sleepycat.bind.tuple.TupleBinding; +import com.sleepycat.je.Cursor; +import com.sleepycat.je.Database; +import com.sleepycat.je.DatabaseConfig; +import com.sleepycat.je.DatabaseEntry; +import com.sleepycat.je.Durability; +import com.sleepycat.je.LockMode; +import com.sleepycat.je.OperationStatus; +import com.sleepycat.je.Transaction; +import com.sleepycat.je.TransactionConfig; +import com.sleepycat.je.dbi.DupKeyData; +import com.starrocks.journal.bdbje.BDBEnvironment; +import com.starrocks.server.GlobalStateMgr; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +public class MetadataHandler { + public static MetadataHandler getInstance() { + return GlobalStateMgr.getCurrentState().getMetadataHandler(); + } + + private final BDBEnvironment bdbEnvironment; + private final Database database; + + public MetadataHandler(BDBEnvironment bdbEnvironment) { + this.bdbEnvironment = bdbEnvironment; + + DatabaseConfig dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + if (GlobalStateMgr.getCurrentState().isElectable()) { + dbConfig.setAllowCreate(true); + dbConfig.setReadOnly(false); + } else { + dbConfig.setAllowCreate(false); + dbConfig.setReadOnly(true); + } + this.database = bdbEnvironment.getReplicatedEnvironment().openDatabase(null, "meta", dbConfig); + } + + public Transaction starTransaction() { + TransactionConfig transactionConfig = new TransactionConfig(); + transactionConfig.setTxnTimeout(500, TimeUnit.SECONDS); + transactionConfig.setDurability(new Durability(Durability.SyncPolicy.SYNC, + Durability.SyncPolicy.SYNC, Durability.ReplicaAckPolicy.ALL)); + + return bdbEnvironment.getReplicatedEnvironment().beginTransaction(null, transactionConfig); + } + + public OperationStatus put(Transaction transaction, String keyS, String valueS) { + TupleBinding binding = TupleBinding.getPrimitiveBinding(String.class); + DatabaseEntry key = new DatabaseEntry(); + binding.objectToEntry(keyS, key); + + DatabaseEntry value = new DatabaseEntry(); + 
binding.objectToEntry(valueS, value);
+        return database.put(transaction, key, value);
+    }
+
+    public OperationStatus put(Transaction transaction, byte[] keyS, String valueS) {
+        TupleBinding<String> binding = TupleBinding.getPrimitiveBinding(String.class);
+        DatabaseEntry key = new DatabaseEntry(keyS);
+        DatabaseEntry value = new DatabaseEntry();
+        binding.objectToEntry(valueS, value);
+        return database.put(transaction, key, value);
+    }
+
+    public byte[] get(Transaction transaction, byte[] keyS) {
+        TupleBinding<String> binding = TupleBinding.getPrimitiveBinding(String.class);
+        DatabaseEntry key = new DatabaseEntry(keyS);
+        DatabaseEntry result = new DatabaseEntry();
+        database.get(transaction, key, result, LockMode.READ_COMMITTED);
+
+        binding.entryToObject(result);
+        return result.getData();
+    }
+
+    public <T> T get(Transaction transaction, byte[] key, Class<T> c) {
+        TupleBinding<T> binding = TupleBinding.getPrimitiveBinding(c);
+        DatabaseEntry databaseEntry = new DatabaseEntry(key);
+        DatabaseEntry result = new DatabaseEntry();
+        OperationStatus status = database.get(transaction, databaseEntry, result, LockMode.READ_COMMITTED);
+        if (status.equals(OperationStatus.NOTFOUND)) {
+            return null;
+        } else {
+            return binding.entryToObject(result);
+        }
+    }
+
+    public List<byte[]> getPrefix(Transaction transaction, byte[] prefix) {
+        Cursor cursor = database.openCursor(transaction, null);
+
+        DatabaseEntry key = new DatabaseEntry(prefix);
+
+        DatabaseEntry prefixStart = new DatabaseEntry(prefix);
+        DatabaseEntry prefixEnd = new DatabaseEntry(DupKeyData.makePrefixKey(key.getData(), key.getOffset(), key.getSize()));
+
+        DatabaseEntry noReturnData = new DatabaseEntry();
+        noReturnData.setPartial(0, 0, true);
+        cursor.getSearchKeyRange(key, noReturnData, LockMode.READ_COMMITTED);
+
+        List<byte[]> keyList = new ArrayList<>();
+        do {
+            if (DupKeyData.compareMainKey(
+                    key.getData(),
+                    prefixEnd.getData(),
+                    prefixEnd.getOffset(),
+                    prefixEnd.getSize(),
+                    database.getConfig().getBtreeComparator()) == 0) {
+                break;
+            }
+
+            keyList.add(key.getData());
+
+        } while (cursor.getNext(key, noReturnData, LockMode.READ_COMMITTED) == OperationStatus.SUCCESS);
+
+        return keyList;
+    }
+
+    public OperationStatus delete(Transaction transaction, byte[] keyS) {
+        return null;
+    }
+}
diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/ReplicaHierarchyId.java b/fe/fe-core/src/main/java/com/starrocks/meta/ReplicaHierarchyId.java
new file mode 100644
index 00000000000000..0d3953a2fdda80
--- /dev/null
+++ b/fe/fe-core/src/main/java/com/starrocks/meta/ReplicaHierarchyId.java
@@ -0,0 +1,40 @@
+// Copyright 2021-present StarRocks, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.starrocks.meta;
+
+public class ReplicaHierarchyId {
+    public final long dbId;
+    public final long tableId;
+    public final long partitionId;
+    public final long physicalPartitionId;
+    public final long materializedIndexId;
+    public final long tabletId;
+    public final long replicaId;
+
+    public ReplicaHierarchyId(long dbId,
+                              long tableId,
+                              long partitionId,
+                              long physicalPartitionId,
+                              long materializedIndexId,
+                              long tabletId,
+                              long replicaId) {
+        this.dbId = dbId;
+        this.tableId = tableId;
+        this.partitionId = partitionId;
+        this.physicalPartitionId = physicalPartitionId;
+        this.materializedIndexId = materializedIndexId;
+        this.tabletId = tabletId;
+        this.replicaId = replicaId;
+    }
+}
diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/StarRocksMetadata.java b/fe/fe-core/src/main/java/com/starrocks/meta/StarRocksMetadata.java
new file mode 100644
index 00000000000000..f171496d12fb5f
--- /dev/null
+++ b/fe/fe-core/src/main/java/com/starrocks/meta/StarRocksMetadata.java
@@ -0,0 +1,2661 @@
+// Copyright 2021-present StarRocks, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.starrocks.meta;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Range;
+import com.google.common.collect.Sets;
+import com.staros.proto.FilePathInfo;
+import com.starrocks.alter.AlterJobExecutor;
+import com.starrocks.alter.AlterMVJobExecutor;
+import com.starrocks.alter.MaterializedViewHandler;
+import com.starrocks.analysis.Expr;
+import com.starrocks.analysis.FunctionCallExpr;
+import com.starrocks.analysis.HintNode;
+import com.starrocks.analysis.IntLiteral;
+import com.starrocks.analysis.SetVarHint;
+import com.starrocks.analysis.SlotRef;
+import com.starrocks.analysis.StringLiteral;
+import com.starrocks.analysis.TableName;
+import com.starrocks.analysis.TableRef;
+import com.starrocks.analysis.UserVariableHint;
+import com.starrocks.catalog.CatalogRecycleBin;
+import com.starrocks.catalog.CatalogUtils;
+import com.starrocks.catalog.ColocateGroupSchema;
+import com.starrocks.catalog.Column;
+import com.starrocks.catalog.DataProperty;
+import com.starrocks.catalog.Database;
+import com.starrocks.catalog.DistributionInfo;
+import com.starrocks.catalog.FunctionSet;
+import com.starrocks.catalog.HashDistributionInfo;
+import com.starrocks.catalog.Index;
+import com.starrocks.catalog.KeysType;
+import com.starrocks.catalog.ListPartitionInfo;
+import com.starrocks.catalog.LocalTablet;
+import com.starrocks.catalog.MaterializedIndex;
+import com.starrocks.catalog.MaterializedIndexMeta;
+import com.starrocks.catalog.MaterializedView;
+import com.starrocks.catalog.MvId;
+import com.starrocks.catalog.OlapTable;
+import com.starrocks.catalog.Partition;
+import com.starrocks.catalog.PartitionInfo;
+import com.starrocks.catalog.PartitionKey;
+import com.starrocks.catalog.PartitionType;
+import com.starrocks.catalog.PhysicalPartition; +import com.starrocks.catalog.PrimitiveType; +import com.starrocks.catalog.RandomDistributionInfo; +import com.starrocks.catalog.RangePartitionInfo; +import com.starrocks.catalog.Replica; +import com.starrocks.catalog.SinglePartitionInfo; +import com.starrocks.catalog.Table; +import com.starrocks.catalog.TableProperty; +import com.starrocks.catalog.Tablet; +import com.starrocks.catalog.TabletInvertedIndex; +import com.starrocks.catalog.TabletMeta; +import com.starrocks.catalog.View; +import com.starrocks.common.AlreadyExistsException; +import com.starrocks.common.AnalysisException; +import com.starrocks.common.Config; +import com.starrocks.common.DdlException; +import com.starrocks.common.ErrorCode; +import com.starrocks.common.ErrorReport; +import com.starrocks.common.ErrorReportException; +import com.starrocks.common.InvalidOlapTableStateException; +import com.starrocks.common.MaterializedViewExceptions; +import com.starrocks.common.MetaNotFoundException; +import com.starrocks.common.Pair; +import com.starrocks.common.UserException; +import com.starrocks.common.util.DynamicPartitionUtil; +import com.starrocks.common.util.PropertyAnalyzer; +import com.starrocks.common.util.TimeUtils; +import com.starrocks.common.util.Util; +import com.starrocks.common.util.concurrent.lock.LockType; +import com.starrocks.common.util.concurrent.lock.Locker; +import com.starrocks.connector.ConnectorMetadata; +import com.starrocks.lake.DataCacheInfo; +import com.starrocks.lake.LakeMaterializedView; +import com.starrocks.lake.LakeTable; +import com.starrocks.lake.StorageInfo; +import com.starrocks.load.pipe.PipeManager; +import com.starrocks.mv.analyzer.MVPartitionExprResolver; +import com.starrocks.persist.AddSubPartitionsInfoV2; +import com.starrocks.persist.CreateTableInfo; +import com.starrocks.persist.DatabaseInfo; +import com.starrocks.persist.DropInfo; +import com.starrocks.persist.DropPartitionInfo; +import com.starrocks.persist.DropPartitionsInfo; +import com.starrocks.persist.ModifyTablePropertyOperationLog; +import com.starrocks.persist.PartitionVersionRecoveryInfo; +import com.starrocks.persist.PhysicalPartitionPersistInfoV2; +import com.starrocks.persist.ReplacePartitionOperationLog; +import com.starrocks.persist.SetReplicaStatusOperationLog; +import com.starrocks.persist.TableInfo; +import com.starrocks.persist.TruncateTableInfo; +import com.starrocks.privilege.AccessDeniedException; +import com.starrocks.privilege.ObjectType; +import com.starrocks.privilege.PrivilegeType; +import com.starrocks.qe.ConnectContext; +import com.starrocks.qe.SessionVariable; +import com.starrocks.qe.VariableMgr; +import com.starrocks.scheduler.Constants; +import com.starrocks.scheduler.ExecuteOption; +import com.starrocks.scheduler.Task; +import com.starrocks.scheduler.TaskBuilder; +import com.starrocks.scheduler.TaskManager; +import com.starrocks.scheduler.TaskRun; +import com.starrocks.server.AbstractTableFactory; +import com.starrocks.server.GlobalStateMgr; +import com.starrocks.server.LocalMetastore; +import com.starrocks.server.RunMode; +import com.starrocks.server.StorageVolumeMgr; +import com.starrocks.server.TableFactoryProvider; +import com.starrocks.sql.analyzer.AnalyzerUtils; +import com.starrocks.sql.analyzer.Authorizer; +import com.starrocks.sql.ast.AddPartitionClause; +import com.starrocks.sql.ast.AdminCheckTabletsStmt; +import com.starrocks.sql.ast.AdminSetPartitionVersionStmt; +import com.starrocks.sql.ast.AdminSetReplicaStatusStmt; +import 
com.starrocks.sql.ast.AlterDatabaseQuotaStmt; +import com.starrocks.sql.ast.AlterDatabaseRenameStatement; +import com.starrocks.sql.ast.AlterMaterializedViewStmt; +import com.starrocks.sql.ast.AlterTableCommentClause; +import com.starrocks.sql.ast.AlterTableStmt; +import com.starrocks.sql.ast.AlterViewStmt; +import com.starrocks.sql.ast.AsyncRefreshSchemeDesc; +import com.starrocks.sql.ast.CancelRefreshMaterializedViewStmt; +import com.starrocks.sql.ast.CreateMaterializedViewStatement; +import com.starrocks.sql.ast.CreateMaterializedViewStmt; +import com.starrocks.sql.ast.CreateTableLikeStmt; +import com.starrocks.sql.ast.CreateTableStmt; +import com.starrocks.sql.ast.CreateTemporaryTableStmt; +import com.starrocks.sql.ast.CreateViewStmt; +import com.starrocks.sql.ast.DistributionDesc; +import com.starrocks.sql.ast.DropMaterializedViewStmt; +import com.starrocks.sql.ast.DropPartitionClause; +import com.starrocks.sql.ast.DropTableStmt; +import com.starrocks.sql.ast.ExpressionPartitionDesc; +import com.starrocks.sql.ast.IntervalLiteral; +import com.starrocks.sql.ast.PartitionDesc; +import com.starrocks.sql.ast.PartitionRangeDesc; +import com.starrocks.sql.ast.PartitionRenameClause; +import com.starrocks.sql.ast.RecoverDbStmt; +import com.starrocks.sql.ast.RecoverPartitionStmt; +import com.starrocks.sql.ast.RecoverTableStmt; +import com.starrocks.sql.ast.RefreshMaterializedViewStatement; +import com.starrocks.sql.ast.RefreshSchemeClause; +import com.starrocks.sql.ast.ReplacePartitionClause; +import com.starrocks.sql.ast.SystemVariable; +import com.starrocks.sql.ast.TableRenameClause; +import com.starrocks.sql.ast.TruncateTableStmt; +import com.starrocks.sql.common.MetaUtils; +import com.starrocks.sql.common.SyncPartitionUtils; +import com.starrocks.sql.optimizer.Utils; +import com.starrocks.task.TabletTaskExecutor; +import com.starrocks.thrift.TGetTasksParams; +import com.starrocks.thrift.TStorageMedium; +import com.starrocks.thrift.TStorageType; +import com.starrocks.thrift.TTabletType; +import org.apache.commons.collections.CollectionUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.time.LocalDateTime; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +public class StarRocksMetadata implements ConnectorMetadata { + private static final Logger LOG = LogManager.getLogger(StarRocksMetadata.class); + + @Override + public void createDb(String dbName, Map properties) throws DdlException, AlreadyExistsException { + GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); + LocalMetastore localMetastore = GlobalStateMgr.getCurrentState().getLocalMetastore(); + + long id = 0L; + if (!globalStateMgr.tryLock(false)) { + throw new DdlException("Failed to acquire globalStateMgr lock. 
Try again"); + } + try { + if (localMetastore.getDb(dbName) != null) { + throw new AlreadyExistsException("Database Already Exists"); + } else { + id = globalStateMgr.getNextId(); + Database db = new Database(id, dbName); + String volume = StorageVolumeMgr.DEFAULT; + if (properties != null && properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_VOLUME)) { + volume = properties.remove(PropertyAnalyzer.PROPERTIES_STORAGE_VOLUME); + } + if (!GlobalStateMgr.getCurrentState().getStorageVolumeMgr().bindDbToStorageVolume(volume, id)) { + throw new DdlException(String.format("Storage volume %s not exists", volume)); + } + GlobalStateMgr.getCurrentState().getGlobalTransactionMgr().addDatabaseTransactionMgr(db.getId()); + String storageVolumeId = GlobalStateMgr.getCurrentState().getStorageVolumeMgr().getStorageVolumeIdOfDb(id); + + localMetastore.createDb(db, storageVolumeId); + } + } finally { + globalStateMgr.unlock(); + } + LOG.info("createDb dbName = " + dbName + ", id = " + id); + } + + @Override + public void dropDb(String dbName, boolean isForceDrop) throws DdlException, MetaNotFoundException { + GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); + LocalMetastore localMetastore = GlobalStateMgr.getCurrentState().getLocalMetastore(); + CatalogRecycleBin recycleBin = GlobalStateMgr.getCurrentState().getRecycleBin(); + + // 1. check if database exists + Database db; + if (!globalStateMgr.tryLock(false)) { + throw new DdlException("Failed to acquire globalStateMgr lock. Try again"); + } + try { + db = localMetastore.getDb(dbName); + if (db == null) { + throw new MetaNotFoundException("Database not found"); + } + if (!isForceDrop && !db.getTemporaryTables().isEmpty()) { + throw new DdlException("The database [" + dbName + "] " + + "cannot be dropped because there are still some temporary tables in it. " + + "If you want to forcibly drop, please use \"DROP DATABASE FORCE.\""); + } + } finally { + globalStateMgr.unlock(); + } + + // 2. drop tables in db + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + if (!db.isExist()) { + throw new MetaNotFoundException("Database '" + dbName + "' not found"); + } + if (!isForceDrop && GlobalStateMgr.getCurrentState() + .getGlobalTransactionMgr().existCommittedTxns(db.getId(), null, null)) { + throw new DdlException( + "There are still some transactions in the COMMITTED state waiting to be completed. " + + "The database [" + dbName + + "] cannot be dropped. If you want to forcibly drop(cannot be recovered)," + + " please use \"DROP DATABASE FORCE\"."); + } + + // 3. remove db from globalStateMgr + // save table names for recycling + Set tableNames = new HashSet<>(db.getTableNamesViewWithLock()); + localMetastore.dropDb(db, isForceDrop); + recycleBin.recycleDatabase(db, tableNames, isForceDrop); + db.setExist(false); + + // 4. drop mv task + TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); + TGetTasksParams tasksParams = new TGetTasksParams(); + tasksParams.setDb(dbName); + List dropTaskIdList = taskManager.filterTasks(tasksParams) + .stream().map(Task::getId).collect(Collectors.toList()); + taskManager.dropTasks(dropTaskIdList, false); + + // 5. 
Drop Pipes + PipeManager pipeManager = GlobalStateMgr.getCurrentState().getPipeManager(); + pipeManager.dropPipesOfDb(dbName, db.getId()); + + LOG.info("finish drop database[{}], id: {}, is force : {}", dbName, db.getId(), isForceDrop); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } + + public void recoverDatabase(RecoverDbStmt recoverStmt) throws DdlException { + GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); + LocalMetastore localMetastore = GlobalStateMgr.getCurrentState().getLocalMetastore(); + CatalogRecycleBin recycleBin = GlobalStateMgr.getCurrentState().getRecycleBin(); + + // check is new db with same name already exist + if (getDb(recoverStmt.getDbName()) != null) { + throw new DdlException("Database[" + recoverStmt.getDbName() + "] already exist."); + } + + Database db = recycleBin.recoverDatabase(recoverStmt.getDbName()); + + // add db to globalStateMgr + if (!globalStateMgr.tryLock(false)) { + throw new DdlException("Failed to acquire globalStateMgr lock. Try again"); + } + try { + if (localMetastore.getDb(db.getFullName()) != null) { + throw new DdlException("Database[" + db.getOriginName() + "] already exist."); + // it's ok that we do not put db back to CatalogRecycleBin + // cause this db cannot recover anymore + } + + List materializedViews = db.getMaterializedViews(); + TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); + for (MaterializedView materializedView : materializedViews) { + MaterializedView.RefreshType refreshType = materializedView.getRefreshScheme().getType(); + if (refreshType != MaterializedView.RefreshType.SYNC) { + Task task = TaskBuilder.buildMvTask(materializedView, db.getFullName()); + TaskBuilder.updateTaskInfo(task, materializedView); + taskManager.createTask(task, false); + } + } + + localMetastore.recoverDatabase(db); + } finally { + globalStateMgr.unlock(); + } + + LOG.info("finish recover database, name: {}, id: {}", recoverStmt.getDbName(), db.getId()); + } + + public void alterDatabaseQuota(AlterDatabaseQuotaStmt stmt) throws DdlException { + LocalMetastore localMetastore = GlobalStateMgr.getCurrentState().getLocalMetastore(); + + String dbName = stmt.getDbName(); + Database db = getDb(dbName); + if (db == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); + } + + DatabaseInfo dbInfo = new DatabaseInfo(db.getFullName(), "", stmt.getQuota(), stmt.getQuotaType()); + localMetastore.alterDatabaseQuota(dbInfo); + } + + public void renameDatabase(AlterDatabaseRenameStatement stmt) throws DdlException { + GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); + LocalMetastore localMetastore = globalStateMgr.getLocalMetastore(); + + String fullDbName = stmt.getDbName(); + String newFullDbName = stmt.getNewDbName(); + + if (fullDbName.equals(newFullDbName)) { + throw new DdlException("Same database name"); + } + + if (!globalStateMgr.tryLock(false)) { + throw new DdlException("Failed to acquire globalStateMgr lock. 
Try again"); + } + try { + // check if db exists + Database db = localMetastore.getDb(fullDbName); + if (db == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, fullDbName); + } + + // check if name is already used + if (localMetastore.getDb(newFullDbName) != null) { + throw new DdlException("Database name[" + newFullDbName + "] is already used"); + } + + localMetastore.renameDatabase(fullDbName, newFullDbName); + } finally { + globalStateMgr.unlock(); + } + + LOG.info("rename database[{}] to [{}], id: {}", fullDbName, newFullDbName, 0); + } + + @Override + public List listDbNames() { + return GlobalStateMgr.getCurrentState().getLocalMetastore().listDbNames(); + } + + @Override + public Database getDb(String name) { + return GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(name); + } + + @Override + public Database getDb(long dbId) { + return GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); + } + + public Optional mayGetDb(String name) { + return Optional.ofNullable(getDb(name)); + } + + public Optional mayGetDb(long dbId) { + return Optional.ofNullable(getDb(dbId)); + } + + /** + * Following is the step to create an olap table: + * 1. create columns + * 2. create partition info + * 3. create distribution info + * 4. set table id and base index id + * 5. set bloom filter columns + * 6. set and build TableProperty includes: + * 6.1. dynamicProperty + * 6.2. replicationNum + * 6.3. inMemory + * 7. set index meta + * 8. check colocation properties + * 9. create tablet in BE + * 10. add this table to FE's meta + * 11. add this table to ColocateGroup if necessary + * + * @return whether the table is created + */ + @Override + public boolean createTable(CreateTableStmt stmt) throws DdlException { + // check if db exists + Database db = getDb(stmt.getDbName()); + if (db == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, stmt.getDbName()); + } + + boolean isTemporaryTable = (stmt instanceof CreateTemporaryTableStmt); + // perform the existence check which is cheap before any further heavy operations. + // NOTE: don't even check the quota if already exists. 
+ Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.READ); + try { + String tableName = stmt.getTableName(); + if (!isTemporaryTable && getTable(db.getFullName(), tableName) != null) { + if (!stmt.isSetIfNotExists()) { + ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName); + } + LOG.info("create table[{}] which already exists", tableName); + return false; + } + } finally { + locker.unLockDatabase(db.getId(), LockType.READ); + } + + // only internal table should check quota and cluster capacity + if (!stmt.isExternal()) { + // check cluster capacity + GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().checkClusterCapacity(); + // check db quota + db.checkQuota(); + } + + AbstractTableFactory tableFactory = TableFactoryProvider.getFactory(stmt.getEngineName()); + if (tableFactory == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_UNKNOWN_STORAGE_ENGINE, stmt.getEngineName()); + } + + Table table = tableFactory.createTable(GlobalStateMgr.getCurrentState().getLocalMetastore(), db, stmt); + String storageVolumeId = GlobalStateMgr.getCurrentState().getStorageVolumeMgr() + .getStorageVolumeIdOfTable(table.getId()); + + try { + onCreate(db, table, storageVolumeId, stmt.isSetIfNotExists()); + } catch (DdlException e) { + if (table.isCloudNativeTable()) { + GlobalStateMgr.getCurrentState().getStorageVolumeMgr().unbindTableToStorageVolume(table.getId()); + } + throw e; + } + return true; + } + + public void onCreate(Database db, Table table, String storageVolumeId, boolean isSetIfNotExists) + throws DdlException { + // check database exists again, because database can be dropped when creating table + if (!GlobalStateMgr.getCurrentState().tryLock(false)) { + throw new DdlException("Failed to acquire globalStateMgr lock. " + + "Try again or increasing value of `catalog_try_lock_timeout_ms` configuration."); + } + + try { + /* + * When creating table or mv, we need to create the tablets and prepare some of the + * metadata first before putting this new table or mv in the database. So after the + * first step, we need to acquire the global lock and double check whether the db still + * exists because it maybe dropped by other concurrent client. And if we don't use the lock + * protection and handle the concurrency properly, the replay of table/mv creation may fail + * on restart or on follower. + * + * After acquire the db lock, we also need to acquire the db lock and write edit log. Since the + * db lock maybe under high contention and IO is busy, current thread can hold the global lock + * for quite a long time and make the other operation waiting for the global lock fail. + * + * So here after the confirmation of existence of modifying database, we release the global lock + * When dropping database, we will set the `exist` field of db object to false. And in the following + * creation process, we will double-check the `exist` field. 
+ */ + if (getDb(db.getId()) == null) { + throw new DdlException("Database has been dropped when creating table/mv/view"); + } + } finally { + GlobalStateMgr.getCurrentState().unlock(); + } + + if (db.isSystemDatabase()) { + ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, table.getName(), + "cannot create table in system database"); + } + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + if (!db.isExist()) { + throw new DdlException("Database has been dropped when creating table/mv/view"); + } + + if (!db.registerTableUnlocked(table)) { + if (!isSetIfNotExists) { + table.delete(db.getId(), false); + ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, table.getName(), + "table already exists"); + } else { + LOG.info("Create table[{}] which already exists", table.getName()); + return; + } + } + + // NOTE: The table has been added to the database, and the following procedure cannot throw exception. + LOG.info("Successfully create table: {}-{}, in database: {}-{}", + table.getName(), table.getId(), db.getFullName(), db.getId()); + + CreateTableInfo createTableInfo = new CreateTableInfo(db.getFullName(), table, storageVolumeId); + GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(createTableInfo); + table.onCreate(db); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } + + @Override + public void createTableLike(CreateTableLikeStmt stmt) throws DdlException { + createTable(stmt.getCreateTableStmt()); + } + + @Override + public void dropTable(DropTableStmt stmt) throws DdlException { + String dbName = stmt.getDbName(); + String tableName = stmt.getTableName(); + + // check database + Database db = getDb(dbName); + if (db == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); + } + if (db.isSystemDatabase()) { + ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, + "cannot drop table in system database: " + db.getOriginName()); + } + db.dropTable(tableName, stmt.isSetIfExists(), stmt.isForceDrop()); + } + + @Override + public void dropTemporaryTable(String dbName, long tableId, String tableName, + boolean isSetIfExists, boolean isForce) throws DdlException { + Database db = getDb(dbName); + if (db == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); + } + + Table table; + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + table = getTable(db.getId(), tableId); + if (table == null) { + if (isSetIfExists) { + return; + } + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName); + } + db.unprotectDropTemporaryTable(tableId, isForce, false); + DropInfo info = new DropInfo(db.getId(), table.getId(), -1L, isForce); + GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(info); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } + + public void recoverTable(RecoverTableStmt recoverStmt) throws DdlException { + String dbName = recoverStmt.getDbName(); + + Database db = null; + if ((db = getDb(dbName)) == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); + } + + String tableName = recoverStmt.getTableName(); + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + Table table = getTable(db.getFullName(), tableName); + if (table != null) { + ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName); + } + + if 
(!GlobalStateMgr.getCurrentState().getRecycleBin().recoverTable(db, tableName)) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName); + } + + Table recoverTable = getTable(db.getFullName(), tableName); + if (recoverTable instanceof OlapTable) { + DynamicPartitionUtil.registerOrRemovePartitionScheduleInfo(db.getId(), (OlapTable) recoverTable); + } + + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } + + /* + * Truncate specified table or partitions. + * The main idea is: + * + * 1. using the same schema to create new table(partitions) + * 2. use the new created table(partitions) to replace the old ones. + * + * if no partition specified, it will truncate all partitions of this table, including all temp partitions, + * otherwise, it will only truncate those specified partitions. + * + */ + @Override + public void truncateTable(TruncateTableStmt truncateTableStmt, ConnectContext context) throws DdlException { + TableRef tblRef = truncateTableStmt.getTblRef(); + TableName dbTbl = tblRef.getName(); + // check, and save some info which need to be checked again later + Map origPartitions = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); + OlapTable copiedTbl; + Database db = getDb(dbTbl.getDb()); + if (db == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbTbl.getDb()); + } + + boolean truncateEntireTable = tblRef.getPartitionNames() == null; + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.READ); + try { + Table table = MetaUtils.getSessionAwareTable(context, db, dbTbl); + if (table == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, dbTbl.getTbl()); + } + + if (!table.isOlapOrCloudNativeTable()) { + throw new DdlException("Only support truncate OLAP table or LAKE table"); + } + + OlapTable olapTable = (OlapTable) table; + if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { + throw InvalidOlapTableStateException.of(olapTable.getState(), olapTable.getName()); + } + + if (!truncateEntireTable) { + for (String partName : tblRef.getPartitionNames().getPartitionNames()) { + Partition partition = olapTable.getPartition(partName); + if (partition == null) { + throw new DdlException("Partition " + partName + " does not exist"); + } + + origPartitions.put(partName, partition); + GlobalStateMgr.getCurrentState().getAnalyzeMgr().recordDropPartition(partition.getId()); + } + } else { + for (Partition partition : olapTable.getPartitions()) { + origPartitions.put(partition.getName(), partition); + GlobalStateMgr.getCurrentState().getAnalyzeMgr().recordDropPartition(partition.getId()); + } + } + + copiedTbl = getShadowCopyTable(olapTable); + } finally { + locker.unLockDatabase(db.getId(), LockType.READ); + } + + // 2. use the copied table to create partitions + List newPartitions = Lists.newArrayListWithCapacity(origPartitions.size()); + // tabletIdSet to save all newly created tablet ids. 
+ Set tabletIdSet = Sets.newHashSet(); + try { + for (Map.Entry entry : origPartitions.entrySet()) { + long oldPartitionId = entry.getValue().getId(); + long newPartitionId = GlobalStateMgr.getCurrentState().getNextId(); + String newPartitionName = entry.getKey(); + + PartitionInfo partitionInfo = copiedTbl.getPartitionInfo(); + partitionInfo.setTabletType(newPartitionId, partitionInfo.getTabletType(oldPartitionId)); + partitionInfo.setIsInMemory(newPartitionId, partitionInfo.getIsInMemory(oldPartitionId)); + partitionInfo.setReplicationNum(newPartitionId, partitionInfo.getReplicationNum(oldPartitionId)); + partitionInfo.setDataProperty(newPartitionId, partitionInfo.getDataProperty(oldPartitionId)); + + if (copiedTbl.isCloudNativeTable()) { + partitionInfo.setDataCacheInfo(newPartitionId, + partitionInfo.getDataCacheInfo(oldPartitionId)); + } + + copiedTbl.setDefaultDistributionInfo(entry.getValue().getDistributionInfo()); + + Partition newPartition = + createPartition(db, copiedTbl, newPartitionId, newPartitionName, null, tabletIdSet, + ConnectContext.get().getCurrentWarehouseId()); + newPartitions.add(newPartition); + } + buildPartitions(db, copiedTbl, newPartitions.stream().map(Partition::getSubPartitions) + .flatMap(p -> p.stream()).collect(Collectors.toList()), ConnectContext.get().getCurrentWarehouseId()); + } catch (DdlException e) { + tabletIdSet.forEach(tabletId -> GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteTablet(tabletId)); + throw e; + } + Preconditions.checkState(origPartitions.size() == newPartitions.size()); + + // all partitions are created successfully, try to replace the old partitions. + // before replacing, we need to check again. + // Things may be changed outside the database lock. + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + OlapTable olapTable = (OlapTable) getTable(db.getId(), copiedTbl.getId()); + if (olapTable == null) { + throw new DdlException("Table[" + copiedTbl.getName() + "] is dropped"); + } + + if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { + throw InvalidOlapTableStateException.of(olapTable.getState(), olapTable.getName()); + } + + // check partitions + for (Map.Entry entry : origPartitions.entrySet()) { + Partition partition = olapTable.getPartition(entry.getValue().getId()); + if (partition == null || !partition.getName().equalsIgnoreCase(entry.getKey())) { + throw new DdlException("Partition [" + entry.getKey() + "] is changed during truncating table, " + + "please retry"); + } + } + + // check if meta changed + // rollup index may be added or dropped, and schema may be changed during creating partition operation. + boolean metaChanged = false; + if (olapTable.getIndexNameToId().size() != copiedTbl.getIndexNameToId().size()) { + metaChanged = true; + } else { + // compare schemaHash + Map copiedIndexIdToSchemaHash = copiedTbl.getIndexIdToSchemaHash(); + for (Map.Entry entry : olapTable.getIndexIdToSchemaHash().entrySet()) { + long indexId = entry.getKey(); + if (!copiedIndexIdToSchemaHash.containsKey(indexId)) { + metaChanged = true; + break; + } + if (!copiedIndexIdToSchemaHash.get(indexId).equals(entry.getValue())) { + metaChanged = true; + break; + } + } + } + + if (olapTable.getDefaultDistributionInfo().getType() != copiedTbl.getDefaultDistributionInfo().getType()) { + metaChanged = true; + } + + if (metaChanged) { + throw new DdlException("Table[" + copiedTbl.getName() + "]'s meta has been changed. 
try again."); + } + + // replace + GlobalStateMgr.getCurrentState().getLocalMetastore() + .truncateTableInternal(olapTable, newPartitions, truncateEntireTable, false); + + try { + GlobalStateMgr.getCurrentState().getColocateTableIndex() + .updateLakeTableColocationInfo(olapTable, true /* isJoin */, null /* expectGroupId */); + } catch (DdlException e) { + LOG.info("table {} update colocation info failed when truncate table, {}", olapTable.getId(), e.getMessage()); + } + + // write edit log + TruncateTableInfo info = new TruncateTableInfo(db.getId(), olapTable.getId(), newPartitions, + truncateEntireTable); + GlobalStateMgr.getCurrentState().getLocalMetastore().truncateTable(info); + + // refresh mv + Set relatedMvs = olapTable.getRelatedMaterializedViews(); + for (MvId mvId : relatedMvs) { + MaterializedView materializedView = (MaterializedView) getTable(db.getId(), mvId.getId()); + if (materializedView == null) { + LOG.warn("Table related materialized view {} can not be found", mvId.getId()); + continue; + } + if (materializedView.isLoadTriggeredRefresh()) { + refreshMaterializedView(db.getFullName(), getTable(db.getId(), mvId.getId()).getName(), false, null, + Constants.TaskRunPriority.NORMAL.value(), true, false); + } + } + } catch (DdlException e) { + tabletIdSet.forEach(tabletId -> GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteTablet(tabletId)); + throw e; + } catch (MetaNotFoundException e) { + LOG.warn("Table related materialized view can not be found", e); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + + LOG.info("finished to truncate table {}, partitions: {}", + tblRef.getName().toSql(), tblRef.getPartitionNames()); + } + + /* + * used for handling AlterTableStmt (for client is the ALTER TABLE command). 
+ * including SchemaChangeHandler and RollupHandler + */ + @Override + public void alterTable(ConnectContext context, AlterTableStmt stmt) throws UserException { + AlterJobExecutor alterJobExecutor = new AlterJobExecutor(); + alterJobExecutor.process(stmt, context); + } + + @Override + public void alterTableComment(Database db, Table table, AlterTableCommentClause clause) { + ModifyTablePropertyOperationLog log = new ModifyTablePropertyOperationLog(db.getId(), table.getId()); + log.setComment(clause.getNewComment()); + GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(log); + table.setComment(clause.getNewComment()); + } + + @Override + public void renameTable(Database db, Table table, TableRenameClause tableRenameClause) throws DdlException { + OlapTable olapTable = (OlapTable) table; + if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { + throw new DdlException("Table[" + olapTable.getName() + "] is under " + olapTable.getState()); + } + + String oldTableName = olapTable.getName(); + String newTableName = tableRenameClause.getNewTableName(); + if (oldTableName.equals(newTableName)) { + throw new DdlException("Same table name"); + } + + // check if name is already used + if (getTable(db.getFullName(), newTableName) != null) { + throw new DdlException("Table name[" + newTableName + "] is already used"); + } + + olapTable.checkAndSetName(newTableName, false); + + db.dropTable(oldTableName); + db.registerTableUnlocked(olapTable); + inactiveRelatedMaterializedView(db, olapTable, + MaterializedViewExceptions.inactiveReasonForBaseTableRenamed(oldTableName)); + + TableInfo tableInfo = TableInfo.createForTableRename(db.getId(), olapTable.getId(), newTableName); + GlobalStateMgr.getCurrentState().getLocalMetastore().renameTable(tableInfo); + LOG.info("rename table[{}] to {}, tableId: {}", oldTableName, newTableName, olapTable.getId()); + } + + @Override + public Table getTable(String dbName, String tblName) { + return GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(dbName, tblName); + } + + public Table getTable(Long dbId, Long tableId) { + return GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(dbId, tableId); + } + + public Optional
mayGetTable(long dbId, long tableId) { + return mayGetDb(dbId).flatMap(db -> Optional.ofNullable(db.getTable(tableId))); + } + + public Optional
mayGetTable(String dbName, String tableName) { + return mayGetDb(dbName).flatMap(db -> Optional.ofNullable(db.getTable(tableName))); + } + + @Override + public void createView(CreateViewStmt stmt) throws DdlException { + String dbName = stmt.getDbName(); + String tableName = stmt.getTable(); + + // check if db exists + Database db = this.getDb(stmt.getDbName()); + if (db == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); + } + // check if table exists in db + boolean existed = false; + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.READ); + try { + if (getTable(db.getFullName(), tableName) != null) { + existed = true; + if (stmt.isSetIfNotExists()) { + LOG.info("create view[{}] which already exists", tableName); + return; + } else if (stmt.isReplace()) { + LOG.info("view {} already exists, need to replace it", tableName); + } else { + ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName); + } + } + } finally { + locker.unLockDatabase(db.getId(), LockType.READ); + } + + if (existed) { + // already existed, need to alter the view + AlterViewStmt alterViewStmt = AlterViewStmt.fromReplaceStmt(stmt); + new AlterJobExecutor().process(alterViewStmt, ConnectContext.get()); + LOG.info("replace view {} successfully", tableName); + } else { + List columns = stmt.getColumns(); + long tableId = GlobalStateMgr.getCurrentState().getNextId(); + View view = new View(tableId, tableName, columns); + view.setComment(stmt.getComment()); + view.setInlineViewDefWithSqlMode(stmt.getInlineViewDef(), + ConnectContext.get().getSessionVariable().getSqlMode()); + // init here in case the stmt string from view.toSql() has some syntax error. + try { + view.init(); + } catch (UserException e) { + throw new DdlException("failed to init view stmt", e); + } + + onCreate(db, view, "", stmt.isSetIfNotExists()); + LOG.info("successfully create view[" + tableName + "-" + view.getId() + "]"); + } + } + + /** + * used for handling AlterViewStmt (the ALTER VIEW command). + */ + @Override + public void alterView(AlterViewStmt stmt) { + new AlterJobExecutor().process(stmt, ConnectContext.get()); + } + + @Override + public void createMaterializedView(CreateMaterializedViewStmt stmt) + throws AnalysisException, DdlException { + MaterializedViewHandler materializedViewHandler = + GlobalStateMgr.getCurrentState().getAlterJobMgr().getMaterializedViewHandler(); + String tableName = stmt.getBaseIndexName(); + // check db + String dbName = stmt.getDBName(); + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbName); + if (db == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); + } + // check cluster capacity + GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().checkClusterCapacity(); + // check db quota + db.checkQuota(); + + Locker locker = new Locker(); + if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) { + throw new DdlException("create materialized failed. database:" + db.getFullName() + " not exist"); + } + try { + Table table = getTable(db.getFullName(), tableName); + if (table == null) { + throw new DdlException("create materialized failed. 
table:" + tableName + " not exist"); + } + if (table.isCloudNativeTable()) { + throw new DdlException("Creating synchronous materialized view(rollup) is not supported in " + + "shared data clusters.\nPlease use asynchronous materialized view instead.\n" + + "Refer to https://docs.starrocks.io/en-us/latest/sql-reference/sql-statements" + + "/data-definition/CREATE%20MATERIALIZED%20VIEW#asynchronous-materialized-view for details."); + } + if (!table.isOlapTable()) { + throw new DdlException("Do not support create synchronous materialized view(rollup) on " + + table.getType().name() + " table[" + tableName + "]"); + } + OlapTable olapTable = (OlapTable) table; + if (olapTable.getKeysType() == KeysType.PRIMARY_KEYS) { + throw new DdlException( + "Do not support create materialized view on primary key table[" + tableName + "]"); + } + if (GlobalStateMgr.getCurrentState().getInsertOverwriteJobMgr().hasRunningOverwriteJob(olapTable.getId())) { + throw new DdlException("Table[" + olapTable.getName() + "] is doing insert overwrite job, " + + "please start to create materialized view after insert overwrite"); + } + olapTable.checkStableAndNormal(); + + materializedViewHandler.processCreateMaterializedView(stmt, db, olapTable); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } + + // TODO(murphy) refactor it into MVManager + @Override + public void createMaterializedView(CreateMaterializedViewStatement stmt) + throws DdlException { + // check mv exists,name must be different from view/mv/table which exists in metadata + String mvName = stmt.getTableName().getTbl(); + String dbName = stmt.getTableName().getDb(); + LOG.debug("Begin create materialized view: {}", mvName); + // check if db exists + Database db = this.getDb(dbName); + if (db == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); + } + + // check if table exists in db + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.READ); + try { + if (getTable(db.getFullName(), mvName) != null) { + if (stmt.isIfNotExists()) { + LOG.info("Create materialized view [{}] which already exists", mvName); + return; + } else { + ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, mvName); + } + } + } finally { + locker.unLockDatabase(db.getId(), LockType.READ); + } + // create columns + List baseSchema = stmt.getMvColumnItems(); + validateColumns(baseSchema); + + Map properties = stmt.getProperties(); + if (properties == null) { + properties = Maps.newHashMap(); + } + + // create partition info + PartitionInfo partitionInfo = buildPartitionInfo(stmt); + // create distribution info + DistributionDesc distributionDesc = stmt.getDistributionDesc(); + Preconditions.checkNotNull(distributionDesc); + DistributionInfo baseDistribution = distributionDesc.toDistributionInfo(baseSchema); + // create refresh scheme + MaterializedView.MvRefreshScheme mvRefreshScheme; + RefreshSchemeClause refreshSchemeDesc = stmt.getRefreshSchemeDesc(); + if (refreshSchemeDesc.getType() == MaterializedView.RefreshType.ASYNC) { + mvRefreshScheme = new MaterializedView.MvRefreshScheme(); + AsyncRefreshSchemeDesc asyncRefreshSchemeDesc = (AsyncRefreshSchemeDesc) refreshSchemeDesc; + MaterializedView.AsyncRefreshContext asyncRefreshContext = mvRefreshScheme.getAsyncRefreshContext(); + asyncRefreshContext.setDefineStartTime(asyncRefreshSchemeDesc.isDefineStartTime()); + int randomizeStart = 0; + if (properties.containsKey(PropertyAnalyzer.PROPERTY_MV_RANDOMIZE_START)) { + try { + randomizeStart = 
Integer.parseInt(properties.get((PropertyAnalyzer.PROPERTY_MV_RANDOMIZE_START))); + } catch (NumberFormatException e) { + ErrorReport.reportSemanticException(ErrorCode.ERR_INVALID_PARAMETER, + PropertyAnalyzer.PROPERTY_MV_RANDOMIZE_START + " only accept integer as parameter"); + } + // remove this transient variable + properties.remove(PropertyAnalyzer.PROPERTY_MV_RANDOMIZE_START); + } + + long random = getRandomStart(asyncRefreshSchemeDesc.getIntervalLiteral(), randomizeStart); + if (asyncRefreshSchemeDesc.isDefineStartTime() || randomizeStart == -1) { + long definedStartTime = Utils.getLongFromDateTime(asyncRefreshSchemeDesc.getStartTime()); + // Add random set only if mv_random_start > 0 when user has already set the start time + if (randomizeStart > 0) { + definedStartTime += random; + } + asyncRefreshContext.setStartTime(definedStartTime); + } else if (asyncRefreshSchemeDesc.getIntervalLiteral() != null) { + long currentTimeSecond = Utils.getLongFromDateTime(LocalDateTime.now()); + long randomizedStart = currentTimeSecond + random; + asyncRefreshContext.setStartTime(randomizedStart); + } + if (asyncRefreshSchemeDesc.getIntervalLiteral() != null) { + long intervalStep = ((IntLiteral) asyncRefreshSchemeDesc.getIntervalLiteral().getValue()).getValue(); + String refreshTimeUnit = asyncRefreshSchemeDesc.getIntervalLiteral().getUnitIdentifier().getDescription(); + asyncRefreshContext.setStep(intervalStep); + asyncRefreshContext.setTimeUnit(refreshTimeUnit); + + // Check the interval time should not be less than the min allowed config time. + if (Config.materialized_view_min_refresh_interval > 0) { + TimeUnit intervalTimeUnit = TimeUtils.convertUnitIdentifierToTimeUnit(refreshTimeUnit); + long periodSeconds = TimeUtils.convertTimeUnitValueToSecond(intervalStep, intervalTimeUnit); + if (periodSeconds < Config.materialized_view_min_refresh_interval) { + throw new DdlException(String.format("Refresh schedule interval %s is too small which may cost " + + "a lot of memory/cpu resources to refresh the asynchronous materialized view, " + + "please config an interval larger than " + + "Config.materialized_view_min_refresh_interval(%ss).", + periodSeconds, + Config.materialized_view_min_refresh_interval)); + } + } + } + + // task which type is EVENT_TRIGGERED can not use external table as base table now. 
+ if (asyncRefreshContext.getTimeUnit() == null) { + // asyncRefreshContext's timeUnit is null means this task's type is EVENT_TRIGGERED + Map tableNameTableMap = AnalyzerUtils.collectAllTable(stmt.getQueryStatement()); + if (tableNameTableMap.values().stream().anyMatch(table -> !table.isNativeTableOrMaterializedView())) { + throw new DdlException( + "Materialized view which type is ASYNC need to specify refresh interval for " + + "external table"); + } + } + } else if (refreshSchemeDesc.getType() == MaterializedView.RefreshType.SYNC) { + mvRefreshScheme = new MaterializedView.MvRefreshScheme(); + mvRefreshScheme.setType(MaterializedView.RefreshType.SYNC); + } else if (refreshSchemeDesc.getType().equals(MaterializedView.RefreshType.MANUAL)) { + mvRefreshScheme = new MaterializedView.MvRefreshScheme(); + mvRefreshScheme.setType(MaterializedView.RefreshType.MANUAL); + } else { + mvRefreshScheme = new MaterializedView.MvRefreshScheme(); + mvRefreshScheme.setType(MaterializedView.RefreshType.INCREMENTAL); + } + mvRefreshScheme.setMoment(refreshSchemeDesc.getMoment()); + // create mv + long mvId = GlobalStateMgr.getCurrentState().getNextId(); + MaterializedView materializedView; + if (RunMode.isSharedNothingMode()) { + if (refreshSchemeDesc.getType().equals(MaterializedView.RefreshType.INCREMENTAL)) { + materializedView = GlobalStateMgr.getCurrentState().getMaterializedViewMgr() + .createSinkTable(stmt, partitionInfo, mvId, db.getId()); + materializedView.setMaintenancePlan(stmt.getMaintenancePlan()); + } else { + materializedView = + new MaterializedView(mvId, db.getId(), mvName, baseSchema, stmt.getKeysType(), partitionInfo, + baseDistribution, mvRefreshScheme); + } + } else { + Preconditions.checkState(RunMode.isSharedDataMode()); + if (refreshSchemeDesc.getType().equals(MaterializedView.RefreshType.INCREMENTAL)) { + throw new DdlException("Incremental materialized view in shared_data mode is not supported"); + } + + materializedView = + new LakeMaterializedView(mvId, db.getId(), mvName, baseSchema, stmt.getKeysType(), partitionInfo, + baseDistribution, mvRefreshScheme); + } + + //bitmap indexes + List mvIndexes = stmt.getMvIndexes(); + materializedView.setIndexes(mvIndexes); + + // sort keys + if (CollectionUtils.isNotEmpty(stmt.getSortKeys())) { + materializedView.setTableProperty(new TableProperty()); + materializedView.getTableProperty().setMvSortKeys(stmt.getSortKeys()); + } + // set comment + materializedView.setComment(stmt.getComment()); + // set baseTableIds + materializedView.setBaseTableInfos(stmt.getBaseTableInfos()); + // set viewDefineSql + materializedView.setViewDefineSql(stmt.getInlineViewDef()); + materializedView.setSimpleDefineSql(stmt.getSimpleViewDef()); + materializedView.setOriginalViewDefineSql(stmt.getOriginalViewDefineSql()); + // set partitionRefTableExprs + if (stmt.getPartitionRefTableExpr() != null) { + //avoid to get a list of null inside + materializedView.setPartitionRefTableExprs(Lists.newArrayList(stmt.getPartitionRefTableExpr())); + } + // set base index id + long baseIndexId = GlobalStateMgr.getCurrentState().getNextId(); + materializedView.setBaseIndexId(baseIndexId); + // set query output indexes + materializedView.setQueryOutputIndices(stmt.getQueryOutputIndices()); + // set base index meta + int schemaVersion = 0; + int schemaHash = Util.schemaHash(schemaVersion, baseSchema, null, 0d); + short shortKeyColumnCount = GlobalStateMgr.calcShortKeyColumnCount(baseSchema, null); + TStorageType baseIndexStorageType = TStorageType.COLUMN; + 
materializedView.setIndexMeta(baseIndexId, mvName, baseSchema, schemaVersion, schemaHash, + shortKeyColumnCount, baseIndexStorageType, stmt.getKeysType()); + + // validate hint + Map optHints = Maps.newHashMap(); + if (stmt.isExistQueryScopeHint()) { + SessionVariable sessionVariable = VariableMgr.newSessionVariable(); + for (HintNode hintNode : stmt.getAllQueryScopeHints()) { + if (hintNode instanceof SetVarHint) { + for (Map.Entry entry : hintNode.getValue().entrySet()) { + VariableMgr.setSystemVariable(sessionVariable, + new SystemVariable(entry.getKey(), new StringLiteral(entry.getValue())), true); + optHints.put(entry.getKey(), entry.getValue()); + } + } else if (hintNode instanceof UserVariableHint) { + throw new DdlException("unsupported user variable hint in Materialized view for now."); + } + } + } + + boolean isNonPartitioned = partitionInfo.isUnPartitioned(); + DataProperty dataProperty = PropertyAnalyzer.analyzeMVDataProperty(materializedView, properties); + PropertyAnalyzer.analyzeMVProperties(db, materializedView, properties, isNonPartitioned); + try { + Set tabletIdSet = new HashSet<>(); + // process single partition info + if (isNonPartitioned) { + long partitionId = GlobalStateMgr.getCurrentState().getNextId(); + Preconditions.checkNotNull(dataProperty); + partitionInfo.setDataProperty(partitionId, dataProperty); + partitionInfo.setReplicationNum(partitionId, materializedView.getDefaultReplicationNum()); + partitionInfo.setIsInMemory(partitionId, false); + partitionInfo.setTabletType(partitionId, TTabletType.TABLET_TYPE_DISK); + StorageInfo storageInfo = materializedView.getTableProperty().getStorageInfo(); + partitionInfo.setDataCacheInfo(partitionId, + storageInfo == null ? null : storageInfo.getDataCacheInfo()); + Long version = Partition.PARTITION_INIT_VERSION; + Partition partition = createPartition(db, materializedView, partitionId, mvName, version, tabletIdSet, + materializedView.getWarehouseId()); + buildPartitions(db, materializedView, new ArrayList<>(partition.getSubPartitions()), + materializedView.getWarehouseId()); + materializedView.addPartition(partition); + } else { + Expr partitionExpr = stmt.getPartitionExpDesc().getExpr(); + Map partitionExprMaps = MVPartitionExprResolver.getMVPartitionExprsChecked(partitionExpr, + stmt.getQueryStatement(), stmt.getBaseTableInfos()); + LOG.info("Generate mv {} partition exprs: {}", mvName, partitionExprMaps); + materializedView.setPartitionExprMaps(partitionExprMaps); + } + + GlobalStateMgr.getCurrentState().getMaterializedViewMgr().prepareMaintenanceWork(stmt, materializedView); + + String storageVolumeId = ""; + if (materializedView.isCloudNativeMaterializedView()) { + storageVolumeId = GlobalStateMgr.getCurrentState().getStorageVolumeMgr() + .getStorageVolumeIdOfTable(materializedView.getId()); + } + onCreate(db, materializedView, storageVolumeId, stmt.isIfNotExists()); + } catch (DdlException e) { + if (materializedView.isCloudNativeMaterializedView()) { + GlobalStateMgr.getCurrentState().getStorageVolumeMgr().unbindTableToStorageVolume(materializedView.getId()); + } + throw e; + } + LOG.info("Successfully create materialized view [{}:{}]", mvName, materializedView.getMvId()); + + // NOTE: The materialized view has been added to the database, and the following procedure cannot throw exception. 
+ createTaskForMaterializedView(dbName, materializedView, optHints); + DynamicPartitionUtil.registerOrRemovePartitionTTLTable(db.getId(), materializedView); + } + + private long getRandomStart(IntervalLiteral interval, long randomizeStart) throws DdlException { + if (interval == null || randomizeStart == -1) { + return 0; + } + // randomize the start time if not specified manually, to avoid refresh conflicts + // default random interval is min(300s, INTERVAL/2) + // user could specify it through mv_randomize_start + long period = ((IntLiteral) interval.getValue()).getLongValue(); + TimeUnit timeUnit = + TimeUtils.convertUnitIdentifierToTimeUnit(interval.getUnitIdentifier().getDescription()); + long intervalSeconds = TimeUtils.convertTimeUnitValueToSecond(period, timeUnit); + long randomInterval = randomizeStart == 0 ? Math.min(300, intervalSeconds / 2) : randomizeStart; + return randomInterval > 0 ? ThreadLocalRandom.current().nextLong(randomInterval) : randomInterval; + } + + private void createTaskForMaterializedView(String dbName, MaterializedView materializedView, + Map optHints) throws DdlException { + MaterializedView.RefreshType refreshType = materializedView.getRefreshScheme().getType(); + MaterializedView.RefreshMoment refreshMoment = materializedView.getRefreshScheme().getMoment(); + + if (refreshType.equals(MaterializedView.RefreshType.INCREMENTAL)) { + GlobalStateMgr.getCurrentState().getMaterializedViewMgr().startMaintainMV(materializedView); + return; + } + + if (refreshType != MaterializedView.RefreshType.SYNC) { + + Task task = TaskBuilder.buildMvTask(materializedView, dbName); + TaskBuilder.updateTaskInfo(task, materializedView); + + if (optHints != null) { + Map taskProperties = task.getProperties(); + taskProperties.putAll(optHints); + } + + TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); + taskManager.createTask(task, false); + if (refreshMoment.equals(MaterializedView.RefreshMoment.IMMEDIATE)) { + taskManager.executeTask(task.getName()); + } + } + } + + /** + * Leave some clean up work to {@link MaterializedView#onDrop} + */ + @Override + public void dropMaterializedView(DropMaterializedViewStmt stmt) throws DdlException, MetaNotFoundException { + Database db = getDb(stmt.getDbName()); + if (db == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, stmt.getDbName()); + } + Table table; + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.READ); + try { + table = getTable(db.getFullName(), stmt.getMvName()); + } finally { + locker.unLockDatabase(db.getId(), LockType.READ); + } + if (table instanceof MaterializedView) { + try { + Authorizer.checkMaterializedViewAction(ConnectContext.get().getCurrentUserIdentity(), + ConnectContext.get().getCurrentRoleIds(), stmt.getDbMvName(), PrivilegeType.DROP); + } catch (AccessDeniedException e) { + AccessDeniedException.reportAccessDenied( + stmt.getDbMvName().getCatalog(), + ConnectContext.get().getCurrentUserIdentity(), + ConnectContext.get().getCurrentRoleIds(), PrivilegeType.DROP.name(), ObjectType.MATERIALIZED_VIEW.name(), + stmt.getDbMvName().getTbl()); + } + + db.dropTable(table.getName(), stmt.isSetIfExists(), true); + } else { + GlobalStateMgr.getCurrentState().getAlterJobMgr().getMaterializedViewHandler().processDropMaterializedView(stmt); + } + } + + @Override + public String refreshMaterializedView(RefreshMaterializedViewStatement refreshMaterializedViewStatement) + throws DdlException, MetaNotFoundException { + String dbName = 
refreshMaterializedViewStatement.getMvName().getDb(); + String mvName = refreshMaterializedViewStatement.getMvName().getTbl(); + boolean force = refreshMaterializedViewStatement.isForceRefresh(); + PartitionRangeDesc range = refreshMaterializedViewStatement.getPartitionRangeDesc(); + return refreshMaterializedView(dbName, mvName, force, range, Constants.TaskRunPriority.HIGH.value(), + Config.enable_mv_refresh_sync_refresh_mergeable, true, refreshMaterializedViewStatement.isSync()); + } + + @Override + public void cancelRefreshMaterializedView( + CancelRefreshMaterializedViewStmt stmt) throws DdlException, MetaNotFoundException { + String dbName = stmt.getMvName().getDb(); + String mvName = stmt.getMvName().getTbl(); + MaterializedView materializedView = getMaterializedViewToRefresh(dbName, mvName); + TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); + Task refreshTask = taskManager.getTask(TaskBuilder.getMvTaskName(materializedView.getId())); + boolean isForce = stmt.isForce(); + if (refreshTask != null) { + taskManager.killTask(refreshTask.getName(), isForce); + } + } + + private String executeRefreshMvTask(String dbName, MaterializedView materializedView, + ExecuteOption executeOption) + throws DdlException { + MaterializedView.RefreshType refreshType = materializedView.getRefreshScheme().getType(); + LOG.info("Start to execute refresh materialized view task, mv: {}, refreshType: {}, executionOption:{}", + materializedView.getName(), refreshType, executeOption); + + if (refreshType.equals(MaterializedView.RefreshType.INCREMENTAL)) { + GlobalStateMgr.getCurrentState().getMaterializedViewMgr().onTxnPublish(materializedView); + } else if (refreshType != MaterializedView.RefreshType.SYNC) { + TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); + final String mvTaskName = TaskBuilder.getMvTaskName(materializedView.getId()); + if (!taskManager.containTask(mvTaskName)) { + Task task = TaskBuilder.buildMvTask(materializedView, dbName); + TaskBuilder.updateTaskInfo(task, materializedView); + taskManager.createTask(task, false); + } + return taskManager.executeTask(mvTaskName, executeOption).getQueryId(); + } + return null; + } + + private MaterializedView getMaterializedViewToRefresh(String dbName, String mvName) + throws DdlException, MetaNotFoundException { + Database db = this.getDb(dbName); + if (db == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); + } + final Table table = getTable(db.getFullName(), mvName); + MaterializedView materializedView = null; + if (table instanceof MaterializedView) { + materializedView = (MaterializedView) table; + } + if (materializedView == null) { + throw new MetaNotFoundException(mvName + " is not a materialized view"); + } + return materializedView; + } + + public String refreshMaterializedView(String dbName, String mvName, boolean force, PartitionRangeDesc range, + int priority, boolean mergeRedundant, boolean isManual) + throws DdlException, MetaNotFoundException { + return refreshMaterializedView(dbName, mvName, force, range, priority, mergeRedundant, isManual, false); + } + + public String refreshMaterializedView(String dbName, String mvName, boolean force, PartitionRangeDesc range, + int priority, boolean mergeRedundant, boolean isManual, boolean isSync) + throws DdlException, MetaNotFoundException { + MaterializedView materializedView = getMaterializedViewToRefresh(dbName, mvName); + + HashMap taskRunProperties = new HashMap<>(); + 
taskRunProperties.put(TaskRun.PARTITION_START, range == null ? null : range.getPartitionStart()); + taskRunProperties.put(TaskRun.PARTITION_END, range == null ? null : range.getPartitionEnd()); + taskRunProperties.put(TaskRun.FORCE, Boolean.toString(force)); + + ExecuteOption executeOption = new ExecuteOption(priority, mergeRedundant, taskRunProperties); + executeOption.setManual(isManual); + executeOption.setSync(isSync); + return executeRefreshMvTask(dbName, materializedView, executeOption); + } + + @Override + public void alterMaterializedView(AlterMaterializedViewStmt stmt) { + new AlterMVJobExecutor().process(stmt, ConnectContext.get()); + } + + @Override + public void addPartitions(ConnectContext ctx, Database db, String tableName, AddPartitionClause addPartitionClause) + throws DdlException { + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.READ); + try { + Table table = getTable(db.getFullName(), tableName); + CatalogUtils.checkTableExist(db, tableName); + CatalogUtils.checkNativeTable(db, table); + } finally { + locker.unLockDatabase(db.getId(), LockType.READ); + } + addPartitions(ctx, db, tableName, + addPartitionClause.getResolvedPartitionDescList(), + addPartitionClause.isTempPartition(), + addPartitionClause.getDistributionDesc()); + } + + private void addPartitions(ConnectContext ctx, Database db, String tableName, List partitionDescs, + boolean isTempPartition, DistributionDesc distributionDesc) throws DdlException { + DistributionInfo distributionInfo; + OlapTable olapTable; + OlapTable copiedTable; + + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.READ); + Set checkExistPartitionName = Sets.newConcurrentHashSet(); + try { + olapTable = checkTable(db, tableName); + + // get partition info + PartitionInfo partitionInfo = olapTable.getPartitionInfo(); + + // check partition type + checkPartitionType(partitionInfo); + + // check partition num + checkPartitionNum(olapTable); + + // get distributionInfo + distributionInfo = getDistributionInfo(olapTable, distributionDesc).copy(); + olapTable.inferDistribution(distributionInfo); + + // check colocation + checkColocation(db, olapTable, distributionInfo, partitionDescs); + copiedTable = getShadowCopyTable(olapTable); + copiedTable.setDefaultDistributionInfo(distributionInfo); + checkExistPartitionName = CatalogUtils.checkPartitionNameExistForAddPartitions(olapTable, partitionDescs); + } finally { + locker.unLockDatabase(db.getId(), LockType.READ); + } + + Preconditions.checkNotNull(distributionInfo); + Preconditions.checkNotNull(olapTable); + Preconditions.checkNotNull(copiedTable); + + // create partition outside db lock + checkDataProperty(partitionDescs); + + Set tabletIdSetForAll = Sets.newHashSet(); + HashMap> partitionNameToTabletSet = Maps.newHashMap(); + try { + // create partition list + List> newPartitions = + createPartitionMap(db, copiedTable, partitionDescs, partitionNameToTabletSet, tabletIdSetForAll, + checkExistPartitionName, ctx.getCurrentWarehouseId()); + + // build partitions + List partitionList = newPartitions.stream().map(x -> x.first).collect(Collectors.toList()); + buildPartitions(db, copiedTable, partitionList.stream().map(Partition::getSubPartitions) + .flatMap(p -> p.stream()).collect(Collectors.toList()), ctx.getCurrentWarehouseId()); + + // check again + if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) { + throw new DdlException("db " + db.getFullName() + + "(" + db.getId() + ") has been dropped"); + } + Set existPartitionNameSet = 
Sets.newHashSet(); + try { + olapTable = checkTable(db, tableName); + existPartitionNameSet = CatalogUtils.checkPartitionNameExistForAddPartitions(olapTable, + partitionDescs); + if (existPartitionNameSet.size() > 0) { + for (String partitionName : existPartitionNameSet) { + LOG.info("add partition[{}] which already exists", partitionName); + } + } + + // check if meta changed + checkIfMetaChange(olapTable, copiedTable, tableName); + + // get partition info + PartitionInfo partitionInfo = olapTable.getPartitionInfo(); + + // check partition type + checkPartitionType(partitionInfo); + + // update partition info + updatePartitionInfo(partitionInfo, newPartitions, existPartitionNameSet, isTempPartition, olapTable); + + try { + GlobalStateMgr.getCurrentState().getColocateTableIndex() + .updateLakeTableColocationInfo(olapTable, true /* isJoin */, null /* expectGroupId */); + } catch (DdlException e) { + LOG.info("table {} update colocation info failed when add partition, {}", olapTable.getId(), e.getMessage()); + } + + // add partition log + GlobalStateMgr.getCurrentState().getLocalMetastore(). + addPartitionLog(db, olapTable, partitionDescs, isTempPartition, partitionInfo, partitionList, + existPartitionNameSet); + } finally { + cleanExistPartitionNameSet(existPartitionNameSet, partitionNameToTabletSet); + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } catch (DdlException e) { + cleanTabletIdSetForAll(tabletIdSetForAll); + throw e; + } + } + + private void cleanTabletIdSetForAll(Set tabletIdSetForAll) { + // Cleanup of shards for LakeTable is taken care by ShardDeleter + for (Long tabletId : tabletIdSetForAll) { + GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteTablet(tabletId); + } + } + + private void cleanExistPartitionNameSet(Set existPartitionNameSet, + HashMap> partitionNameToTabletSet) { + for (String partitionName : existPartitionNameSet) { + Set existPartitionTabletSet = partitionNameToTabletSet.get(partitionName); + if (existPartitionTabletSet == null) { + // should not happen + continue; + } + for (Long tabletId : existPartitionTabletSet) { + // createPartitionWithIndices create duplicate tablet that if not exists scenario + // so here need to clean up those created tablets which partition already exists from invert index + GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteTablet(tabletId); + } + } + } + + private void checkPartitionNum(OlapTable olapTable) throws DdlException { + if (olapTable.getNumberOfPartitions() > Config.max_partition_number_per_table) { + throw new DdlException("Table " + olapTable.getName() + " created partitions exceeded the maximum limit: " + + Config.max_partition_number_per_table + ". You can modify this restriction on by setting" + + " max_partition_number_per_table larger."); + } + } + + @Override + public void dropPartition(Database db, Table table, DropPartitionClause clause) throws DdlException { + CatalogUtils.checkTableExist(db, table.getName()); + Locker locker = new Locker(); + OlapTable olapTable = (OlapTable) table; + Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); + PartitionInfo partitionInfo = olapTable.getPartitionInfo(); + + if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { + throw InvalidOlapTableStateException.of(olapTable.getState(), olapTable.getName()); + } + if (!partitionInfo.isRangePartition() && partitionInfo.getType() != PartitionType.LIST) { + throw new DdlException("Alter table [" + olapTable.getName() + "] failed. 
Not a partitioned table"); + } + boolean isTempPartition = clause.isTempPartition(); + + List existPartitions = Lists.newArrayList(); + List notExistPartitions = Lists.newArrayList(); + for (String partitionName : clause.getResolvedPartitionNames()) { + if (olapTable.checkPartitionNameExist(partitionName, isTempPartition)) { + existPartitions.add(partitionName); + } else { + notExistPartitions.add(partitionName); + } + } + if (CollectionUtils.isNotEmpty(notExistPartitions)) { + if (clause.isSetIfExists()) { + LOG.info("drop partition[{}] which does not exist", notExistPartitions); + } else { + ErrorReport.reportDdlException(ErrorCode.ERR_DROP_PARTITION_NON_EXISTENT, notExistPartitions); + } + } + if (CollectionUtils.isEmpty(existPartitions)) { + return; + } + for (String partitionName : existPartitions) { + // drop + if (isTempPartition) { + olapTable.dropTempPartition(partitionName, true); + } else { + Partition partition = olapTable.getPartition(partitionName); + if (!clause.isForceDrop()) { + if (partition != null) { + if (GlobalStateMgr.getCurrentState().getGlobalTransactionMgr() + .existCommittedTxns(db.getId(), olapTable.getId(), partition.getId())) { + throw new DdlException( + "There are still some transactions in the COMMITTED state waiting to be completed." + + " The partition [" + partitionName + + "] cannot be dropped. If you want to forcibly drop(cannot be recovered)," + + " please use \"DROP PARTITION FORCE\"."); + } + } + } + Range partitionRange = null; + if (partition != null) { + GlobalStateMgr.getCurrentState().getAnalyzeMgr().recordDropPartition(partition.getId()); + if (partitionInfo instanceof RangePartitionInfo) { + partitionRange = ((RangePartitionInfo) partitionInfo).getRange(partition.getId()); + } + } + + olapTable.dropPartition(db.getId(), partitionName, clause.isForceDrop()); + if (olapTable instanceof MaterializedView) { + MaterializedView mv = (MaterializedView) olapTable; + SyncPartitionUtils.dropBaseVersionMeta(mv, partitionName, partitionRange); + } + } + } + if (!isTempPartition) { + try { + for (MvId mvId : olapTable.getRelatedMaterializedViews()) { + MaterializedView materializedView = (MaterializedView) getTable(db.getId(), mvId.getId()); + if (materializedView != null && materializedView.isLoadTriggeredRefresh()) { + refreshMaterializedView( + db.getFullName(), materializedView.getName(), false, null, + Constants.TaskRunPriority.NORMAL.value(), true, false); + } + } + } catch (MetaNotFoundException e) { + throw new DdlException("fail to refresh materialized views when dropping partition", e); + } + } + long dbId = db.getId(); + long tableId = olapTable.getId(); + + + if (clause.getPartitionName() != null) { + String partitionName = clause.getPartitionName(); + DropPartitionInfo info = new DropPartitionInfo(dbId, tableId, partitionName, isTempPartition, clause.isForceDrop()); + GlobalStateMgr.getCurrentState().getLocalMetastore().dropPartition(info); + LOG.info("succeed in dropping partition[{}], is temp : {}, is force : {}", partitionName, isTempPartition, + clause.isForceDrop()); + } else { + DropPartitionsInfo info = + new DropPartitionsInfo(dbId, tableId, isTempPartition, clause.isForceDrop(), existPartitions); + GlobalStateMgr.getCurrentState().getLocalMetastore().dropPartitions(info); + LOG.info("succeed in dropping partitions[{}], is temp : {}, is force : {}", existPartitions, isTempPartition, + clause.isForceDrop()); + } + } + + public void recoverPartition(RecoverPartitionStmt recoverStmt) throws DdlException { + String dbName = 
recoverStmt.getDbName(); + + Database db = null; + if ((db = getDb(dbName)) == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); + } + + String tableName = recoverStmt.getTableName(); + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + Table table = getTable(db.getFullName(), tableName); + if (table == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName); + } + + if (!table.isOlapOrCloudNativeTable()) { + throw new DdlException("table[" + tableName + "] is not OLAP table or LAKE table"); + } + OlapTable olapTable = (OlapTable) table; + + String partitionName = recoverStmt.getPartitionName(); + if (olapTable.getPartition(partitionName) != null) { + throw new DdlException("partition[" + partitionName + "] already exist in table[" + tableName + "]"); + } + + GlobalStateMgr.getCurrentState().getRecycleBin().recoverPartition(db.getId(), olapTable, partitionName); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } + + private OlapTable checkTable(Database db, String tableName) throws DdlException { + CatalogUtils.checkTableExist(db, tableName); + Table table = getTable(db.getFullName(), tableName); + CatalogUtils.checkNativeTable(db, table); + OlapTable olapTable = (OlapTable) table; + CatalogUtils.checkTableState(olapTable, tableName); + return olapTable; + } + + private OlapTable checkTable(Database db, Long tableId) throws DdlException { + Table table = getTable(db.getId(), tableId); + if (table == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableId); + } + CatalogUtils.checkNativeTable(db, table); + OlapTable olapTable = (OlapTable) table; + CatalogUtils.checkTableState(olapTable, table.getName()); + return olapTable; + } + + private void checkPartitionType(PartitionInfo partitionInfo) throws DdlException { + PartitionType partitionType = partitionInfo.getType(); + if (!partitionInfo.isRangePartition() && partitionType != PartitionType.LIST) { + throw new DdlException("Only support adding partition to range/list partitioned table"); + } + } + + private DistributionInfo getDistributionInfo(OlapTable olapTable, DistributionDesc distributionDesc) + throws DdlException { + DistributionInfo distributionInfo; + List baseSchema = olapTable.getBaseSchema(); + DistributionInfo defaultDistributionInfo = olapTable.getDefaultDistributionInfo(); + if (distributionDesc != null) { + distributionInfo = distributionDesc.toDistributionInfo(baseSchema); + // for now. we only support modify distribution's bucket num + if (distributionInfo.getType() != defaultDistributionInfo.getType()) { + throw new DdlException("Cannot assign different distribution type. default is: " + + defaultDistributionInfo.getType()); + } + + if (distributionInfo.getType() == DistributionInfo.DistributionInfoType.HASH) { + HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo; + List newDistriCols = MetaUtils.getColumnsByColumnIds(olapTable, + hashDistributionInfo.getDistributionColumns()); + List defaultDistriCols = MetaUtils.getColumnsByColumnIds(olapTable, + defaultDistributionInfo.getDistributionColumns()); + if (!newDistriCols.equals(defaultDistriCols)) { + throw new DdlException("Cannot assign hash distribution with different distribution cols. 
" + + "default is: " + defaultDistriCols); + } + if (hashDistributionInfo.getBucketNum() < 0) { + throw new DdlException("Cannot assign hash distribution buckets less than 0"); + } + } + if (distributionInfo.getType() == DistributionInfo.DistributionInfoType.RANDOM) { + RandomDistributionInfo randomDistributionInfo = (RandomDistributionInfo) distributionInfo; + if (randomDistributionInfo.getBucketNum() < 0) { + throw new DdlException("Cannot assign random distribution buckets less than 0"); + } + } + } else { + distributionInfo = defaultDistributionInfo; + } + return distributionInfo; + } + + private void checkColocation(Database db, OlapTable olapTable, DistributionInfo distributionInfo, + List partitionDescs) + throws DdlException { + if (GlobalStateMgr.getCurrentState().getColocateTableIndex().isColocateTable(olapTable.getId())) { + String fullGroupName = db.getId() + "_" + olapTable.getColocateGroup(); + ColocateGroupSchema groupSchema = GlobalStateMgr.getCurrentState() + .getColocateTableIndex().getGroupSchema(fullGroupName); + Preconditions.checkNotNull(groupSchema); + groupSchema.checkDistribution(olapTable.getIdToColumn(), distributionInfo); + for (PartitionDesc partitionDesc : partitionDescs) { + groupSchema.checkReplicationNum(partitionDesc.getReplicationNum()); + } + } + } + + private void checkDataProperty(List partitionDescs) { + for (PartitionDesc partitionDesc : partitionDescs) { + DataProperty dataProperty = partitionDesc.getPartitionDataProperty(); + Preconditions.checkNotNull(dataProperty); + } + } + + private List> createPartitionMap(Database db, OlapTable copiedTable, + List partitionDescs, + HashMap> partitionNameToTabletSet, + Set tabletIdSetForAll, + Set existPartitionNameSet, + long warehouseId) + throws DdlException { + List> partitionList = Lists.newArrayList(); + for (PartitionDesc partitionDesc : partitionDescs) { + long partitionId = GlobalStateMgr.getCurrentState().getNextId(); + DataProperty dataProperty = partitionDesc.getPartitionDataProperty(); + String partitionName = partitionDesc.getPartitionName(); + if (existPartitionNameSet.contains(partitionName)) { + continue; + } + Long version = partitionDesc.getVersionInfo(); + Set tabletIdSet = Sets.newHashSet(); + + copiedTable.getPartitionInfo().setDataProperty(partitionId, dataProperty); + copiedTable.getPartitionInfo().setTabletType(partitionId, partitionDesc.getTabletType()); + copiedTable.getPartitionInfo().setReplicationNum(partitionId, partitionDesc.getReplicationNum()); + copiedTable.getPartitionInfo().setIsInMemory(partitionId, partitionDesc.isInMemory()); + copiedTable.getPartitionInfo().setDataCacheInfo(partitionId, partitionDesc.getDataCacheInfo()); + + Partition partition = + createPartition(db, copiedTable, partitionId, partitionName, version, tabletIdSet, warehouseId); + + partitionList.add(Pair.create(partition, partitionDesc)); + tabletIdSetForAll.addAll(tabletIdSet); + partitionNameToTabletSet.put(partitionName, tabletIdSet); + } + return partitionList; + } + + private void checkIfMetaChange(OlapTable olapTable, OlapTable copiedTable, String tableName) throws DdlException { + // rollup index may be added or dropped during add partition operation. + // schema may be changed during add partition operation. 
+ boolean metaChanged = false; + if (olapTable.getIndexNameToId().size() != copiedTable.getIndexNameToId().size()) { + metaChanged = true; + } else { + // compare schemaHash + for (Map.Entry entry : olapTable.getIndexIdToMeta().entrySet()) { + long indexId = entry.getKey(); + if (!copiedTable.getIndexIdToMeta().containsKey(indexId)) { + metaChanged = true; + break; + } + if (copiedTable.getIndexIdToMeta().get(indexId).getSchemaHash() != + entry.getValue().getSchemaHash()) { + metaChanged = true; + break; + } + } + } + + if (olapTable.getDefaultDistributionInfo().getType() != + copiedTable.getDefaultDistributionInfo().getType()) { + metaChanged = true; + } + + if (metaChanged) { + throw new DdlException("Table[" + tableName + "]'s meta has been changed. try again."); + } + } + + private void updatePartitionInfo(PartitionInfo partitionInfo, List> partitionList, + Set existPartitionNameSet, boolean isTempPartition, + OlapTable olapTable) + throws DdlException { + if (partitionInfo instanceof RangePartitionInfo) { + RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo; + rangePartitionInfo.handleNewRangePartitionDescs(olapTable.getIdToColumn(), + partitionList, existPartitionNameSet, isTempPartition); + } else if (partitionInfo instanceof ListPartitionInfo) { + ListPartitionInfo listPartitionInfo = (ListPartitionInfo) partitionInfo; + listPartitionInfo.handleNewListPartitionDescs(olapTable.getIdToColumn(), + partitionList, existPartitionNameSet, isTempPartition); + } else { + throw new DdlException("Only support adding partition to range/list partitioned table"); + } + + if (isTempPartition) { + for (Pair entry : partitionList) { + Partition partition = entry.first; + if (!existPartitionNameSet.contains(partition.getName())) { + olapTable.addTempPartition(partition); + } + } + } else { + for (Pair entry : partitionList) { + Partition partition = entry.first; + if (!existPartitionNameSet.contains(partition.getName())) { + olapTable.addPartition(partition); + } + } + } + } + + @Override + public void renamePartition(Database db, Table table, PartitionRenameClause renameClause) throws DdlException { + OlapTable olapTable = (OlapTable) table; + if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { + throw new DdlException("Table[" + olapTable.getName() + "] is under " + olapTable.getState()); + } + + if (!olapTable.getPartitionInfo().isRangePartition()) { + throw new DdlException("Table[" + olapTable.getName() + "] is single partitioned. 
" + + "no need to rename partition name."); + } + + String partitionName = renameClause.getPartitionName(); + String newPartitionName = renameClause.getNewPartitionName(); + if (partitionName.equalsIgnoreCase(newPartitionName)) { + throw new DdlException("Same partition name"); + } + + Partition partition = olapTable.getPartition(partitionName); + if (partition == null) { + throw new DdlException("Partition[" + partitionName + "] does not exists"); + } + + // check if name is already used + if (olapTable.checkPartitionNameExist(newPartitionName)) { + throw new DdlException("Partition name[" + newPartitionName + "] is already used"); + } + + olapTable.renamePartition(partitionName, newPartitionName); + + // log + TableInfo tableInfo = TableInfo.createForPartitionRename(db.getId(), olapTable.getId(), partition.getId(), + newPartitionName); + GlobalStateMgr.getCurrentState().getLocalMetastore().renamePartition(tableInfo); + LOG.info("rename partition[{}] to {}", partitionName, newPartitionName); + } + + /* + * The entry of replacing partitions with temp partitions. + */ + public void replaceTempPartition(Database db, String tableName, ReplacePartitionClause clause) throws DdlException { + List partitionNames = clause.getPartitionNames(); + // duplicate temp partition will cause Incomplete transaction + List tempPartitionNames = + clause.getTempPartitionNames().stream().distinct().collect(Collectors.toList()); + + boolean isStrictRange = clause.isStrictRange(); + boolean useTempPartitionName = clause.useTempPartitionName(); + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + Table table = getTable(db.getFullName(), tableName); + if (table == null) { + ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName); + } + + if (!table.isOlapOrCloudNativeTable()) { + throw new DdlException("Table[" + tableName + "] is not OLAP table or LAKE table"); + } + + OlapTable olapTable = (OlapTable) table; + // check partition exist + for (String partName : partitionNames) { + if (!olapTable.checkPartitionNameExist(partName, false)) { + throw new DdlException("Partition[" + partName + "] does not exist"); + } + } + for (String partName : tempPartitionNames) { + if (!olapTable.checkPartitionNameExist(partName, true)) { + throw new DdlException("Temp partition[" + partName + "] does not exist"); + } + } + + partitionNames.stream().forEach(e -> + GlobalStateMgr.getCurrentState().getAnalyzeMgr().recordDropPartition(olapTable.getPartition(e).getId())); + olapTable.replaceTempPartitions(partitionNames, tempPartitionNames, isStrictRange, useTempPartitionName); + + // write log + ReplacePartitionOperationLog info = new ReplacePartitionOperationLog(db.getId(), olapTable.getId(), + partitionNames, tempPartitionNames, isStrictRange, useTempPartitionName); + GlobalStateMgr.getCurrentState().getLocalMetastore().replaceTempPartition(info); + LOG.info("finished to replace partitions {} with temp partitions {} from table: {}", + clause.getPartitionNames(), clause.getTempPartitionNames(), tableName); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } + + public void setPartitionVersion(AdminSetPartitionVersionStmt stmt) { + Database database = getDb(stmt.getTableName().getDb()); + if (database == null) { + throw ErrorReportException.report(ErrorCode.ERR_BAD_DB_ERROR, stmt.getTableName().getDb()); + } + Locker locker = new Locker(); + locker.lockDatabase(database.getId(), LockType.WRITE); + try { + Table table = getTable(database.getFullName(), 
stmt.getTableName().getTbl()); + if (table == null) { + throw ErrorReportException.report(ErrorCode.ERR_BAD_TABLE_ERROR, stmt.getTableName().getTbl()); + } + if (!table.isOlapTableOrMaterializedView()) { + throw ErrorReportException.report(ErrorCode.ERR_NOT_OLAP_TABLE, stmt.getTableName().getTbl()); + } + + PhysicalPartition physicalPartition; + OlapTable olapTable = (OlapTable) table; + if (stmt.getPartitionId() != -1) { + physicalPartition = olapTable.getPhysicalPartition(stmt.getPartitionId()); + if (physicalPartition == null) { + throw ErrorReportException.report(ErrorCode.ERR_NO_SUCH_PARTITION, stmt.getPartitionName()); + } + } else { + Partition partition = olapTable.getPartition(stmt.getPartitionName()); + if (partition == null) { + throw ErrorReportException.report(ErrorCode.ERR_NO_SUCH_PARTITION, stmt.getPartitionName()); + } + + physicalPartition = partition.getDefaultPhysicalPartition(); + if (partition.getSubPartitions().size() >= 2) { + throw ErrorReportException.report(ErrorCode.ERR_MULTI_SUB_PARTITION, stmt.getPartitionName()); + } + } + + long visibleVersionTime = System.currentTimeMillis(); + physicalPartition.setVisibleVersion(stmt.getVersion(), visibleVersionTime); + physicalPartition.setNextVersion(stmt.getVersion() + 1); + + PartitionVersionRecoveryInfo.PartitionVersion partitionVersion = + new PartitionVersionRecoveryInfo.PartitionVersion(database.getId(), table.getId(), + physicalPartition.getId(), stmt.getVersion()); + for (MaterializedIndex index : physicalPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { + for (Tablet tablet : index.getTablets()) { + if (!(tablet instanceof LocalTablet)) { + continue; + } + + LocalTablet localTablet = (LocalTablet) tablet; + for (Replica replica : localTablet.getAllReplicas()) { + if (replica.getVersion() > stmt.getVersion() && localTablet.getAllReplicas().size() > 1) { + replica.setBad(true); + LOG.warn("set tablet: {} on backend: {} to bad, " + + "because its version: {} is higher than partition visible version: {}", + tablet.getId(), replica.getBackendId(), replica.getVersion(), stmt.getVersion()); + } + } + } + } + GlobalStateMgr.getCurrentState().getLocalMetastore().setPartitionVersion( + new PartitionVersionRecoveryInfo(Lists.newArrayList(partitionVersion), visibleVersionTime)); + LOG.info("Successfully set partition: {} version to {}, table: {}, db: {}", + stmt.getPartitionName(), stmt.getVersion(), table.getName(), database.getFullName()); + } finally { + locker.unLockDatabase(database.getId(), LockType.WRITE); + } + } + + public Partition getPartition(String dbName, String tblName, String partitionName) { + return null; + } + + public Partition getPartition(Database db, OlapTable olapTable, Long partitionId) { + return olapTable.getPartition(partitionId); + } + + public List getAllPartitions(Database db, OlapTable olapTable) { + return new ArrayList<>(olapTable.getAllPartitions()); + } + + public void addSubPartitions(Database db, OlapTable table, Partition partition, + int numSubPartition, long warehouseId) throws DdlException { + try { + table.setAutomaticBucketing(true); + addSubPartitions(db, table, partition, numSubPartition, null, warehouseId); + } finally { + table.setAutomaticBucketing(false); + } + } + + private void addSubPartitions(Database db, OlapTable table, Partition partition, + int numSubPartition, String[] subPartitionNames, long warehouseId) throws DdlException { + OlapTable olapTable; + OlapTable copiedTable; + + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), 
LockType.READ); + try { + olapTable = checkTable(db, table.getId()); + + if (partition.getDistributionInfo().getType() != DistributionInfo.DistributionInfoType.RANDOM) { + throw new DdlException("Only support adding physical partition to random distributed table"); + } + + copiedTable = getShadowCopyTable(olapTable); + } finally { + locker.unLockDatabase(db.getId(), LockType.READ); + } + + Preconditions.checkNotNull(olapTable); + Preconditions.checkNotNull(copiedTable); + + List subPartitions = new ArrayList<>(); + // create physical partition + for (int i = 0; i < numSubPartition; i++) { + String name = subPartitionNames != null && subPartitionNames.length > i ? subPartitionNames[i] : null; + PhysicalPartition subPartition = createPhysicalPartition(name, db, copiedTable, partition, warehouseId); + subPartitions.add(subPartition); + } + + // build partitions + buildPartitions(db, copiedTable, subPartitions, warehouseId); + + // check again + if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) { + throw new DdlException("db " + db.getFullName() + + "(" + db.getId() + ") has been dropped"); + } + try { + // check if meta changed + checkIfMetaChange(olapTable, copiedTable, table.getName()); + + for (PhysicalPartition subPartition : subPartitions) { + // add sub partition + GlobalStateMgr.getCurrentState().getLocalMetastore().addPhysicalPartition(partition, subPartition); + olapTable.addPhysicalPartition(subPartition); + } + + olapTable.setShardGroupChanged(true); + + // add partition log + List partitionInfoV2List = Lists.newArrayList(); + for (PhysicalPartition subPartition : subPartitions) { + PhysicalPartitionPersistInfoV2 info = new PhysicalPartitionPersistInfoV2( + db.getId(), olapTable.getId(), partition.getId(), subPartition); + partitionInfoV2List.add(info); + } + AddSubPartitionsInfoV2 infos = new AddSubPartitionsInfoV2(partitionInfoV2List); + GlobalStateMgr.getCurrentState().getLocalMetastore().addSubPartitionLog(infos); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } + + private PhysicalPartition createPhysicalPartition(String name, Database db, OlapTable olapTable, + Partition partition, long warehouseId) throws DdlException { + long partitionId = partition.getId(); + DistributionInfo distributionInfo = olapTable.getDefaultDistributionInfo().copy(); + olapTable.inferDistribution(distributionInfo); + // create sub partition + Map indexMap = new HashMap<>(); + for (long indexId : olapTable.getIndexIdToMeta().keySet()) { + MaterializedIndex rollup = new MaterializedIndex(indexId, MaterializedIndex.IndexState.NORMAL); + indexMap.put(indexId, rollup); + } + + Long id = GlobalStateMgr.getCurrentState().getNextId(); + // physical partitions in the same logical partition use the same shard_group_id, + // so that the shards of this logical partition are more evenly distributed. 
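+        // In other words, a new sub-partition reuses the shard group of the logical partition's
+        // default physical partition instead of creating its own, so its shards (assuming a
+        // shared-data / lake table) are placed together with the existing ones rather than in a
+        // fresh group.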
+ long shardGroupId = partition.getDefaultPhysicalPartition().getShardGroupId(); + + if (name == null) { + name = partition.generatePhysicalPartitionName(id); + } + PhysicalPartition physicalParition = new PhysicalPartition( + id, name, partition.getId(), shardGroupId, indexMap.get(olapTable.getBaseIndexId())); + + PartitionInfo partitionInfo = olapTable.getPartitionInfo(); + short replicationNum = partitionInfo.getReplicationNum(partitionId); + TStorageMedium storageMedium = partitionInfo.getDataProperty(partitionId).getStorageMedium(); + for (Map.Entry entry : indexMap.entrySet()) { + long indexId = entry.getKey(); + MaterializedIndex index = entry.getValue(); + MaterializedIndexMeta indexMeta = olapTable.getIndexIdToMeta().get(indexId); + Set tabletIdSet = new HashSet<>(); + + // create tablets + TabletMeta tabletMeta = + new TabletMeta(db.getId(), olapTable.getId(), id, indexId, indexMeta.getSchemaHash(), + storageMedium, olapTable.isCloudNativeTableOrMaterializedView()); + + if (olapTable.isCloudNativeTableOrMaterializedView()) { + GlobalStateMgr.getCurrentState().getTabletManager().createLakeTablets( + olapTable, id, shardGroupId, index, distributionInfo, + tabletMeta, tabletIdSet, warehouseId); + } else { + GlobalStateMgr.getCurrentState().getTabletManager().createOlapTablets( + olapTable, index, Replica.ReplicaState.NORMAL, distributionInfo, + physicalParition.getVisibleVersion(), replicationNum, tabletMeta, tabletIdSet); + } + if (index.getId() != olapTable.getBaseIndexId()) { + // add rollup index to partition + physicalParition.createRollupIndex(index); + } + } + + return physicalParition; + } + + public void buildPartitions(Database db, OlapTable table, List partitions, long warehouseId) + throws DdlException { + if (partitions.isEmpty()) { + return; + } + int numAliveNodes = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getAliveBackendNumber(); + + if (RunMode.isSharedDataMode()) { + numAliveNodes = 0; + List computeNodeIds = GlobalStateMgr.getCurrentState().getWarehouseMgr().getAllComputeNodeIds(warehouseId); + for (long nodeId : computeNodeIds) { + if (GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackendOrComputeNode(nodeId).isAlive()) { + ++numAliveNodes; + } + } + } + if (numAliveNodes == 0) { + if (RunMode.isSharedDataMode()) { + throw new DdlException("no alive compute nodes"); + } else { + throw new DdlException("no alive backends"); + } + } + + int numReplicas = 0; + for (PhysicalPartition partition : partitions) { + numReplicas += partition.storageReplicaCount(); + } + + try { + GlobalStateMgr.getCurrentState().getConsistencyChecker().addCreatingTableId(table.getId()); + if (numReplicas > Config.create_table_max_serial_replicas) { + LOG.info("start to build {} partitions concurrently for table {}.{} with {} replicas", + partitions.size(), db.getFullName(), table.getName(), numReplicas); + TabletTaskExecutor.buildPartitionsConcurrently( + db.getId(), table, partitions, numReplicas, numAliveNodes, warehouseId); + } else { + LOG.info("start to build {} partitions sequentially for table {}.{} with {} replicas", + partitions.size(), db.getFullName(), table.getName(), numReplicas); + TabletTaskExecutor.buildPartitionsSequentially( + db.getId(), table, partitions, numReplicas, numAliveNodes, warehouseId); + } + } finally { + GlobalStateMgr.getCurrentState().getConsistencyChecker().deleteCreatingTableId(table.getId()); + } + } + + // create new partitions from source partitions. 
+ // new partitions have the same indexes as source partitions. + public List createTempPartitionsFromPartitions(Database db, Table table, + String namePostfix, List sourcePartitionIds, + List tmpPartitionIds, DistributionDesc distributionDesc, + long warehouseId) { + Preconditions.checkState(table instanceof OlapTable); + OlapTable olapTable = (OlapTable) table; + Map origPartitions = Maps.newHashMap(); + OlapTable copiedTbl = getCopiedTable(db, olapTable, sourcePartitionIds, origPartitions, distributionDesc != null); + copiedTbl.setDefaultDistributionInfo(olapTable.getDefaultDistributionInfo()); + + // 2. use the copied table to create partitions + List newPartitions = null; + // tabletIdSet to save all newly created tablet ids. + Set tabletIdSet = Sets.newHashSet(); + try { + newPartitions = getNewPartitionsFromPartitions(db, olapTable, sourcePartitionIds, origPartitions, + copiedTbl, namePostfix, tabletIdSet, tmpPartitionIds, distributionDesc, warehouseId); + buildPartitions(db, copiedTbl, newPartitions.stream().map(Partition::getSubPartitions) + .flatMap(p -> p.stream()).collect(Collectors.toList()), warehouseId); + } catch (Exception e) { + // create partition failed, remove all newly created tablets + for (Long tabletId : tabletIdSet) { + GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteTablet(tabletId); + } + LOG.warn("create partitions from partitions failed.", e); + throw new RuntimeException("create partitions failed: " + e.getMessage(), e); + } + return newPartitions; + } + + @VisibleForTesting + public List getNewPartitionsFromPartitions(Database db, OlapTable olapTable, + List sourcePartitionIds, + Map origPartitions, OlapTable copiedTbl, + String namePostfix, Set tabletIdSet, + List tmpPartitionIds, DistributionDesc distributionDesc, + long warehouseId) + throws DdlException { + List newPartitions = Lists.newArrayListWithCapacity(sourcePartitionIds.size()); + for (int i = 0; i < sourcePartitionIds.size(); ++i) { + long newPartitionId = tmpPartitionIds.get(i); + long sourcePartitionId = sourcePartitionIds.get(i); + String newPartitionName = origPartitions.get(sourcePartitionId) + namePostfix; + if (olapTable.checkPartitionNameExist(newPartitionName, true)) { + // to prevent creating the same partitions when failover + // this will happen when OverwriteJob crashed after created temp partitions, + // but before changing to PREPARED state + LOG.warn("partition:{} already exists in table:{}", newPartitionName, olapTable.getName()); + continue; + } + PartitionInfo partitionInfo = copiedTbl.getPartitionInfo(); + partitionInfo.setTabletType(newPartitionId, partitionInfo.getTabletType(sourcePartitionId)); + partitionInfo.setIsInMemory(newPartitionId, partitionInfo.getIsInMemory(sourcePartitionId)); + partitionInfo.setReplicationNum(newPartitionId, partitionInfo.getReplicationNum(sourcePartitionId)); + partitionInfo.setDataProperty(newPartitionId, partitionInfo.getDataProperty(sourcePartitionId)); + if (copiedTbl.isCloudNativeTableOrMaterializedView()) { + partitionInfo.setDataCacheInfo(newPartitionId, partitionInfo.getDataCacheInfo(sourcePartitionId)); + } + + Partition newPartition = null; + if (distributionDesc != null) { + DistributionInfo distributionInfo = distributionDesc.toDistributionInfo(olapTable.getColumns()); + if (distributionInfo.getBucketNum() == 0) { + Partition sourcePartition = olapTable.getPartition(sourcePartitionId); + olapTable.optimizeDistribution(distributionInfo, sourcePartition); + } + newPartition = createPartition( + db, copiedTbl, 
newPartitionId, newPartitionName, null, tabletIdSet, distributionInfo, warehouseId); + } else { + newPartition = createPartition(db, copiedTbl, newPartitionId, newPartitionName, null, tabletIdSet, warehouseId); + } + + newPartitions.add(newPartition); + } + return newPartitions; + } + + public Partition createPartition(Database db, OlapTable table, long partitionId, String partitionName, + Long version, Set tabletIdSet, long warehouseId) throws DdlException { + DistributionInfo distributionInfo = table.getDefaultDistributionInfo().copy(); + table.inferDistribution(distributionInfo); + + return createPartition(db, table, partitionId, partitionName, version, tabletIdSet, distributionInfo, warehouseId); + } + + public Partition createPartition(Database db, OlapTable table, long partitionId, String partitionName, + Long version, Set tabletIdSet, DistributionInfo distributionInfo, + long warehouseId) throws DdlException { + PartitionInfo partitionInfo = table.getPartitionInfo(); + Map indexMap = new HashMap<>(); + for (long indexId : table.getIndexIdToMeta().keySet()) { + MaterializedIndex rollup = new MaterializedIndex(indexId, MaterializedIndex.IndexState.NORMAL); + indexMap.put(indexId, rollup); + } + + // create shard group + long shardGroupId = 0; + if (table.isCloudNativeTableOrMaterializedView()) { + shardGroupId = GlobalStateMgr.getCurrentState().getStarOSAgent(). + createShardGroup(db.getId(), table.getId(), partitionId); + } + + Partition logicalPartition = new Partition( + partitionId, + partitionName, + distributionInfo); + + PhysicalPartition physicalPartition = new PhysicalPartition( + partitionId, + partitionName, + partitionId, + shardGroupId, + indexMap.get(table.getBaseIndexId())); + + logicalPartition.addSubPartition(physicalPartition); + + //LogicalPartition partition = new LogicalPartition(partitionId, partitionName, indexMap.get(table.getBaseIndexId()), distributionInfo, shardGroupId); + // version + if (version != null) { + physicalPartition.updateVisibleVersion(version); + } + + short replicationNum = partitionInfo.getReplicationNum(partitionId); + TStorageMedium storageMedium = partitionInfo.getDataProperty(partitionId).getStorageMedium(); + for (Map.Entry entry : indexMap.entrySet()) { + long indexId = entry.getKey(); + MaterializedIndex index = entry.getValue(); + MaterializedIndexMeta indexMeta = table.getIndexIdToMeta().get(indexId); + + // create tablets + TabletMeta tabletMeta = + new TabletMeta(db.getId(), table.getId(), partitionId, indexId, indexMeta.getSchemaHash(), + storageMedium, table.isCloudNativeTableOrMaterializedView()); + + if (table.isCloudNativeTableOrMaterializedView()) { + GlobalStateMgr.getCurrentState().getTabletManager(). + createLakeTablets(table, partitionId, shardGroupId, index, distributionInfo, + tabletMeta, tabletIdSet, warehouseId); + } else { + GlobalStateMgr.getCurrentState().getTabletManager(). 
+ createOlapTablets(table, index, Replica.ReplicaState.NORMAL, distributionInfo, + physicalPartition.getVisibleVersion(), replicationNum, tabletMeta, tabletIdSet); + } + if (index.getId() != table.getBaseIndexId()) { + // add rollup index to partition + physicalPartition.createRollupIndex(index); + } + } + return logicalPartition; + } + + public Database getDbIncludeRecycleBin(long dbId) { + Database db = getDb(dbId); + if (db == null) { + db = GlobalStateMgr.getCurrentState().getRecycleBin().getDatabase(dbId); + } + return db; + } + + public Table getTableIncludeRecycleBin(Database db, long tableId) { + Table table = getTable(db.getId(), tableId); + if (table == null) { + table = GlobalStateMgr.getCurrentState().getRecycleBin().getTable(db.getId(), tableId); + } + return table; + } + + public List
getTablesIncludeRecycleBin(Database db) { + List
tables = db.getTables(); + tables.addAll(GlobalStateMgr.getCurrentState().getRecycleBin().getTables(db.getId())); + return tables; + } + + public Partition getPartitionIncludeRecycleBin(OlapTable table, long partitionId) { + Partition partition = table.getPartition(partitionId); + if (partition == null) { + partition = GlobalStateMgr.getCurrentState().getRecycleBin().getPartition(partitionId); + } + return partition; + } + + public PhysicalPartition getPhysicalPartitionIncludeRecycleBin(OlapTable table, long physicalPartitionId) { + PhysicalPartition partition = table.getPhysicalPartition(physicalPartitionId); + if (partition == null) { + partition = GlobalStateMgr.getCurrentState().getRecycleBin().getPhysicalPartition(physicalPartitionId); + } + return partition; + } + + public Collection getPartitionsIncludeRecycleBin(OlapTable table) { + Collection partitions = new ArrayList<>(table.getPartitions()); + partitions.addAll(GlobalStateMgr.getCurrentState().getRecycleBin().getPartitions(table.getId())); + return partitions; + } + + public Collection getAllPartitionsIncludeRecycleBin(OlapTable table) { + Collection partitions = table.getAllPartitions(); + partitions.addAll(GlobalStateMgr.getCurrentState().getRecycleBin().getPartitions(table.getId())); + return partitions; + } + + // NOTE: result can be null, cause partition erase is not in db lock + public DataProperty getDataPropertyIncludeRecycleBin(PartitionInfo info, long partitionId) { + DataProperty dataProperty = info.getDataProperty(partitionId); + if (dataProperty == null) { + dataProperty = GlobalStateMgr.getCurrentState().getRecycleBin().getPartitionDataProperty(partitionId); + } + return dataProperty; + } + + // NOTE: result can be -1, cause partition erase is not in db lock + public short getReplicationNumIncludeRecycleBin(PartitionInfo info, long partitionId) { + short replicaNum = info.getReplicationNum(partitionId); + if (replicaNum == (short) -1) { + replicaNum = GlobalStateMgr.getCurrentState().getRecycleBin().getPartitionReplicationNum(partitionId); + } + return replicaNum; + } + + public List getDbIdsIncludeRecycleBin() { + List dbIds = getDbIds(); + dbIds.addAll(GlobalStateMgr.getCurrentState().getRecycleBin().getAllDbIds()); + return dbIds; + } + + @Override + public Pair getMaterializedViewIndex(String dbName, String indexName) { + Database database = getDb(dbName); + if (database == null) { + return null; + } + return database.getMaterializedViewIndex(indexName); + } + + @VisibleForTesting + public OlapTable getCopiedTable(Database db, OlapTable olapTable, List sourcePartitionIds, + Map origPartitions, boolean isOptimize) { + OlapTable copiedTbl; + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.READ); + try { + if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { + if (!isOptimize || olapTable.getState() != OlapTable.OlapTableState.SCHEMA_CHANGE) { + throw new RuntimeException("Table' state is not NORMAL: " + olapTable.getState() + + ", tableId:" + olapTable.getId() + ", tabletName:" + olapTable.getName()); + } + } + for (Long id : sourcePartitionIds) { + origPartitions.put(id, olapTable.getPartition(id).getName()); + } + copiedTbl = getShadowCopyTable(olapTable); + } finally { + locker.unLockDatabase(db.getId(), LockType.READ); + } + return copiedTbl; + } + + @VisibleForTesting + public OlapTable getCopiedTable(Database db, OlapTable olapTable, List sourcePartitionIds, + Map origPartitions) { + return getCopiedTable(db, olapTable, sourcePartitionIds, origPartitions, false); + } + + 
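+    // getShadowCopyTable() below builds a lightweight copy: it instantiates the matching concrete
+    // type (LakeMaterializedView, MaterializedView, LakeTable or plain OlapTable) and copies only
+    // the query-visible metadata via copyOnlyForQuery(), so callers can prepare new partitions
+    // from the copy without holding the database lock on the original table.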
private OlapTable getShadowCopyTable(OlapTable olapTable) { + OlapTable copiedTable; + if (olapTable instanceof LakeMaterializedView) { + copiedTable = new LakeMaterializedView(); + } else if (olapTable instanceof MaterializedView) { + copiedTable = new MaterializedView(); + } else if (olapTable instanceof LakeTable) { + copiedTable = new LakeTable(); + } else { + copiedTable = new OlapTable(); + } + + olapTable.copyOnlyForQuery(copiedTable); + return copiedTable; + } + + /* + * generate and check columns' order and key's existence + */ + public void validateColumns(List columns) throws DdlException { + if (columns.isEmpty()) { + ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_MUST_HAVE_COLUMNS); + } + + boolean encounterValue = false; + boolean hasKey = false; + for (Column column : columns) { + if (column.isKey()) { + if (encounterValue) { + ErrorReport.reportDdlException(ErrorCode.ERR_OLAP_KEY_MUST_BEFORE_VALUE); + } + hasKey = true; + } else { + encounterValue = true; + } + } + + if (!hasKey) { + ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_MUST_HAVE_KEYS); + } + } + + public void setLakeStorageInfo(Database db, OlapTable table, String storageVolumeId, Map properties) + throws DdlException { + DataCacheInfo dataCacheInfo = null; + try { + dataCacheInfo = PropertyAnalyzer.analyzeDataCacheInfo(properties); + } catch (AnalysisException e) { + throw new DdlException(e.getMessage()); + } + + // get service shard storage info from StarMgr + FilePathInfo pathInfo = !storageVolumeId.isEmpty() ? + GlobalStateMgr.getCurrentState().getStarOSAgent().allocateFilePath(storageVolumeId, db.getId(), table.getId()) : + GlobalStateMgr.getCurrentState().getStarOSAgent().allocateFilePath(db.getId(), table.getId()); + table.setStorageInfo(pathInfo, dataCacheInfo); + } + + public static PartitionInfo buildPartitionInfo(CreateMaterializedViewStatement stmt) throws DdlException { + ExpressionPartitionDesc expressionPartitionDesc = stmt.getPartitionExpDesc(); + if (expressionPartitionDesc != null) { + Expr expr = expressionPartitionDesc.getExpr(); + if (expr instanceof SlotRef) { + SlotRef slotRef = (SlotRef) expr; + if (slotRef.getType().getPrimitiveType() == PrimitiveType.VARCHAR) { + return new ListPartitionInfo(PartitionType.LIST, + Collections.singletonList(stmt.getPartitionColumn())); + } + } + if ((expr instanceof FunctionCallExpr)) { + FunctionCallExpr functionCallExpr = (FunctionCallExpr) expr; + if (functionCallExpr.getFnName().getFunction().equalsIgnoreCase(FunctionSet.STR2DATE)) { + Column partitionColumn = new Column(stmt.getPartitionColumn()); + partitionColumn.setType(com.starrocks.catalog.Type.DATE); + return expressionPartitionDesc.toPartitionInfo( + Collections.singletonList(partitionColumn), + Maps.newHashMap(), false); + } + } + return expressionPartitionDesc.toPartitionInfo( + Collections.singletonList(stmt.getPartitionColumn()), + Maps.newHashMap(), false); + } else { + return new SinglePartitionInfo(); + } + } + + public static void inactiveRelatedMaterializedView(Database db, Table olapTable, String reason) { + for (MvId mvId : olapTable.getRelatedMaterializedViews()) { + MaterializedView mv = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(db.getId(), mvId.getId()); + if (mv != null) { + LOG.warn("Inactive MV {}/{} because {}", mv.getName(), mv.getId(), reason); + mv.setInactiveAndReason(reason); + + // recursive inactive + inactiveRelatedMaterializedView(db, mv, + MaterializedViewExceptions.inactiveReasonForBaseTableActive(mv.getName())); + 
} else { + LOG.info("Ignore materialized view {} does not exists", mvId); + } + } + } + + public void onErasePartition(Partition partition) { + // remove tablet in inverted index + TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); + for (PhysicalPartition physicalPartition : partition.getSubPartitions()) { + for (MaterializedIndex index : physicalPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (Tablet tablet : index.getTablets()) { + long tabletId = tablet.getId(); + invertedIndex.deleteTablet(tabletId); + } + } + } + } + + // Set specified replica's status. If replica does not exist, just ignore it. + public void setReplicaStatus(AdminSetReplicaStatusStmt stmt) { + long tabletId = stmt.getTabletId(); + long backendId = stmt.getBackendId(); + Replica.ReplicaStatus status = stmt.getStatus(); + + TabletInvertedIndex tabletInvertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); + TabletMeta meta = tabletInvertedIndex.getTabletMeta(tabletId); + if (meta == null) { + LOG.info("tablet {} does not exist", tabletId); + return; + } + long dbId = meta.getDbId(); + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); + if (db == null) { + LOG.info("database {} of tablet {} does not exist", dbId, tabletId); + return; + } + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + Replica replica = tabletInvertedIndex.getReplica(tabletId, backendId); + if (replica == null) { + LOG.info("replica of tablet {} does not exist", tabletId); + return; + } + if (status == Replica.ReplicaStatus.BAD || status == Replica.ReplicaStatus.OK) { + if (replica.setBadForce(status == Replica.ReplicaStatus.BAD)) { + // Put this tablet into urgent table so that it can be repaired ASAP. + GlobalStateMgr.getCurrentState().getTabletChecker() + .setTabletForUrgentRepair(dbId, meta.getTableId(), meta.getPartitionId()); + SetReplicaStatusOperationLog log = + new SetReplicaStatusOperationLog(backendId, tabletId, status); + + GlobalStateMgr.getCurrentState().getLocalMetastore().setReplicaStatus(log); + LOG.info("set replica {} of tablet {} on backend {} as {}.", + replica.getId(), tabletId, backendId, status); + } + } + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } + + // entry of checking tablets operation + public void checkTablets(AdminCheckTabletsStmt stmt) { + AdminCheckTabletsStmt.CheckType type = stmt.getType(); + if (type == AdminCheckTabletsStmt.CheckType.CONSISTENCY) { + GlobalStateMgr.getCurrentState().getConsistencyChecker().addTabletsToCheck(stmt.getTabletIds()); + } + } +} diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/TabletHierarchyId.java b/fe/fe-core/src/main/java/com/starrocks/meta/TabletHierarchyId.java new file mode 100644 index 00000000000000..02a7cac9d1f298 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/meta/TabletHierarchyId.java @@ -0,0 +1,33 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.starrocks.meta;
+
+public class TabletHierarchyId {
+    public final long dbId;
+    public final long tableId;
+    public final long partitionId;
+    public final long physicalPartitionId;
+    public final long materializedIndexId;
+    public final long tabletId;
+
+    public TabletHierarchyId(long dbId, long tableId, long partitionId, long physicalPartitionId, long materializedIndexId,
+                             long tabletId) {
+        this.dbId = dbId;
+        this.tableId = tableId;
+        this.partitionId = partitionId;
+        this.physicalPartitionId = physicalPartitionId;
+        this.materializedIndexId = materializedIndexId;
+        this.tabletId = tabletId;
+    }
+}
diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/TabletInvertedIndexV2.java b/fe/fe-core/src/main/java/com/starrocks/meta/TabletInvertedIndexV2.java
new file mode 100644
index 00000000000000..54ca2d16830a84
--- /dev/null
+++ b/fe/fe-core/src/main/java/com/starrocks/meta/TabletInvertedIndexV2.java
@@ -0,0 +1,288 @@
+// Copyright 2021-present StarRocks, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.starrocks.meta;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.starrocks.catalog.Replica;
+import com.starrocks.catalog.TabletInvertedIndex;
+import com.starrocks.catalog.TabletMeta;
+import com.starrocks.persist.gson.GsonUtils;
+import com.starrocks.server.GlobalStateMgr;
+import com.starrocks.thrift.TStorageMedium;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+
+public class TabletInvertedIndexV2 extends TabletInvertedIndex {
+    public void readLock() {
+    }
+
+    public void readUnlock() {
+    }
+
+    public void writeLock() {
+    }
+
+    public void writeUnlock() {
+    }
+
+    // tablet id -> tablet meta
+    // tabletMetaMap/tabletId -> tabletMeta
+
+    // backend id -> replica list
+    // backingReplicaMetaTable/backendId/tabletId -> replicaId
+
+    // tablet id -> replica list
+    // replicaMetaTable/tabletId/replicaId -> Replica
+
+    // replica id -> tablet id
+    // replicaToTabletMap/replicaId -> tabletId
+
+    @Override
+    public Long getTabletIdByReplica(long replicaId) {
+        byte[] key = ByteCoder.encode(Lists.newArrayList("replicaToTabletMap", String.valueOf(replicaId)));
+        return MetadataHandler.getInstance().get(null, key, Long.class);
+    }
+
+    @Override
+    public TabletMeta getTabletMeta(long tabletId) {
+        byte[] key = ByteCoder.encode(Lists.newArrayList("tabletMetaMap", String.valueOf(tabletId)));
+        String tabletMetaJson = MetadataHandler.getInstance().get(null, key, String.class);
+        return GsonUtils.GSON.fromJson(tabletMetaJson, TabletMeta.class);
+    }
+
+    @Override
+    public List<TabletMeta> getTabletMetaList(List<Long> tabletIdList) {
+        List<TabletMeta> tabletMetaList = new ArrayList<>(tabletIdList.size());
+        for (Long tabletId : tabletIdList) {
+            TabletMeta tabletMeta = getTabletMeta(tabletId);
+            tabletMetaList.add(Objects.requireNonNullElse(tabletMeta, NOT_EXIST_TABLET_META));
+        }
+        return tabletMetaList;
+    }
+
+    @Override
+    public Map<Long, Replica> getReplicaMetaWithBackend(Long backendId) {
+        byte[] key = ByteCoder.encode(Lists.newArrayList("backingReplicaMetaTable", String.valueOf(backendId)));
+        List<byte[]> replicaList = MetadataHandler.getInstance().getPrefix(null, key);
+
+        Map<Long, Replica> replicaMap = new HashMap<>();
+        for (byte[] bytes : replicaList) {
+            List<String> values = ByteCoder.decode(bytes);
+            Long tabletId = Long.parseLong(values.get(3));
+            Long replicaId = Long.parseLong(values.get(4));
+            TabletMeta tabletMeta = getTabletMeta(tabletId);
+
+            Replica replica = getReplicaByTabletMeta(tabletMeta, replicaId);
+            replicaMap.put(tabletId, replica);
+        }
+
+        return replicaMap;
+    }
+
+    @Override
+    public void addTablet(long tabletId, TabletMeta tabletMeta) {
+        if (GlobalStateMgr.isCheckpointThread()) {
+            return;
+        }
+
+        byte[] key = ByteCoder.encode(Lists.newArrayList("tabletMetaMap", String.valueOf(tabletId)));
+        MetadataHandler.getInstance().put(null, new String(key),
+                GsonUtils.GSON.toJson(tabletMeta, TabletMeta.class));
+    }
+
+    @Override
+    public void deleteTablet(long tabletId) {
+        if (GlobalStateMgr.isCheckpointThread()) {
+            return;
+        }
+
+        TabletMeta tabletMeta = getTabletMeta(tabletId);
+        List<Replica> replicas = getReplicaByTabletMeta(tabletMeta);
+        for (Replica replica : replicas) {
+            //TODO
+        }
+
+        byte[] k1 = ByteCoder.encode(Lists.newArrayList("tabletMetaMap", String.valueOf(tabletId)));
+        MetadataHandler.getInstance().delete(null, k1);
+    }
+
+    @Override
+    public void addReplica(long tabletId, Replica replica) {
+        if (GlobalStateMgr.isCheckpointThread()) {
+            return;
+        }
+
+        byte[] k1 = ByteCoder.encode(Lists.newArrayList("backingReplicaMetaTable",
+                String.valueOf(replica.getBackendId()), String.valueOf(tabletId)));
+        MetadataHandler.getInstance().put(null, new String(k1), String.valueOf(replica.getId()));
+
+        byte[] k3 = ByteCoder.encode(Lists.newArrayList("replicaToTabletMap", String.valueOf(replica.getId())));
+        MetadataHandler.getInstance().put(null, new String(k3), String.valueOf(tabletId));
+    }
+
+    @Override
+    public void deleteReplica(long tabletId, long backendId) {
+        if (GlobalStateMgr.isCheckpointThread()) {
+            return;
+        }
+
+        Long replica = getReplicaId(tabletId, backendId);
+
+        byte[] k1 = ByteCoder.encode(Lists.newArrayList("backingReplicaMetaTable",
+                String.valueOf(backendId), String.valueOf(tabletId), String.valueOf(replica)));
+        MetadataHandler.getInstance().delete(null, k1);
+
+        byte[] k3 = ByteCoder.encode(Lists.newArrayList("replicaToTabletMap",
+                String.valueOf(replica), String.valueOf(tabletId)));
+        MetadataHandler.getInstance().delete(null, k3);
+    }
+
+    @Override
+    public Replica getReplica(long tabletId, long backendId) {
+        byte[] key = ByteCoder.encode(Lists.newArrayList("backingReplicaMetaTable",
+                String.valueOf(backendId),
+                String.valueOf(tabletId)));
+        List<byte[]> replicaList = MetadataHandler.getInstance().getPrefix(null, key);
+
+        for (byte[] bytes : replicaList) {
+            List<String> values = ByteCoder.decode(bytes);
+            Long replicaId = Long.parseLong(values.get(4));
+            TabletMeta tabletMeta = getTabletMeta(tabletId);
+
+            Replica replica = getReplicaByTabletMeta(tabletMeta, replicaId);
+            return replica;
+        }
+
+        return null;
+    }
+
+    public Long getReplicaId(long tabletId, long backendId) {
+        byte[] key = ByteCoder.encode(Lists.newArrayList("backingReplicaMetaTable",
+                String.valueOf(backendId),
+                String.valueOf(tabletId)));
+        List<byte[]> replicaList = MetadataHandler.getInstance().getPrefix(null, key);
+
+        for (byte[] bytes : replicaList) {
+            List<String> values = ByteCoder.decode(bytes);
+            Long replicaId = Long.parseLong(values.get(4));
+            return replicaId;
+        }
+
+        return null;
+    }
+
+    private Replica getReplicaByTabletMeta(TabletMeta tabletMeta, long replicaId) {
+        return null;
+    }
+
+    private List<Replica> getReplicaByTabletMeta(TabletMeta tabletMeta) {
+        return null;
+    }
+
+    @Override
+    public List<Replica> getReplicasByTabletId(long tabletId) {
+        TabletMeta tabletMeta = getTabletMeta(tabletId);
+        return getReplicaByTabletMeta(tabletMeta);
+    }
+
+    @Override
+    public List<Replica> getReplicasOnBackendByTabletIds(List<Long> tabletIds, long backendId) {
+
+        List<Replica> replicas = new ArrayList<>();
+        for (Long tabletId : tabletIds) {
+            Replica replica = getReplica(tabletId, backendId);
+            replicas.add(replica);
+        }
+
+        return replicas;
+    }
+
+    @Override
+    public List<Long> getTabletIdsByBackendId(long backendId) {
+        byte[] key = ByteCoder.encode(Lists.newArrayList("backingReplicaMetaTable", String.valueOf(backendId)));
+        List<byte[]> bytesList = MetadataHandler.getInstance().getPrefix(null, key);
+
+        Set<Long> tabletIds = new HashSet<>();
+        for (byte[] bytes : bytesList) {
+            List<String> values = ByteCoder.decode(bytes);
+            Long tabletId = Long.valueOf(values.get(3));
+            tabletIds.add(tabletId);
+        }
+
+        return new ArrayList<>(tabletIds);
+    }
+
+    @Override
+    public List<Long> getTabletIdsByBackendIdAndStorageMedium(long backendId, TStorageMedium storageMedium) {
+        List<Long> tabletIds = getTabletIdsByBackendId(backendId);
+
+        List<Long> t = new ArrayList<>();
+        for (Long tabletId : tabletIds) {
+            TabletMeta tabletMeta = getTabletMeta(tabletId);
+            if (tabletMeta.getStorageMedium() == storageMedium) {
+                t.add(tabletId);
+            }
+        }
+
+        return t;
+    }
+
+    @Override
+    public long getTabletNumByBackendId(long backendId) {
+        return getTabletIdsByBackendId(backendId).size();
+    }
+
+    @Override
+    public long getTabletNumByBackendIdAndPathHash(long backendId, long pathHash) {
+        Collection<Replica> replicas = getReplicaMetaWithBackend(backendId).values();
+        int count = 0;
+        for (Replica replica : replicas) {
+            if (replica.getPathHash() == pathHash) {
+                count++;
+            }
+        }
+
+        return count;
+    }
+
+    @Override
+    public Map<TStorageMedium, Long> getReplicaNumByBeIdAndStorageMedium(long backendId) {
+        Map<TStorageMedium, Long> replicaNumMap = Maps.newHashMap();
+        long hddNum = 0;
+        long ssdNum = 0;
+
+        List<Long> tabletIds = getTabletIdsByBackendId(backendId);
+        for (Long tabletId : tabletIds) {
+            TabletMeta tabletMeta = getTabletMeta(tabletId);
+            if (tabletMeta.getStorageMedium() == TStorageMedium.HDD) {
+                hddNum++;
+            } else {
+                ssdNum++;
+            }
+        }
+
+        replicaNumMap.put(TStorageMedium.HDD, hddNum);
+        replicaNumMap.put(TStorageMedium.SSD, ssdNum);
+        return replicaNumMap;
+    }
+}
diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/TabletManager.java b/fe/fe-core/src/main/java/com/starrocks/meta/TabletManager.java
new file mode 100644
index 00000000000000..67fcf119d7b26f
--- /dev/null
+++ b/fe/fe-core/src/main/java/com/starrocks/meta/TabletManager.java
@@ -0,0 +1,265 @@
+// Copyright 2021-present StarRocks, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package com.starrocks.meta;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Multimap;
+import com.starrocks.catalog.ColocateTableIndex;
+import com.starrocks.catalog.DistributionInfo;
+import com.starrocks.catalog.LocalTablet;
+import com.starrocks.catalog.MaterializedIndex;
+import com.starrocks.catalog.OlapTable;
+import com.starrocks.catalog.Replica;
+import com.starrocks.catalog.Tablet;
+import com.starrocks.catalog.TabletMeta;
+import com.starrocks.common.Config;
+import com.starrocks.common.DdlException;
+import com.starrocks.common.ErrorCode;
+import com.starrocks.common.ErrorReportException;
+import com.starrocks.common.util.concurrent.CountingLatch;
+import com.starrocks.lake.LakeTablet;
+import com.starrocks.persist.ColocatePersistInfo;
+import com.starrocks.server.GlobalStateMgr;
+import com.starrocks.server.WarehouseManager;
+import com.starrocks.system.SystemInfoService;
+import com.starrocks.thrift.TStorageMedium;
+import com.starrocks.warehouse.Warehouse;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+public class TabletManager {
+    private static final Logger LOG = LogManager.getLogger(TabletManager.class);
+    /**
+     * Concurrent colocate table creation processes depend on each other
+     * (even across databases), but we do not want to affect the performance
+     * of non-colocate table creation, so here we use a separate latch to
+     * synchronize only the creation of colocate tables.
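+     * <p>
+     * A creator that finds no bucket sequence for its group increments the latch (while holding the
+     * global lock) and decrements it in createOlapTablets once tablet creation finishes; other creators
+     * call awaitZero() first so that they only read a fully initialized bucket sequence.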
+     */
+    private final CountingLatch colocateTableCreateSyncer = new CountingLatch(0);
+
+    public void createLakeTablets(OlapTable table, long partitionId, long shardGroupId, MaterializedIndex index,
+                                  DistributionInfo distributionInfo, TabletMeta tabletMeta,
+                                  Set<Long> tabletIdSet, long warehouseId)
+            throws DdlException {
+        Preconditions.checkArgument(table.isCloudNativeTableOrMaterializedView());
+
+        DistributionInfo.DistributionInfoType distributionInfoType = distributionInfo.getType();
+        if (distributionInfoType != DistributionInfo.DistributionInfoType.HASH
+                && distributionInfoType != DistributionInfo.DistributionInfoType.RANDOM) {
+            throw new DdlException("Unknown distribution type: " + distributionInfoType);
+        }
+
+        Map<String, String> properties = new HashMap<>();
+        properties.put(LakeTablet.PROPERTY_KEY_TABLE_ID, Long.toString(table.getId()));
+        properties.put(LakeTablet.PROPERTY_KEY_PARTITION_ID, Long.toString(partitionId));
+        properties.put(LakeTablet.PROPERTY_KEY_INDEX_ID, Long.toString(index.getId()));
+        int bucketNum = distributionInfo.getBucketNum();
+        WarehouseManager warehouseManager = GlobalStateMgr.getCurrentState().getWarehouseMgr();
+        Optional<Long> workerGroupId = warehouseManager.selectWorkerGroupByWarehouseId(warehouseId);
+        if (workerGroupId.isEmpty()) {
+            Warehouse warehouse = warehouseManager.getWarehouse(warehouseId);
+            throw ErrorReportException.report(ErrorCode.ERR_NO_NODES_IN_WAREHOUSE, warehouse.getName());
+        }
+        List<Long> shardIds = GlobalStateMgr.getCurrentState().getStarOSAgent().createShards(bucketNum,
+                table.getPartitionFilePathInfo(partitionId), table.getPartitionFileCacheInfo(partitionId), shardGroupId,
+                null, properties, workerGroupId.get());
+        for (long shardId : shardIds) {
+            Tablet tablet = new LakeTablet(shardId);
+            index.addTablet(tablet, tabletMeta);
+            tabletIdSet.add(tablet.getId());
+        }
+    }
+
+    public void createOlapTablets(OlapTable table, MaterializedIndex index, Replica.ReplicaState replicaState,
+                                  DistributionInfo distributionInfo, long version, short replicationNum,
+                                  TabletMeta tabletMeta, Set<Long> tabletIdSet) throws DdlException {
+        Preconditions.checkArgument(replicationNum > 0);
+
+        ColocateTableIndex colocateTableIndex = GlobalStateMgr.getCurrentState().getColocateTableIndex();
+
+        DistributionInfo.DistributionInfoType distributionInfoType = distributionInfo.getType();
+        if (distributionInfoType != DistributionInfo.DistributionInfoType.HASH
+                && distributionInfoType != DistributionInfo.DistributionInfoType.RANDOM) {
+            throw new DdlException("Unknown distribution type: " + distributionInfoType);
+        }
+
+        List<List<Long>> backendsPerBucketSeq = null;
+        ColocateTableIndex.GroupId groupId = null;
+        boolean initBucketSeqWithSameOrigNameGroup = false;
+        boolean isColocateTable = colocateTableIndex.isColocateTable(tabletMeta.getTableId());
+        // If chooseBackendsArbitrary is true, this may be the first table of the colocation group,
+        // or just a normal table, and we can choose backends arbitrarily;
+        // otherwise, backends should be chosen from backendsPerBucketSeq.
+        boolean chooseBackendsArbitrary;
+
+        // We should synchronize the creation of colocate tables, otherwise it can have concurrent issues.
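+        // Note: colocate groups may span databases, so the synchronization below relies on the global
+        // lock plus the latch above rather than a database-level lock.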
+        // Consider the following situation:
+        // T1: P1 issues `create colocate table` and finds that there isn't a bucket sequence associated
+        //     with the colocate group, so it will initialize the bucket sequence for the first time
+        // T2: P2 does the same thing as P1
+        // T3: P1 sets the bucket sequence for the colocate group stored in `ColocateTableIndex`
+        // T4: P2 also sets the bucket sequence, overwriting what P1 just wrote
+        // T5: After P1 creates the colocate table, the actual tablet distribution won't match the bucket sequence
+        //     of the colocate group, and the balancer will create a lot of COLOCATE_MISMATCH tasks that shouldn't exist.
+        if (isColocateTable) {
+            try {
+                // Optimization: wait once before taking the global lock
+                colocateTableCreateSyncer.awaitZero();
+                // Since we have supported colocate tables in different databases,
+                // we should use the global lock, not the db lock.
+                GlobalStateMgr.getCurrentState().tryLock(false);
+                try {
+                    // Wait again, for safety.
+                    // We hold the global lock here, so use a timeout in case the lock is held for too long.
+                    colocateTableCreateSyncer.awaitZero(Config.catalog_try_lock_timeout_ms, TimeUnit.MILLISECONDS);
+                    // If this is a colocate table, try to get the backend seqs from the colocate index.
+                    groupId = colocateTableIndex.getGroup(tabletMeta.getTableId());
+                    backendsPerBucketSeq = colocateTableIndex.getBackendsPerBucketSeq(groupId);
+                    if (backendsPerBucketSeq.isEmpty()) {
+                        List<ColocateTableIndex.GroupId> colocateWithGroupsInOtherDb =
+                                colocateTableIndex.getColocateWithGroupsInOtherDb(groupId);
+                        if (!colocateWithGroupsInOtherDb.isEmpty()) {
+                            backendsPerBucketSeq =
+                                    colocateTableIndex.getBackendsPerBucketSeq(colocateWithGroupsInOtherDb.get(0));
+                            initBucketSeqWithSameOrigNameGroup = true;
+                        }
+                    }
+                    chooseBackendsArbitrary = backendsPerBucketSeq == null || backendsPerBucketSeq.isEmpty();
+                    if (chooseBackendsArbitrary) {
+                        colocateTableCreateSyncer.increment();
+                    }
+                } finally {
+                    GlobalStateMgr.getCurrentState().unlock();
+                }
+            } catch (InterruptedException e) {
+                LOG.warn("wait for concurrent colocate table creation finish failed, msg: {}",
+                        e.getMessage(), e);
+                Thread.currentThread().interrupt();
+                throw new DdlException("wait for concurrent colocate table creation finish failed", e);
+            }
+        } else {
+            chooseBackendsArbitrary = true;
+        }
+
+        try {
+            if (chooseBackendsArbitrary) {
+                backendsPerBucketSeq = Lists.newArrayList();
+            }
+            for (int i = 0; i < distributionInfo.getBucketNum(); ++i) {
+                // create a new tablet with randomly chosen backends
+                LocalTablet tablet = new LocalTablet(GlobalStateMgr.getCurrentState().getNextId());
+
+                // add tablet to inverted index first
+                index.addTablet(tablet, tabletMeta);
+                tabletIdSet.add(tablet.getId());
+
+                // get BackendIds
+                List<Long> chosenBackendIds;
+                if (chooseBackendsArbitrary) {
+                    // This is the first colocate table in the group, or just a normal table,
+                    // so randomly choose backends
+                    if (Config.enable_strict_storage_medium_check) {
+                        chosenBackendIds =
+                                chosenBackendIdBySeq(replicationNum, table.getLocation(), tabletMeta.getStorageMedium());
+                    } else {
+                        try {
+                            chosenBackendIds = chosenBackendIdBySeq(replicationNum, table.getLocation());
+                        } catch (DdlException ex) {
+                            throw new DdlException(String.format("%s, table=%s, default_replication_num=%d",
+                                    ex.getMessage(), table.getName(), Config.default_replication_num));
+                        }
+                    }
+                    backendsPerBucketSeq.add(chosenBackendIds);
+                } else {
+                    // get backends from the existing backend sequence
+                    chosenBackendIds = backendsPerBucketSeq.get(i);
+                }
+
+                // create replicas
+                for (long backendId : chosenBackendIds) {
+                    long replicaId = GlobalStateMgr.getCurrentState().getNextId();
+                    Replica replica = new Replica(replicaId, backendId, replicaState, version,
+                            tabletMeta.getOldSchemaHash());
+                    tablet.addReplica(replica);
+                }
+                Preconditions.checkState(chosenBackendIds.size() == replicationNum,
+                        chosenBackendIds.size() + " vs. " + replicationNum);
+            }
+
+            // In the following two situations, we should set the bucket seq for the colocate group and persist the info:
+            // 1. This is the first time we add a table to the colocate group, and it doesn't have the same original name
+            //    as a colocate group in another database.
+            // 2. It's indeed the first time, but it should colocate with a group in another db
+            //    (because of having the same original name); in that case we use the bucket
+            //    seq of the other group to initialize our own.
+            if ((groupId != null && chooseBackendsArbitrary) || initBucketSeqWithSameOrigNameGroup) {
+                colocateTableIndex.addBackendsPerBucketSeq(groupId, backendsPerBucketSeq);
+                ColocatePersistInfo info =
+                        ColocatePersistInfo.createForBackendsPerBucketSeq(groupId, backendsPerBucketSeq);
+                GlobalStateMgr.getCurrentState().getEditLog().logColocateBackendsPerBucketSeq(info);
+            }
+        } finally {
+            if (isColocateTable && chooseBackendsArbitrary) {
+                colocateTableCreateSyncer.decrement();
+            }
+        }
+    }
+
+    // randomly choose backends for a tablet's replicas, honoring the storage medium
+    private List<Long> chosenBackendIdBySeq(int replicationNum, Multimap<String, String> locReq,
+                                            TStorageMedium storageMedium)
+            throws DdlException {
+        List<Long> chosenBackendIds =
+                GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getNodeSelector()
+                        .seqChooseBackendIdsByStorageMedium(replicationNum,
+                                true, true, locReq, storageMedium);
+        if (CollectionUtils.isEmpty(chosenBackendIds)) {
+            throw new DdlException(
+                    "Failed to find enough hosts with storage medium " + storageMedium +
+                            " at all backends, number of replicas needed: " +
+                            replicationNum + ". Storage medium check failure can be forcefully ignored by executing " +
+                            "'ADMIN SET FRONTEND CONFIG (\"enable_strict_storage_medium_check\" = \"false\");', " +
+                            "but incompatible medium type can cause balance problem, so we strongly recommend" +
+                            " creating table with compatible 'storage_medium' property set.");
+        }
+        return chosenBackendIds;
+    }
+
+    private List<Long> chosenBackendIdBySeq(int replicationNum, Multimap<String, String> locReq) throws DdlException {
+        SystemInfoService systemInfoService = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo();
+        List<Long> chosenBackendIds = systemInfoService.getNodeSelector()
+                .seqChooseBackendIds(replicationNum, true, true, locReq);
+        if (!CollectionUtils.isEmpty(chosenBackendIds)) {
+            return chosenBackendIds;
+        } else if (replicationNum > 1) {
+            List<Long> backendIds = systemInfoService.getBackendIds(true);
+            throw new DdlException(
+                    String.format("Table replication num should be less than or equal to the number of available BE nodes. "
+                            + "You can change this default by setting the replication_num table properties. "
+                            + "Current alive backend is [%s]. ", Joiner.on(",").join(backendIds)));
+        } else {
+            throw new DdlException("No alive nodes");
+        }
+    }
+}
diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/TxnMeta.java b/fe/fe-core/src/main/java/com/starrocks/meta/TxnMeta.java
new file mode 100644
index 00000000000000..b03e6130601bf3
--- /dev/null
+++ b/fe/fe-core/src/main/java/com/starrocks/meta/TxnMeta.java
@@ -0,0 +1,24 @@
+// Copyright 2021-present StarRocks, Inc. All rights reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package com.starrocks.meta; + +import com.sleepycat.je.Transaction; + +public class TxnMeta { + private Transaction transaction; + + public void setTransaction(Transaction transaction) { + this.transaction = transaction; + } +} diff --git a/fe/fe-core/src/main/java/com/starrocks/meta/VersionManager.java b/fe/fe-core/src/main/java/com/starrocks/meta/VersionManager.java new file mode 100644 index 00000000000000..c21c15850797f6 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/meta/VersionManager.java @@ -0,0 +1,22 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package com.starrocks.meta; + +public class VersionManager { + public void setDataVersion(long partitionId, long dataVersion) { + //OlapTable table; + //Partition partition = table.getPartition(partitionId); + //partition.setDataVersion(dataVersion); + } +} diff --git a/fe/fe-core/src/main/java/com/starrocks/persist/CreateDbInfo.java b/fe/fe-core/src/main/java/com/starrocks/persist/CreateDbInfo.java index ae0cb774b1d30a..c2bb4ca6c042fd 100644 --- a/fe/fe-core/src/main/java/com/starrocks/persist/CreateDbInfo.java +++ b/fe/fe-core/src/main/java/com/starrocks/persist/CreateDbInfo.java @@ -17,13 +17,14 @@ import com.google.gson.annotations.SerializedName; import com.starrocks.common.io.Text; import com.starrocks.common.io.Writable; +import com.starrocks.meta.TxnMeta; import com.starrocks.persist.gson.GsonUtils; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -public class CreateDbInfo implements Writable { +public class CreateDbInfo extends TxnMeta implements Writable { @SerializedName(value = "i") private long id; diff --git a/fe/fe-core/src/main/java/com/starrocks/persist/DatabaseInfo.java b/fe/fe-core/src/main/java/com/starrocks/persist/DatabaseInfo.java index 7ab02e6d9359eb..1fd38a4b173ad3 100644 --- a/fe/fe-core/src/main/java/com/starrocks/persist/DatabaseInfo.java +++ b/fe/fe-core/src/main/java/com/starrocks/persist/DatabaseInfo.java @@ -35,16 +35,18 @@ package com.starrocks.persist; import com.google.gson.annotations.SerializedName; +import com.sleepycat.je.Transaction; import com.starrocks.cluster.ClusterNamespace; import com.starrocks.common.io.Text; import com.starrocks.common.io.Writable; +import com.starrocks.meta.TxnMeta; import com.starrocks.sql.ast.AlterDatabaseQuotaStmt.QuotaType; import java.io.DataInput; import java.io.DataOutput; import 
java.io.IOException; -public class DatabaseInfo implements Writable { +public class DatabaseInfo extends TxnMeta implements Writable { @SerializedName("db") private String dbName; @@ -130,4 +132,5 @@ public QuotaType getQuotaType() { return quotaType; } + public Transaction transaction; } diff --git a/fe/fe-core/src/main/java/com/starrocks/persist/EditLog.java b/fe/fe-core/src/main/java/com/starrocks/persist/EditLog.java index a1889b354bbff0..c1d7f007fd539e 100644 --- a/fe/fe-core/src/main/java/com/starrocks/persist/EditLog.java +++ b/fe/fe-core/src/main/java/com/starrocks/persist/EditLog.java @@ -64,12 +64,10 @@ import com.starrocks.journal.JournalInconsistentException; import com.starrocks.journal.JournalTask; import com.starrocks.journal.bdbje.Timestamp; -import com.starrocks.load.DeleteInfo; import com.starrocks.load.DeleteMgr; import com.starrocks.load.ExportFailMsg; import com.starrocks.load.ExportJob; import com.starrocks.load.ExportMgr; -import com.starrocks.load.LoadErrorHub; import com.starrocks.load.MultiDeleteInfo; import com.starrocks.load.loadv2.LoadJob.LoadJobStateUpdateInfo; import com.starrocks.load.loadv2.LoadJobFinalOperation; @@ -82,14 +80,12 @@ import com.starrocks.privilege.RolePrivilegeCollectionV2; import com.starrocks.privilege.UserPrivilegeCollectionV2; import com.starrocks.proto.EncryptionKeyPB; -import com.starrocks.qe.SessionVariable; import com.starrocks.qe.VariableMgr; import com.starrocks.replication.ReplicationJob; import com.starrocks.scheduler.Task; import com.starrocks.scheduler.mv.MVEpoch; import com.starrocks.scheduler.mv.MVMaintenanceJob; import com.starrocks.scheduler.persist.ArchiveTaskRunsLog; -import com.starrocks.scheduler.persist.DropTaskRunsLog; import com.starrocks.scheduler.persist.DropTasksLog; import com.starrocks.scheduler.persist.TaskRunPeriodStatusChange; import com.starrocks.scheduler.persist.TaskRunStatus; @@ -144,7 +140,6 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) throws JournalInconsistentException { short opCode = journal.getOpCode(); if (opCode != OperationType.OP_SAVE_NEXTID - && opCode != OperationType.OP_TIMESTAMP && opCode != OperationType.OP_TIMESTAMP_V2) { LOG.debug("replay journal op code: {}", opCode); } @@ -185,7 +180,6 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) metastore.replayDropDb(dropDbInfo.getDbName(), dropDbInfo.isForceDrop()); break; } - case OperationType.OP_ALTER_DB: case OperationType.OP_ALTER_DB_V2: { DatabaseInfo dbInfo = (DatabaseInfo) journal.getData(); @@ -194,16 +188,14 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) } case OperationType.OP_ERASE_DB: { Text dbId = (Text) journal.getData(); - globalStateMgr.getLocalMetastore().replayEraseDatabase(Long.parseLong(dbId.toString())); + globalStateMgr.getRecycleBin().replayEraseDatabase(Long.parseLong(dbId.toString())); break; } - case OperationType.OP_RECOVER_DB: case OperationType.OP_RECOVER_DB_V2: { RecoverInfo info = (RecoverInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayRecoverDatabase(info); break; } - case OperationType.OP_RENAME_DB: case OperationType.OP_RENAME_DB_V2: { DatabaseInfo dbInfo = (DatabaseInfo) journal.getData(); String dbName = dbInfo.getDbName(); @@ -225,7 +217,6 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) globalStateMgr.getLocalMetastore().replayCreateTable(info); break; } - case OperationType.OP_DROP_TABLE: case OperationType.OP_DROP_TABLE_V2: { DropInfo info = (DropInfo) 
journal.getData(); Database db = globalStateMgr.getLocalMetastore().getDb(info.getDbId()); @@ -273,59 +264,50 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) globalStateMgr.getLocalMetastore().replayDropPartitions(info); break; } - case OperationType.OP_MODIFY_PARTITION: case OperationType.OP_MODIFY_PARTITION_V2: { ModifyPartitionInfo info = (ModifyPartitionInfo) journal.getData(); LOG.info("Begin to unprotect modify partition. db = " + info.getDbId() + " table = " + info.getTableId() + " partitionId = " + info.getPartitionId()); - globalStateMgr.getAlterJobMgr().replayModifyPartition(info); + globalStateMgr.getLocalMetastore().replayModifyPartition(info); break; } case OperationType.OP_BATCH_MODIFY_PARTITION: { BatchModifyPartitionsInfo info = (BatchModifyPartitionsInfo) journal.getData(); for (ModifyPartitionInfo modifyPartitionInfo : info.getModifyPartitionInfos()) { - globalStateMgr.getAlterJobMgr().replayModifyPartition(modifyPartitionInfo); + globalStateMgr.getLocalMetastore().replayModifyPartition(modifyPartitionInfo); } break; } - case OperationType.OP_ERASE_TABLE: { - Text tableId = (Text) journal.getData(); - globalStateMgr.getLocalMetastore().replayEraseTable(Long.parseLong(tableId.toString())); - break; - } case OperationType.OP_ERASE_MULTI_TABLES: { MultiEraseTableInfo multiEraseTableInfo = (MultiEraseTableInfo) journal.getData(); - globalStateMgr.getLocalMetastore().replayEraseMultiTables(multiEraseTableInfo); + globalStateMgr.getRecycleBin().replayEraseTable(multiEraseTableInfo.getTableIds()); break; } case OperationType.OP_DISABLE_TABLE_RECOVERY: { DisableTableRecoveryInfo disableTableRecoveryInfo = (DisableTableRecoveryInfo) journal.getData(); - globalStateMgr.getLocalMetastore().replayDisableTableRecovery(disableTableRecoveryInfo); + globalStateMgr.getRecycleBin().replayDisableTableRecovery(disableTableRecoveryInfo.getTableIds()); break; } case OperationType.OP_DISABLE_PARTITION_RECOVERY: { DisablePartitionRecoveryInfo disableRecoveryInfo = (DisablePartitionRecoveryInfo) journal.getData(); - globalStateMgr.getLocalMetastore().replayDisablePartitionRecovery(disableRecoveryInfo); + globalStateMgr.getRecycleBin().replayDisablePartitionRecovery(disableRecoveryInfo.getPartitionId()); break; } case OperationType.OP_ERASE_PARTITION: { Text partitionId = (Text) journal.getData(); - globalStateMgr.getLocalMetastore().replayErasePartition(Long.parseLong(partitionId.toString())); + globalStateMgr.getRecycleBin().replayErasePartition(Long.parseLong(partitionId.toString())); break; } - case OperationType.OP_RECOVER_TABLE: case OperationType.OP_RECOVER_TABLE_V2: { RecoverInfo info = (RecoverInfo) journal.getData(); - globalStateMgr.getLocalMetastore().replayRecoverTable(info); + globalStateMgr.getRecycleBin().replayRecoverTable(info); break; } - case OperationType.OP_RECOVER_PARTITION: case OperationType.OP_RECOVER_PARTITION_V2: { RecoverInfo info = (RecoverInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayRecoverPartition(info); break; } - case OperationType.OP_RENAME_TABLE: case OperationType.OP_RENAME_TABLE_V2: { TableInfo info = (TableInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayRenameTable(info); @@ -334,38 +316,37 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) case OperationType.OP_CHANGE_MATERIALIZED_VIEW_REFRESH_SCHEME: { ChangeMaterializedViewRefreshSchemeLog log = (ChangeMaterializedViewRefreshSchemeLog) journal.getData(); - 
globalStateMgr.getAlterJobMgr().replayChangeMaterializedViewRefreshScheme(log); + globalStateMgr.getLocalMetastore().replayChangeMaterializedViewRefreshScheme(log); break; } case OperationType.OP_ALTER_MATERIALIZED_VIEW_PROPERTIES: { ModifyTablePropertyOperationLog log = (ModifyTablePropertyOperationLog) journal.getData(); - globalStateMgr.getAlterJobMgr().replayAlterMaterializedViewProperties(opCode, log); + globalStateMgr.getLocalMetastore().replayAlterMaterializedViewProperties(opCode, log); break; } case OperationType.OP_ALTER_MATERIALIZED_VIEW_STATUS: { AlterMaterializedViewStatusLog log = (AlterMaterializedViewStatusLog) journal.getData(); - globalStateMgr.getAlterJobMgr().replayAlterMaterializedViewStatus(log); + globalStateMgr.getLocalMetastore().replayAlterMaterializedViewStatus(log); break; } case OperationType.OP_ALTER_MATERIALIZED_VIEW_BASE_TABLE_INFOS: { AlterMaterializedViewBaseTableInfosLog log = (AlterMaterializedViewBaseTableInfosLog) journal.getData(); - globalStateMgr.getAlterJobMgr().replayAlterMaterializedViewBaseTableInfos(log); + globalStateMgr.getLocalMetastore().replayAlterMaterializedViewBaseTableInfos(log); break; } case OperationType.OP_RENAME_MATERIALIZED_VIEW: { RenameMaterializedViewLog log = (RenameMaterializedViewLog) journal.getData(); - globalStateMgr.getAlterJobMgr().replayRenameMaterializedView(log); + globalStateMgr.getLocalMetastore().replayRenameMaterializedView(log); break; } case OperationType.OP_MODIFY_VIEW_DEF: { AlterViewInfo info = (AlterViewInfo) journal.getData(); - globalStateMgr.getAlterJobMgr().alterView(info); + globalStateMgr.getLocalMetastore().alterView(info); break; } - case OperationType.OP_RENAME_PARTITION: case OperationType.OP_RENAME_PARTITION_V2: { TableInfo info = (TableInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayRenamePartition(info); @@ -376,29 +357,26 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) globalStateMgr.getLocalMetastore().replayRenameColumn(info); break; } - case OperationType.OP_BACKUP_JOB: case OperationType.OP_BACKUP_JOB_V2: { BackupJob job = (BackupJob) journal.getData(); globalStateMgr.getBackupHandler().replayAddJob(job); break; } - case OperationType.OP_RESTORE_JOB: case OperationType.OP_RESTORE_JOB_V2: { RestoreJob job = (RestoreJob) journal.getData(); job.setGlobalStateMgr(globalStateMgr); globalStateMgr.getBackupHandler().replayAddJob(job); break; } - case OperationType.OP_DROP_ROLLUP: case OperationType.OP_DROP_ROLLUP_V2: { DropInfo info = (DropInfo) journal.getData(); - globalStateMgr.getRollupHandler().replayDropRollup(info, globalStateMgr); + globalStateMgr.getLocalMetastore().replayDropRollup(info, globalStateMgr); break; } case OperationType.OP_BATCH_DROP_ROLLUP: { BatchDropInfo batchDropInfo = (BatchDropInfo) journal.getData(); for (long indexId : batchDropInfo.getIndexIdSet()) { - globalStateMgr.getRollupHandler().replayDropRollup( + globalStateMgr.getLocalMetastore().replayDropRollup( new DropInfo(batchDropInfo.getDbId(), batchDropInfo.getTableId(), indexId, false), globalStateMgr); } @@ -407,42 +385,24 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) case OperationType.OP_FINISH_CONSISTENCY_CHECK: case OperationType.OP_FINISH_CONSISTENCY_CHECK_V2: { ConsistencyCheckInfo info = (ConsistencyCheckInfo) journal.getData(); - globalStateMgr.getConsistencyChecker().replayFinishConsistencyCheck(info, globalStateMgr); + globalStateMgr.getLocalMetastore().replayFinishConsistencyCheck(info); break; } - case 
OperationType.OP_CLEAR_ROLLUP_INFO: { - // Nothing to do - break; - } - case OperationType.OP_RENAME_ROLLUP: case OperationType.OP_RENAME_ROLLUP_V2: { TableInfo info = (TableInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayRenameRollup(info); break; } - case OperationType.OP_EXPORT_CREATE: case OperationType.OP_EXPORT_CREATE_V2: { ExportJob job = (ExportJob) journal.getData(); ExportMgr exportMgr = globalStateMgr.getExportMgr(); exportMgr.replayCreateExportJob(job); break; } - case OperationType.OP_EXPORT_UPDATE_STATE: - ExportJob.StateTransfer op = (ExportJob.StateTransfer) journal.getData(); - ExportMgr exportMgr = globalStateMgr.getExportMgr(); - exportMgr.replayUpdateJobState(op.getJobId(), op.getState()); - break; case OperationType.OP_EXPORT_UPDATE_INFO_V2: - case OperationType.OP_EXPORT_UPDATE_INFO: ExportJob.ExportUpdateInfo exportUpdateInfo = (ExportJob.ExportUpdateInfo) journal.getData(); globalStateMgr.getExportMgr().replayUpdateJobInfo(exportUpdateInfo); break; - case OperationType.OP_FINISH_DELETE: { - DeleteInfo info = (DeleteInfo) journal.getData(); - DeleteMgr deleteHandler = globalStateMgr.getDeleteMgr(); - deleteHandler.replayDelete(info, globalStateMgr); - break; - } case OperationType.OP_FINISH_MULTI_DELETE: { MultiDeleteInfo info = (MultiDeleteInfo) journal.getData(); DeleteMgr deleteHandler = globalStateMgr.getDeleteMgr(); @@ -455,13 +415,11 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) globalStateMgr.getLocalMetastore().replayAddReplica(info); break; } - case OperationType.OP_UPDATE_REPLICA: case OperationType.OP_UPDATE_REPLICA_V2: { ReplicaPersistInfo info = (ReplicaPersistInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayUpdateReplica(info); break; } - case OperationType.OP_DELETE_REPLICA: case OperationType.OP_DELETE_REPLICA_V2: { ReplicaPersistInfo info = (ReplicaPersistInfo) journal.getData(); globalStateMgr.getLocalMetastore().replayDeleteReplica(info); @@ -483,33 +441,27 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) .replayDropComputeNode(dropComputeNodeLog.getComputeNodeId()); break; } - case OperationType.OP_ADD_BACKEND: case OperationType.OP_ADD_BACKEND_V2: { Backend be = (Backend) journal.getData(); GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().replayAddBackend(be); break; } - case OperationType.OP_DROP_BACKEND: case OperationType.OP_DROP_BACKEND_V2: { Backend be = (Backend) journal.getData(); GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().replayDropBackend(be); break; } - case OperationType.OP_BACKEND_STATE_CHANGE: case OperationType.OP_BACKEND_STATE_CHANGE_V2: { Backend be = (Backend) journal.getData(); GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().updateInMemoryStateBackend(be); break; } - case OperationType.OP_ADD_FIRST_FRONTEND: case OperationType.OP_ADD_FIRST_FRONTEND_V2: - case OperationType.OP_ADD_FRONTEND: case OperationType.OP_ADD_FRONTEND_V2: { Frontend fe = (Frontend) journal.getData(); globalStateMgr.getNodeMgr().replayAddFrontend(fe); break; } - case OperationType.OP_REMOVE_FRONTEND: case OperationType.OP_REMOVE_FRONTEND_V2: { Frontend fe = (Frontend) journal.getData(); globalStateMgr.getNodeMgr().replayDropFrontend(fe); @@ -518,28 +470,21 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) } break; } - case OperationType.OP_UPDATE_FRONTEND: case OperationType.OP_UPDATE_FRONTEND_V2: { Frontend fe = (Frontend) journal.getData(); 
globalStateMgr.getNodeMgr().replayUpdateFrontend(fe); break; } - case OperationType.OP_TIMESTAMP: case OperationType.OP_TIMESTAMP_V2: { Timestamp stamp = (Timestamp) journal.getData(); globalStateMgr.setSynchronizedTime(stamp.getTimestamp()); break; } - case OperationType.OP_LEADER_INFO_CHANGE: case OperationType.OP_LEADER_INFO_CHANGE_V2: { LeaderInfo info = (LeaderInfo) journal.getData(); globalStateMgr.setLeader(info); break; } - //compatible with old community meta, newly added log using OP_META_VERSION_V2 - case OperationType.OP_META_VERSION: { - break; - } case OperationType.OP_META_VERSION_V2: { MetaVersion metaVersion = (MetaVersion) journal.getData(); if (!MetaVersion.isCompatible(metaVersion.getStarRocksVersion(), FeConstants.STARROCKS_META_VERSION)) { @@ -550,17 +495,11 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) MetaContext.get().setStarRocksMetaVersion(metaVersion.getStarRocksVersion()); break; } - case OperationType.OP_CREATE_CLUSTER: { - // ignore - break; - } - case OperationType.OP_ADD_BROKER: case OperationType.OP_ADD_BROKER_V2: { final BrokerMgr.ModifyBrokerInfo param = (BrokerMgr.ModifyBrokerInfo) journal.getData(); globalStateMgr.getBrokerMgr().replayAddBrokers(param.brokerName, param.brokerAddresses); break; } - case OperationType.OP_DROP_BROKER: case OperationType.OP_DROP_BROKER_V2: { final BrokerMgr.ModifyBrokerInfo param = (BrokerMgr.ModifyBrokerInfo) journal.getData(); globalStateMgr.getBrokerMgr().replayDropBrokers(param.brokerName, param.brokerAddresses); @@ -571,16 +510,6 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) globalStateMgr.getBrokerMgr().replayDropAllBroker(param); break; } - case OperationType.OP_SET_LOAD_ERROR_HUB: { - final LoadErrorHub.Param param = (LoadErrorHub.Param) journal.getData(); - globalStateMgr.getLoadInstance().setLoadErrorHubInfo(param); - break; - } - case OperationType.OP_UPDATE_CLUSTER_AND_BACKENDS: { - final BackendIdsUpdateInfo info = (BackendIdsUpdateInfo) journal.getData(); - globalStateMgr.replayUpdateClusterAndBackends(info); - break; - } case OperationType.OP_UPSERT_TRANSACTION_STATE_V2: { final TransactionState state = (TransactionState) journal.getData(); GlobalStateMgr.getCurrentState().getGlobalTransactionMgr().replayUpsertTransactionState(state); @@ -593,7 +522,6 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) LOG.debug("opcode: {}, txn ids: {}", opCode, stateBatch.getTxnIds()); break; } - case OperationType.OP_CREATE_REPOSITORY: case OperationType.OP_CREATE_REPOSITORY_V2: { Repository repository = (Repository) journal.getData(); globalStateMgr.getBackupHandler().getRepoMgr().addAndInitRepoIfNotExist(repository, true); @@ -609,48 +537,36 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) globalStateMgr.getLocalMetastore().replayTruncateTable(info); break; } - case OperationType.OP_COLOCATE_ADD_TABLE: case OperationType.OP_COLOCATE_ADD_TABLE_V2: { final ColocatePersistInfo info = (ColocatePersistInfo) journal.getData(); globalStateMgr.getColocateTableIndex().replayAddTableToGroup(info); break; } - case OperationType.OP_COLOCATE_REMOVE_TABLE: { - final ColocatePersistInfo info = (ColocatePersistInfo) journal.getData(); - globalStateMgr.getColocateTableIndex().replayRemoveTable(info); - break; - } - case OperationType.OP_COLOCATE_BACKENDS_PER_BUCKETSEQ: case OperationType.OP_COLOCATE_BACKENDS_PER_BUCKETSEQ_V2: { final ColocatePersistInfo info = (ColocatePersistInfo) journal.getData(); 
globalStateMgr.getColocateTableIndex().replayAddBackendsPerBucketSeq(info); break; } - case OperationType.OP_COLOCATE_MARK_UNSTABLE: case OperationType.OP_COLOCATE_MARK_UNSTABLE_V2: { final ColocatePersistInfo info = (ColocatePersistInfo) journal.getData(); globalStateMgr.getColocateTableIndex().replayMarkGroupUnstable(info); break; } - case OperationType.OP_COLOCATE_MARK_STABLE: case OperationType.OP_COLOCATE_MARK_STABLE_V2: { final ColocatePersistInfo info = (ColocatePersistInfo) journal.getData(); globalStateMgr.getColocateTableIndex().replayMarkGroupStable(info); break; } - case OperationType.OP_MODIFY_TABLE_COLOCATE: case OperationType.OP_MODIFY_TABLE_COLOCATE_V2: { final TablePropertyInfo info = (TablePropertyInfo) journal.getData(); globalStateMgr.getColocateTableIndex().replayModifyTableColocate(info); break; } - case OperationType.OP_HEARTBEAT_V2: - case OperationType.OP_HEARTBEAT: { + case OperationType.OP_HEARTBEAT_V2: { final HbPackage hbPackage = (HbPackage) journal.getData(); GlobalStateMgr.getCurrentState().getHeartbeatMgr().replayHearbeat(hbPackage); break; } - case OperationType.OP_ADD_FUNCTION: case OperationType.OP_ADD_FUNCTION_V2: { final Function function = (Function) journal.getData(); if (function.getFunctionName().isGlobalFunction()) { @@ -660,7 +576,6 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) } break; } - case OperationType.OP_DROP_FUNCTION: case OperationType.OP_DROP_FUNCTION_V2: { FunctionSearchDesc function = (FunctionSearchDesc) journal.getData(); if (function.getName().isGlobalFunction()) { @@ -670,44 +585,33 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) } break; } - case OperationType.OP_BACKEND_TABLETS_INFO: case OperationType.OP_BACKEND_TABLETS_INFO_V2: { BackendTabletsInfo backendTabletsInfo = (BackendTabletsInfo) journal.getData(); GlobalStateMgr.getCurrentState().getLocalMetastore().replayBackendTabletsInfo(backendTabletsInfo); break; } - case OperationType.OP_CREATE_ROUTINE_LOAD_JOB_V2: - case OperationType.OP_CREATE_ROUTINE_LOAD_JOB: { + case OperationType.OP_CREATE_ROUTINE_LOAD_JOB_V2: { RoutineLoadJob routineLoadJob = (RoutineLoadJob) journal.getData(); GlobalStateMgr.getCurrentState().getRoutineLoadMgr().replayCreateRoutineLoadJob(routineLoadJob); break; } - case OperationType.OP_CHANGE_ROUTINE_LOAD_JOB_V2: - case OperationType.OP_CHANGE_ROUTINE_LOAD_JOB: { + case OperationType.OP_CHANGE_ROUTINE_LOAD_JOB_V2: { RoutineLoadOperation operation = (RoutineLoadOperation) journal.getData(); GlobalStateMgr.getCurrentState().getRoutineLoadMgr().replayChangeRoutineLoadJob(operation); break; } - case OperationType.OP_REMOVE_ROUTINE_LOAD_JOB: { - RoutineLoadOperation operation = (RoutineLoadOperation) journal.getData(); - globalStateMgr.getRoutineLoadMgr().replayRemoveOldRoutineLoad(operation); - break; - } - case OperationType.OP_CREATE_STREAM_LOAD_TASK: case OperationType.OP_CREATE_STREAM_LOAD_TASK_V2: { StreamLoadTask streamLoadTask = (StreamLoadTask) journal.getData(); globalStateMgr.getStreamLoadMgr().replayCreateLoadTask(streamLoadTask); break; } - case OperationType.OP_CREATE_LOAD_JOB_V2: - case OperationType.OP_CREATE_LOAD_JOB: { + case OperationType.OP_CREATE_LOAD_JOB_V2: { com.starrocks.load.loadv2.LoadJob loadJob = (com.starrocks.load.loadv2.LoadJob) journal.getData(); globalStateMgr.getLoadMgr().replayCreateLoadJob(loadJob); break; } - case OperationType.OP_END_LOAD_JOB_V2: - case OperationType.OP_END_LOAD_JOB: { + case OperationType.OP_END_LOAD_JOB_V2: { LoadJobFinalOperation 
operation = (LoadJobFinalOperation) journal.getData(); globalStateMgr.getLoadMgr().replayEndLoadJob(operation); break; @@ -758,11 +662,6 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) globalStateMgr.getTaskManager().replayUpdateTaskRun(statusChange); break; } - case OperationType.OP_DROP_TASK_RUNS: { - DropTaskRunsLog dropTaskRunsLog = (DropTaskRunsLog) journal.getData(); - globalStateMgr.getTaskManager().replayDropTaskRuns(dropTaskRunsLog.getQueryIdList()); - break; - } case OperationType.OP_UPDATE_TASK_RUN_STATE: { TaskRunPeriodStatusChange taskRunPeriodStatusChange = (TaskRunPeriodStatusChange) journal.getData(); globalStateMgr.getTaskManager().replayAlterRunningTaskRunProgress( @@ -774,13 +673,11 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) globalStateMgr.getTaskManager().replayArchiveTaskRuns(log); break; } - case OperationType.OP_CREATE_SMALL_FILE: case OperationType.OP_CREATE_SMALL_FILE_V2: { SmallFile smallFile = (SmallFile) journal.getData(); globalStateMgr.getSmallFileMgr().replayCreateFile(smallFile); break; } - case OperationType.OP_DROP_SMALL_FILE: case OperationType.OP_DROP_SMALL_FILE_V2: { SmallFile smallFile = (SmallFile) journal.getData(); globalStateMgr.getSmallFileMgr().replayRemoveFile(smallFile); @@ -801,7 +698,6 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) } break; } - case OperationType.OP_BATCH_ADD_ROLLUP: case OperationType.OP_BATCH_ADD_ROLLUP_V2: { BatchAlterJobPersistInfo batchAlterJobV2 = (BatchAlterJobPersistInfo) journal.getData(); for (AlterJobV2 alterJobV2 : batchAlterJobV2.getAlterJobV2List()) { @@ -809,12 +705,6 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) } break; } - case OperationType.OP_MODIFY_DISTRIBUTION_TYPE: - case OperationType.OP_MODIFY_DISTRIBUTION_TYPE_V2: { - TableInfo tableInfo = (TableInfo) journal.getData(); - globalStateMgr.getLocalMetastore().replayConvertDistributionType(tableInfo); - break; - } case OperationType.OP_DYNAMIC_PARTITION: case OperationType.OP_MODIFY_IN_MEMORY: case OperationType.OP_SET_FORBIDDEN_GLOBAL_DICT: @@ -899,7 +789,7 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) } case OperationType.OP_SWAP_TABLE: { SwapTableOperationLog log = (SwapTableOperationLog) journal.getData(); - globalStateMgr.getAlterJobMgr().replaySwapTable(log); + globalStateMgr.getLocalMetastore().replaySwapTable(log); break; } case OperationType.OP_ADD_ANALYZER_JOB: { @@ -1045,10 +935,6 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) globalStateMgr.getInsertOverwriteJobMgr().replayInsertOverwriteStateChange(stateChangeInfo); break; } - case OperationType.OP_ADD_UNUSED_SHARD: - case OperationType.OP_DELETE_UNUSED_SHARD: - // Deprecated: Nothing to do - break; case OperationType.OP_STARMGR: { StarMgrJournal j = (StarMgrJournal) journal.getData(); StarMgrServer.getCurrentState().getStarMgr().replay(j.getJournal()); @@ -1080,13 +966,11 @@ public void loadJournal(GlobalStateMgr globalStateMgr, JournalEntity journal) info.getUserIdentity(), info.getAuthenticationInfo(), info.getProperties()); break; } - case OperationType.OP_UPDATE_USER_PROP_V2: case OperationType.OP_UPDATE_USER_PROP_V3: { UserPropertyInfo info = (UserPropertyInfo) journal.getData(); globalStateMgr.getAuthenticationMgr().replayUpdateUserProperty(info); break; } - case OperationType.OP_DROP_USER_V2: case OperationType.OP_DROP_USER_V3: { UserIdentity userIdentity = (UserIdentity) 
journal.getData(); globalStateMgr.getAuthenticationMgr().replayDropUser(userIdentity); @@ -1538,10 +1422,6 @@ public void logPartitionRename(TableInfo tableInfo) { logJsonObject(OperationType.OP_RENAME_PARTITION_V2, tableInfo); } - public void logGlobalVariable(SessionVariable variable) { - logEdit(OperationType.OP_GLOBAL_VARIABLE, variable); - } - public void logAddBroker(BrokerMgr.ModifyBrokerInfo info) { logJsonObject(OperationType.OP_ADD_BROKER_V2, info); } @@ -1554,10 +1434,6 @@ public void logDropAllBroker(String brokerName) { logEdit(OperationType.OP_DROP_ALL_BROKER, new Text(brokerName)); } - public void logSetLoadErrorHub(LoadErrorHub.Param param) { - logEdit(OperationType.OP_SET_LOAD_ERROR_HUB, param); - } - public void logExportCreate(ExportJob job) { logJsonObject(OperationType.OP_EXPORT_CREATE_V2, job); } @@ -1691,10 +1567,6 @@ public void logBatchAlterJob(BatchAlterJobPersistInfo batchAlterJobV2) { logJsonObject(OperationType.OP_BATCH_ADD_ROLLUP_V2, batchAlterJobV2); } - public void logModifyDistributionType(TableInfo tableInfo) { - logJsonObject(OperationType.OP_MODIFY_DISTRIBUTION_TYPE_V2, tableInfo); - } - public void logDynamicPartition(ModifyTablePropertyOperationLog info) { logEdit(OperationType.OP_DYNAMIC_PARTITION, info); } @@ -1971,7 +1843,7 @@ public void logPipeOp(PipeOpEntry opEntry) { logEdit(OperationType.OP_PIPE, opEntry); } - private void logJsonObject(short op, Object obj) { + public void logJsonObject(short op, Object obj) { logEdit(op, out -> Text.writeString(out, GsonUtils.GSON.toJson(obj))); } diff --git a/fe/fe-core/src/main/java/com/starrocks/persist/ModifyTablePropertyOperationLog.java b/fe/fe-core/src/main/java/com/starrocks/persist/ModifyTablePropertyOperationLog.java index 421d6ea300986a..68f4f501c00eda 100644 --- a/fe/fe-core/src/main/java/com/starrocks/persist/ModifyTablePropertyOperationLog.java +++ b/fe/fe-core/src/main/java/com/starrocks/persist/ModifyTablePropertyOperationLog.java @@ -20,6 +20,7 @@ import com.google.gson.annotations.SerializedName; import com.starrocks.common.io.Text; import com.starrocks.common.io.Writable; +import com.starrocks.meta.TxnMeta; import com.starrocks.persist.gson.GsonUtils; import java.io.DataInput; @@ -28,7 +29,7 @@ import java.util.HashMap; import java.util.Map; -public class ModifyTablePropertyOperationLog implements Writable { +public class ModifyTablePropertyOperationLog extends TxnMeta implements Writable { @SerializedName(value = "dbId") private long dbId; diff --git a/fe/fe-core/src/main/java/com/starrocks/persist/OperationType.java b/fe/fe-core/src/main/java/com/starrocks/persist/OperationType.java index 2a3a8fc81a8145..83d4a896386964 100644 --- a/fe/fe-core/src/main/java/com/starrocks/persist/OperationType.java +++ b/fe/fe-core/src/main/java/com/starrocks/persist/OperationType.java @@ -51,74 +51,20 @@ public class OperationType { public static final short OP_SAVE_NEXTID = 0; - @Deprecated - //Added OP_CREATE_DB_V2 in version 3.1, can be removed in version 3.2 - public static final short OP_CREATE_DB = 1; - @IgnorableOnReplayFailed public static final short OP_DROP_DB = 2; - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_ALTER_DB = 3; - @IgnorableOnReplayFailed public static final short OP_ERASE_DB = 4; - @Deprecated - public static final short OP_RECOVER_DB = 5; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_RENAME_DB = 6; - - // 10~19 110~119 210~219 ... 
- - @Deprecated - //Added OP_CREATE_TABLE_V2 in version 3.1, can be removed in version 3.2 - public static final short OP_CREATE_TABLE = 10; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_DROP_TABLE = 11; - - @Deprecated - public static final short OP_ADD_PARTITION = 12; - @IgnorableOnReplayFailed public static final short OP_DROP_PARTITION = 13; - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_MODIFY_PARTITION = 14; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_ERASE_TABLE = 15; - @IgnorableOnReplayFailed public static final short OP_ERASE_PARTITION = 16; - @Deprecated - public static final short OP_RECOVER_TABLE = 17; - - @Deprecated - public static final short OP_RECOVER_PARTITION = 18; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_RENAME_TABLE = 19; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_RENAME_PARTITION = 110; - - @Deprecated @IgnorableOnReplayFailed - public static final short OP_BACKUP_JOB = 116; - - @Deprecated - public static final short OP_RESTORE_JOB = 117; + public static final short OP_FINISH_CONSISTENCY_CHECK = 29; public static final short OP_TRUNCATE_TABLE = 118; @@ -130,245 +76,28 @@ public class OperationType { @IgnorableOnReplayFailed public static final short OP_BATCH_MODIFY_PARTITION = 211; - // 20~29 120~129 220~229 ... - @Deprecated - public static final short OP_START_ROLLUP = 20; - - @Deprecated - public static final short OP_FINISH_ROLLUP = 21; - - @Deprecated - public static final short OP_CANCEL_ROLLUP = 23; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_DROP_ROLLUP = 24; - - @Deprecated - public static final short OP_START_SCHEMA_CHANGE = 25; - - @Deprecated - public static final short OP_FINISH_SCHEMA_CHANGE = 26; - - @Deprecated - public static final short OP_CANCEL_SCHEMA_CHANGE = 27; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_CLEAR_ROLLUP_INFO = 28; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_FINISH_CONSISTENCY_CHECK = 29; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_RENAME_ROLLUP = 120; - public static final short OP_ALTER_JOB_V2 = 121; - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_MODIFY_DISTRIBUTION_TYPE = 122; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_BATCH_ADD_ROLLUP = 123; - @IgnorableOnReplayFailed public static final short OP_BATCH_DROP_ROLLUP = 124; @IgnorableOnReplayFailed public static final short OP_REMOVE_ALTER_JOB_V2 = 125; - // 30~39 130~139 230~239 ... 
- // load job for only hadoop load - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_EXPORT_CREATE = 36; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_EXPORT_UPDATE_STATE = 37; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_EXPORT_UPDATE_INFO = 38; - - @Deprecated - public static final short OP_FINISH_SYNC_DELETE = 40; - - @Deprecated - public static final short OP_FINISH_DELETE = 41; - @Deprecated public static final short OP_ADD_REPLICA = 42; - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_DELETE_REPLICA = 43; - - @Deprecated - public static final short OP_FINISH_ASYNC_DELETE = 44; - - @Deprecated - public static final short OP_UPDATE_REPLICA = 45; - - @Deprecated - public static final short OP_BACKEND_TABLETS_INFO = 46; - public static final short OP_SET_REPLICA_STATUS = 47; - @Deprecated - public static final short OP_ADD_BACKEND = 50; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_DROP_BACKEND = 51; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_BACKEND_STATE_CHANGE = 52; - - @Deprecated - public static final short OP_START_DECOMMISSION_BACKEND = 53; - - @Deprecated - public static final short OP_FINISH_DECOMMISSION_BACKEND = 54; - - @Deprecated - public static final short OP_ADD_FRONTEND = 55; - - @Deprecated - public static final short OP_ADD_FIRST_FRONTEND = 56; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_REMOVE_FRONTEND = 57; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_SET_LOAD_ERROR_HUB = 58; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_HEARTBEAT = 59; - - @Deprecated - public static final short OP_CREATE_USER = 62; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_NEW_DROP_USER = 63; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_GRANT_PRIV = 64; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_REVOKE_PRIV = 65; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_SET_PASSWORD = 66; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_CREATE_ROLE = 67; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_DROP_ROLE = 68; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_UPDATE_USER_PROPERTY = 69; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_TIMESTAMP = 70; - - @Deprecated - public static final short OP_LEADER_INFO_CHANGE = 71; - @Deprecated - - public static final short OP_META_VERSION = 72; - - @Deprecated - // replaced by OP_GLOBAL_VARIABLE_V2 - @IgnorableOnReplayFailed - public static final short OP_GLOBAL_VARIABLE = 73; - - @Deprecated - public static final short OP_CREATE_CLUSTER = 74; - @IgnorableOnReplayFailed public static final short OP_GLOBAL_VARIABLE_V2 = 84; - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_ADD_BROKER = 85; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_DROP_BROKER = 86; - @IgnorableOnReplayFailed public static final short OP_DROP_ALL_BROKER = 87; - @Deprecated - public static final short OP_UPDATE_CLUSTER_AND_BACKENDS = 88; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_CREATE_REPOSITORY = 89; - @IgnorableOnReplayFailed public static final short OP_DROP_REPOSITORY = 90; - //colocate table - @Deprecated - @IgnorableOnReplayFailed - public static 
final short OP_COLOCATE_ADD_TABLE = 94; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_COLOCATE_REMOVE_TABLE = 95; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_COLOCATE_BACKENDS_PER_BUCKETSEQ = 96; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_COLOCATE_MARK_UNSTABLE = 97; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_COLOCATE_MARK_STABLE = 98; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_MODIFY_TABLE_COLOCATE = 99; - - //real time load 100 -108 - @Deprecated - public static final short OP_UPSERT_TRANSACTION_STATE = 100; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_DELETE_TRANSACTION_STATE = 101; - - @Deprecated - public static final short OP_FINISHING_ROLLUP = 102; - - @Deprecated - public static final short OP_FINISHING_SCHEMA_CHANGE = 103; - - @Deprecated - public static final short OP_SAVE_TRANSACTION_ID = 104; - public static final short OP_SAVE_AUTO_INCREMENT_ID = 105; @IgnorableOnReplayFailed @@ -377,56 +106,13 @@ public class OperationType { // light schema change for add and drop columns public static final short OP_MODIFY_TABLE_ADD_OR_DROP_COLUMNS = 107; - @Deprecated @IgnorableOnReplayFailed public static final short OP_ALTER_ROUTINE_LOAD_JOB = 111; - // UDF 130-140 - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_ADD_FUNCTION = 130; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_DROP_FUNCTION = 131; - - // routine load 200 - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_CREATE_ROUTINE_LOAD_JOB = 200; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_CHANGE_ROUTINE_LOAD_JOB = 201; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_REMOVE_ROUTINE_LOAD_JOB = 202; - - // load job v2 for broker load 230~250 - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_CREATE_LOAD_JOB = 230; - - // this finish op include finished and cancelled - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_END_LOAD_JOB = 231; - // update job info, used by spark load - @Deprecated @IgnorableOnReplayFailed public static final short OP_UPDATE_LOAD_JOB = 232; - // small files 251~260 - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_CREATE_SMALL_FILE = 251; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_DROP_SMALL_FILE = 252; - // dynamic partition 261~265 @IgnorableOnReplayFailed public static final short OP_DYNAMIC_PARTITION = 261; @@ -464,9 +150,6 @@ public class OperationType { @IgnorableOnReplayFailed public static final short OP_SWAP_TABLE = 10001; - @Deprecated - public static final short OP_ADD_PARTITIONS = 10002; - @IgnorableOnReplayFailed public static final short OP_FINISH_MULTI_DELETE = 10003; @@ -533,23 +216,6 @@ public class OperationType { @IgnorableOnReplayFailed public static final short OP_DROP_CATALOG = 10061; - // grant & revoke impersonate - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_GRANT_IMPERSONATE = 10062; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_REVOKE_IMPERSONATE = 10063; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_GRANT_ROLE = 10064; - - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_REVOKE_ROLE = 10065; - // task 10071 ~ 10090 @IgnorableOnReplayFailed public static final short OP_CREATE_TASK = 
10071; @@ -563,10 +229,6 @@ public class OperationType { @IgnorableOnReplayFailed public static final short OP_UPDATE_TASK_RUN = 10082; - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_DROP_TASK_RUNS = 10083; - @IgnorableOnReplayFailed public static final short OP_UPDATE_TASK_RUN_STATE = 10084; @@ -586,23 +248,14 @@ public class OperationType { @IgnorableOnReplayFailed public static final short OP_ALTER_MATERIALIZED_VIEW_PROPERTIES = 10093; - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_CREATE_MATERIALIZED_VIEW = 10094; - public static final short OP_CREATE_INSERT_OVERWRITE = 10095; public static final short OP_INSERT_OVERWRITE_STATE_CHANGE = 10096; @IgnorableOnReplayFailed public static final short OP_ALTER_MATERIALIZED_VIEW_STATUS = 10097; - public static final short OP_ALTER_MATERIALIZED_VIEW_BASE_TABLE_INFOS = 10098; - - // manage system node info 10101 ~ 10120 - @Deprecated - public static final short OP_UPDATE_FRONTEND = 10101; - // manage compute node 10201 ~ 10220 + public static final short OP_ALTER_MATERIALIZED_VIEW_BASE_TABLE_INFOS = 10098; @IgnorableOnReplayFailed public static final short OP_ADD_COMPUTE_NODE = 10201; @@ -610,12 +263,6 @@ public class OperationType { @IgnorableOnReplayFailed public static final short OP_DROP_COMPUTE_NODE = 10202; - // shard operate 10221 ~ 10240. Deprecated - @Deprecated - public static final short OP_ADD_UNUSED_SHARD = 10221; - @Deprecated - public static final short OP_DELETE_UNUSED_SHARD = 10222; - // new operator for partition 10241 ~ 10260 public static final short OP_ADD_PARTITION_V2 = 10241; public static final short OP_ADD_PARTITIONS_V2 = 10242; @@ -637,9 +284,6 @@ public class OperationType { @IgnorableOnReplayFailed public static final short OP_ALTER_USER_V2 = 10263; - @Deprecated - public static final short OP_DROP_USER_V2 = 10264; - @IgnorableOnReplayFailed public static final short OP_UPDATE_ROLE_PRIVILEGE_V2 = 10265; @@ -648,21 +292,12 @@ public class OperationType { public static final short OP_AUTH_UPGRADE_V2 = 10267; - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_UPDATE_USER_PROP_V2 = 10268; - @IgnorableOnReplayFailed public static final short OP_CREATE_SECURITY_INTEGRATION = 10269; // integrate with starmgr public static final short OP_STARMGR = 11000; - // stream load - @Deprecated - @IgnorableOnReplayFailed - public static final short OP_CREATE_STREAM_LOAD_TASK = 11020; - // MaterializedView Maintenance @IgnorableOnReplayFailed diff --git a/fe/fe-core/src/main/java/com/starrocks/persist/PhysicalPartitionPersistInfoV2.java b/fe/fe-core/src/main/java/com/starrocks/persist/PhysicalPartitionPersistInfoV2.java index e3677ec0d08783..3a74d93c249a18 100644 --- a/fe/fe-core/src/main/java/com/starrocks/persist/PhysicalPartitionPersistInfoV2.java +++ b/fe/fe-core/src/main/java/com/starrocks/persist/PhysicalPartitionPersistInfoV2.java @@ -17,7 +17,6 @@ import com.google.gson.annotations.SerializedName; import com.starrocks.catalog.PhysicalPartition; -import com.starrocks.catalog.PhysicalPartitionImpl; import com.starrocks.common.io.Text; import com.starrocks.common.io.Writable; import com.starrocks.persist.gson.GsonUtils; @@ -35,9 +34,9 @@ public class PhysicalPartitionPersistInfoV2 implements Writable { @SerializedName("partitionId") private Long partitionId; @SerializedName("physicalPartition") - private PhysicalPartitionImpl partition; + private PhysicalPartition partition; - public PhysicalPartitionPersistInfoV2(Long dbId, Long tableId, Long partitionId, 
PhysicalPartitionImpl partition) {
+    public PhysicalPartitionPersistInfoV2(Long dbId, Long tableId, Long partitionId, PhysicalPartition partition) {
         this.dbId = dbId;
         this.tableId = tableId;
         this.partitionId = partitionId;
diff --git a/fe/fe-core/src/main/java/com/starrocks/persist/ReplicaPersistInfo.java b/fe/fe-core/src/main/java/com/starrocks/persist/ReplicaPersistInfo.java
index ad77d97dc8cd75..9ab5297fc7cf86 100644
--- a/fe/fe-core/src/main/java/com/starrocks/persist/ReplicaPersistInfo.java
+++ b/fe/fe-core/src/main/java/com/starrocks/persist/ReplicaPersistInfo.java
@@ -37,12 +37,13 @@ import com.google.common.base.Objects;
 import com.google.gson.annotations.SerializedName;
 import com.starrocks.common.io.Writable;
+import com.starrocks.meta.TxnMeta;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-public class ReplicaPersistInfo implements Writable {
+public class ReplicaPersistInfo extends TxnMeta implements Writable {
     public enum ReplicaOperationType {
         ADD(0),
diff --git a/fe/fe-core/src/main/java/com/starrocks/planner/MetaScanNode.java b/fe/fe-core/src/main/java/com/starrocks/planner/MetaScanNode.java
index a6909e9f65b527..70991cb0547499 100644
--- a/fe/fe-core/src/main/java/com/starrocks/planner/MetaScanNode.java
+++ b/fe/fe-core/src/main/java/com/starrocks/planner/MetaScanNode.java
@@ -67,11 +67,11 @@ public MetaScanNode(PlanNodeId id, TupleDescriptor desc, OlapTable olapTable,
     public void computeRangeLocations() {
         Collection partitions = olapTable.getPartitions();
         for (Partition partition : partitions) {
-            MaterializedIndex index = partition.getBaseIndex();
+            MaterializedIndex index = partition.getDefaultPhysicalPartition().getBaseIndex();
             int schemaHash = olapTable.getSchemaHashByIndexId(index.getId());
             List tablets = index.getTablets();
-            long visibleVersion = partition.getVisibleVersion();
+            long visibleVersion = partition.getDefaultPhysicalPartition().getVisibleVersion();
             String visibleVersionStr = String.valueOf(visibleVersion);
             for (Tablet tablet : tablets) {
diff --git a/fe/fe-core/src/main/java/com/starrocks/planner/OlapScanNode.java b/fe/fe-core/src/main/java/com/starrocks/planner/OlapScanNode.java
index 5e5c32d972512b..317747f4563804 100644
--- a/fe/fe-core/src/main/java/com/starrocks/planner/OlapScanNode.java
+++ b/fe/fe-core/src/main/java/com/starrocks/planner/OlapScanNode.java
@@ -424,7 +424,7 @@ public List updateScanRangeLocations(List> expandObjectNames() {
         .map(Pipe::getDbAndName)
         .collect(Collectors.toList());
         for (Pair dbAndName : ListUtils.emptyIfNull(dbAndNames)) {
-            Optional db = GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(dbAndName.first);
+            Optional db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetDb(dbAndName.first);
             db.ifPresent(database -> objects.add(
                     Lists.newArrayList(InternalCatalog.DEFAULT_INTERNAL_CATALOG_NAME, database.getFullName(), dbAndName.second)));
@@ -206,7 +206,7 @@ public Optional getDatabase() {
             return Optional.empty();
         }
         long dbId = Long.parseLong(getDbUUID());
-        return GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(dbId);
+        return GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetDb(dbId);
     } catch (NumberFormatException e) {
         return Optional.empty();
     }
diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/TablePEntryObject.java b/fe/fe-core/src/main/java/com/starrocks/privilege/TablePEntryObject.java
index 0546d82b5ceab5..b100e6ff4d92f7 100644
--- a/fe/fe-core/src/main/java/com/starrocks/privilege/TablePEntryObject.java
+++ 
b/fe/fe-core/src/main/java/com/starrocks/privilege/TablePEntryObject.java @@ -181,11 +181,11 @@ public boolean isFuzzyMatching() { @Override public boolean validate(GlobalStateMgr globalStateMgr) { if (catalogId == InternalCatalog.DEFAULT_INTERNAL_CATALOG_ID) { - Database db = globalStateMgr.getLocalMetastore().getDbIncludeRecycleBin(Long.parseLong(this.databaseUUID)); + Database db = globalStateMgr.getStarRocksMetadata().getDbIncludeRecycleBin(Long.parseLong(this.databaseUUID)); if (db == null) { return false; } - return globalStateMgr.getLocalMetastore().getTableIncludeRecycleBin(db, Long.parseLong(this.tableUUID)) != null; + return globalStateMgr.getStarRocksMetadata().getTableIncludeRecycleBin(db, Long.parseLong(this.tableUUID)) != null; } // do not validate privilege of external table return true; diff --git a/fe/fe-core/src/main/java/com/starrocks/qe/DDLStmtExecutor.java b/fe/fe-core/src/main/java/com/starrocks/qe/DDLStmtExecutor.java index 568414f038d838..29d9000de983c6 100644 --- a/fe/fe-core/src/main/java/com/starrocks/qe/DDLStmtExecutor.java +++ b/fe/fe-core/src/main/java/com/starrocks/qe/DDLStmtExecutor.java @@ -147,7 +147,6 @@ import java.io.IOException; import java.time.LocalDateTime; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -339,7 +338,7 @@ public ShowResultSet visitCleanTemporaryTableStatement(CleanTemporaryTableStmt s @Override public ShowResultSet visitCreateMaterializedViewStmt(CreateMaterializedViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { - context.getGlobalStateMgr().getLocalMetastore().createMaterializedView(stmt); + context.getGlobalStateMgr().getStarRocksMetadata().createMaterializedView(stmt); }); return null; } @@ -348,7 +347,7 @@ public ShowResultSet visitCreateMaterializedViewStmt(CreateMaterializedViewStmt public ShowResultSet visitCreateMaterializedViewStatement(CreateMaterializedViewStatement stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { - context.getGlobalStateMgr().getLocalMetastore().createMaterializedView(stmt); + context.getGlobalStateMgr().getStarRocksMetadata().createMaterializedView(stmt); }); return null; } @@ -356,7 +355,7 @@ public ShowResultSet visitCreateMaterializedViewStatement(CreateMaterializedView @Override public ShowResultSet visitDropMaterializedViewStatement(DropMaterializedViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { - context.getGlobalStateMgr().getLocalMetastore().dropMaterializedView(stmt); + context.getGlobalStateMgr().getStarRocksMetadata().dropMaterializedView(stmt); }); return null; } @@ -365,7 +364,7 @@ public ShowResultSet visitDropMaterializedViewStatement(DropMaterializedViewStmt public ShowResultSet visitAlterMaterializedViewStatement(AlterMaterializedViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { - context.getGlobalStateMgr().getLocalMetastore().alterMaterializedView(stmt); + context.getGlobalStateMgr().getStarRocksMetadata().alterMaterializedView(stmt); }); return null; } @@ -376,18 +375,18 @@ public ShowResultSet visitRefreshMaterializedViewStatement(RefreshMaterializedVi List info = Lists.newArrayList(); ErrorReport.wrapWithRuntimeException(() -> { // The priority of manual refresh is higher than that of general refresh - String taskRunId = context.getGlobalStateMgr().getLocalMetastore().refreshMaterializedView(stmt); + String taskRunId = 
context.getGlobalStateMgr().getStarRocksMetadata().refreshMaterializedView(stmt); info.add(taskRunId); }); - return new ShowResultSet(RefreshMaterializedViewStatement.META_DATA, Arrays.asList(info)); + return new ShowResultSet(RefreshMaterializedViewStatement.META_DATA, List.of(info)); } @Override public ShowResultSet visitCancelRefreshMaterializedViewStatement(CancelRefreshMaterializedViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { - context.getGlobalStateMgr().getLocalMetastore().cancelRefreshMaterializedView(stmt); + context.getGlobalStateMgr().getStarRocksMetadata().cancelRefreshMaterializedView(stmt); }); return null; } @@ -403,7 +402,7 @@ public ShowResultSet visitAlterTableStatement(AlterTableStmt stmt, ConnectContex @Override public ShowResultSet visitAlterViewStatement(AlterViewStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { - context.getGlobalStateMgr().getLocalMetastore().alterView(stmt); + context.getGlobalStateMgr().getStarRocksMetadata().alterView(stmt); }); return null; } @@ -411,7 +410,7 @@ public ShowResultSet visitAlterViewStatement(AlterViewStmt stmt, ConnectContext @Override public ShowResultSet visitCancelAlterTableStatement(CancelAlterTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { - context.getGlobalStateMgr().getLocalMetastore().cancelAlter(stmt); + context.getGlobalStateMgr().getAlterJobMgr().cancelAlter(stmt, "user cancelled"); }); return null; } @@ -604,7 +603,7 @@ public ShowResultSet visitCancelAlterSystemStatement(CancelAlterSystemStmt stmt, @Override public ShowResultSet visitAlterDatabaseQuotaStatement(AlterDatabaseQuotaStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { - context.getGlobalStateMgr().getLocalMetastore().alterDatabaseQuota(stmt); + context.getGlobalStateMgr().getStarRocksMetadata().alterDatabaseQuota(stmt); }); return null; } @@ -613,7 +612,7 @@ public ShowResultSet visitAlterDatabaseQuotaStatement(AlterDatabaseQuotaStmt stm public ShowResultSet visitAlterDatabaseRenameStatement(AlterDatabaseRenameStatement stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { - context.getGlobalStateMgr().getLocalMetastore().renameDatabase(stmt); + context.getGlobalStateMgr().getStarRocksMetadata().renameDatabase(stmt); }); return null; } @@ -621,7 +620,7 @@ public ShowResultSet visitAlterDatabaseRenameStatement(AlterDatabaseRenameStatem @Override public ShowResultSet visitRecoverDbStatement(RecoverDbStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { - context.getGlobalStateMgr().getLocalMetastore().recoverDatabase(stmt); + context.getGlobalStateMgr().getStarRocksMetadata().recoverDatabase(stmt); }); return null; } @@ -629,7 +628,7 @@ public ShowResultSet visitRecoverDbStatement(RecoverDbStmt stmt, ConnectContext @Override public ShowResultSet visitRecoverTableStatement(RecoverTableStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { - context.getGlobalStateMgr().getLocalMetastore().recoverTable(stmt); + context.getGlobalStateMgr().getStarRocksMetadata().recoverTable(stmt); }); return null; } @@ -637,7 +636,7 @@ public ShowResultSet visitRecoverTableStatement(RecoverTableStmt stmt, ConnectCo @Override public ShowResultSet visitRecoverPartitionStatement(RecoverPartitionStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { - context.getGlobalStateMgr().getLocalMetastore().recoverPartition(stmt); + 
context.getGlobalStateMgr().getStarRocksMetadata().recoverPartition(stmt);
         });
         return null;
     }
@@ -700,7 +699,7 @@ public ShowResultSet visitSyncStatement(SyncStmt stmt, ConnectContext context) {
     @Override
     public ShowResultSet visitTruncateTableStatement(TruncateTableStmt stmt, ConnectContext context) {
         ErrorReport.wrapWithRuntimeException(() -> {
-            context.getGlobalStateMgr().getLocalMetastore().truncateTable(stmt, context);
+            context.getGlobalStateMgr().getStarRocksMetadata().truncateTable(stmt, context);
         });
         return null;
     }
@@ -784,7 +783,7 @@ public ShowResultSet visitUninstallPluginStatement(UninstallPluginStmt stmt, Con
     @Override
     public ShowResultSet visitAdminCheckTabletsStatement(AdminCheckTabletsStmt stmt, ConnectContext context) {
         ErrorReport.wrapWithRuntimeException(() -> {
-            context.getGlobalStateMgr().getLocalMetastore().checkTablets(stmt);
+            context.getGlobalStateMgr().getStarRocksMetadata().checkTablets(stmt);
         });
         return null;
     }
@@ -792,7 +791,7 @@ public ShowResultSet visitAdminCheckTabletsStatement(AdminCheckTabletsStmt stmt,
     @Override
     public ShowResultSet visitAdminSetPartitionVersionStmt(AdminSetPartitionVersionStmt stmt, ConnectContext context) {
         ErrorReport.wrapWithRuntimeException(()
-                -> context.getGlobalStateMgr().getLocalMetastore().setPartitionVersion(stmt));
+                -> context.getGlobalStateMgr().getStarRocksMetadata().setPartitionVersion(stmt));
         return null;
     }
@@ -800,7 +799,7 @@ public ShowResultSet visitAdminSetPartitionVersionStmt(AdminSetPartitionVersionS
     public ShowResultSet visitAdminSetReplicaStatusStatement(AdminSetReplicaStatusStmt stmt, ConnectContext context) {
         ErrorReport.wrapWithRuntimeException(() -> {
-            context.getGlobalStateMgr().getLocalMetastore().setReplicaStatus(stmt);
+            context.getGlobalStateMgr().getStarRocksMetadata().setReplicaStatus(stmt);
         });
         return null;
     }
diff --git a/fe/fe-core/src/main/java/com/starrocks/qe/ShowExecutor.java b/fe/fe-core/src/main/java/com/starrocks/qe/ShowExecutor.java
index 4e7fb2ebc1c468..d5ed1173846f3a 100644
--- a/fe/fe-core/src/main/java/com/starrocks/qe/ShowExecutor.java
+++ b/fe/fe-core/src/main/java/com/starrocks/qe/ShowExecutor.java
@@ -1617,7 +1617,7 @@ public ShowResultSet visitShowTabletStatement(ShowTabletStmt statement, ConnectC
                     break;
                 }
-                List replicas = tablet.getImmutableReplicas();
+                List replicas = GlobalStateMgr.getCurrentState().getLocalMetastore().getAllReplicas(tablet);
                 for (Replica replica : replicas) {
                     Replica tmp = invertedIndex.getReplica(tabletId, replica.getBackendId());
                     if (tmp == null) {
@@ -2503,7 +2503,7 @@ public ShowResultSet visitDescStorageVolumeStatement(DescStorageVolumeStmt state
     public ShowResultSet visitShowPipeStatement(ShowPipeStmt statement, ConnectContext context) {
         List> rows = Lists.newArrayList();
         String dbName = statement.getDbName();
-        long dbId = GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(dbName)
+        long dbId = GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetDb(dbName)
                 .map(Database::getId)
                 .orElseThrow(() -> ErrorReport.buildSemanticException(ErrorCode.ERR_BAD_DB_ERROR, dbName));
         PipeManager pipeManager = GlobalStateMgr.getCurrentState().getPipeManager();
@@ -2767,7 +2767,7 @@ public static List listMaterializedViewStatus(
             // rows
             if (olapTable.getPartitionInfo().getType() == PartitionType.UNPARTITIONED) {
                 Partition partition = olapTable.getPartitions().iterator().next();
-                MaterializedIndex index = partition.getIndex(mvId);
+                MaterializedIndex index = partition.getDefaultPhysicalPartition().getIndex(mvId);
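The hunks above converge on two recurring patterns in this branch: DDL visitors route through GlobalStateMgr.getStarRocksMetadata() instead of getLocalMetastore(), and per-partition reads hop through getDefaultPhysicalPartition() before touching indexes or versions (the same chain appears in MetaScanNode above and in PartitionBasedMvRefreshProcessor below). A minimal caller-side sketch of the partition-access pattern, assuming only the StarRocks FE classpath; PartitionAccessSketch and readPartition are hypothetical names for illustration, not part of the patch:

import com.starrocks.catalog.MaterializedIndex;
import com.starrocks.catalog.OlapTable;
import com.starrocks.catalog.Partition;
import com.starrocks.catalog.PhysicalPartition;

public final class PartitionAccessSketch {
    // Hypothetical helper: index and version lookups now go through the
    // default physical partition rather than the logical Partition itself.
    static void readPartition(OlapTable olapTable, long indexId) {
        for (Partition partition : olapTable.getPartitions()) {
            PhysicalPartition physical = partition.getDefaultPhysicalPartition();
            // Before this patch: partition.getBaseIndex() / partition.getVisibleVersion()
            MaterializedIndex baseIndex = physical.getBaseIndex();
            long visibleVersion = physical.getVisibleVersion();
            MaterializedIndex target = physical.getIndex(indexId);
            System.out.printf("base=%d target=%s visibleVersion=%d%n",
                    baseIndex.getId(),
                    target == null ? "n/a" : String.valueOf(target.getId()),
                    visibleVersion);
        }
    }

    private PartitionAccessSketch() {
    }
}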
mvStatus.setRows(index.getRowCount()); } else { mvStatus.setRows(0L); diff --git a/fe/fe-core/src/main/java/com/starrocks/scheduler/MVActiveChecker.java b/fe/fe-core/src/main/java/com/starrocks/scheduler/MVActiveChecker.java index 1bcba036ca4c37..8e51f854add2eb 100644 --- a/fe/fe-core/src/main/java/com/starrocks/scheduler/MVActiveChecker.java +++ b/fe/fe-core/src/main/java/com/starrocks/scheduler/MVActiveChecker.java @@ -125,7 +125,8 @@ public static void tryToActivate(MaterializedView mv, boolean checkGracePeriod) } long dbId = mv.getDbId(); - Optional dbName = GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(dbId).map(Database::getFullName); + Optional dbName = GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .mayGetDb(dbId).map(Database::getFullName); if (!dbName.isPresent()) { LOG.warn("[MVActiveChecker] cannot activate MV {} since database {} not found", mv.getName(), dbId); return; diff --git a/fe/fe-core/src/main/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessor.java b/fe/fe-core/src/main/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessor.java index 6d8e9ac743493f..4401cce05b677e 100644 --- a/fe/fe-core/src/main/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessor.java +++ b/fe/fe-core/src/main/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessor.java @@ -1347,7 +1347,9 @@ private Map getRefreshedPartitionInf continue; } MaterializedView.BasePartitionInfo basePartitionInfo = new MaterializedView.BasePartitionInfo( - partition.getId(), partition.getVisibleVersion(), partition.getVisibleVersionTime()); + partition.getId(), + partition.getDefaultPhysicalPartition().getVisibleVersion(), + partition.getDefaultPhysicalPartition().getVisibleVersionTime()); partitionInfos.put(partition.getName(), basePartitionInfo); } LOG.info("Collect olap base table {}'s refreshed partition infos: {}", baseTable.getName(), partitionInfos); diff --git a/fe/fe-core/src/main/java/com/starrocks/scheduler/history/TableKeeper.java b/fe/fe-core/src/main/java/com/starrocks/scheduler/history/TableKeeper.java index 9bd41bf2f0ef6d..3c5f686b844087 100644 --- a/fe/fe-core/src/main/java/com/starrocks/scheduler/history/TableKeeper.java +++ b/fe/fe-core/src/main/java/com/starrocks/scheduler/history/TableKeeper.java @@ -102,7 +102,7 @@ public void createTable() throws UserException { public void correctTable() { int numBackends = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getTotalBackendNumber(); int replica = GlobalStateMgr.getCurrentState() - .getLocalMetastore().mayGetTable(databaseName, tableName) + .getStarRocksMetadata().mayGetTable(databaseName, tableName) .map(tbl -> ((OlapTable) tbl).getPartitionInfo().getMinReplicationNum()) .orElse((short) 1); if (numBackends < tableReplicas) { @@ -142,7 +142,7 @@ public void changeTTL() { private Optional mayGetTable() { return GlobalStateMgr.getCurrentState() - .getLocalMetastore().mayGetTable(databaseName, tableName) + .getStarRocksMetadata().mayGetTable(databaseName, tableName) .flatMap(x -> Optional.of((OlapTable) x)); } diff --git a/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/IMTCreator.java b/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/IMTCreator.java index ec8c46d20724cc..440803d20d0506 100644 --- a/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/IMTCreator.java +++ b/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/IMTCreator.java @@ -120,7 +120,7 @@ public static void createIMT(CreateMaterializedViewStatement stmt, MaterializedV for (CreateTableStmt create : 
createTables) { LOG.info("creating IMT {} for MV {}", create.getTableName(), view.getName()); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(create); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(create); } catch (DdlException e) { // TODO(murphy) cleanup created IMT, or make it atomic LOG.warn("create IMT {} failed due to ", create.getTableName(), e); diff --git a/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/MVPCTRefreshListPartitioner.java b/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/MVPCTRefreshListPartitioner.java index fdd5de0b303d1a..0a68610b6c149b 100644 --- a/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/MVPCTRefreshListPartitioner.java +++ b/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/MVPCTRefreshListPartitioner.java @@ -323,7 +323,7 @@ private void addListPartitions(Database database, MaterializedView materializedV AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(materializedView); analyzer.analyze(mvContext.getCtx(), addPartitionClause); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().addPartitions( + GlobalStateMgr.getCurrentState().getStarRocksMetadata().addPartitions( mvContext.getCtx(), database, materializedView.getName(), addPartitionClause); } catch (Exception e) { throw new DmlException("add list partition failed: %s, db: %s, table: %s", e, e.getMessage(), diff --git a/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/MVPCTRefreshPartitioner.java b/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/MVPCTRefreshPartitioner.java index 0909f335d6b81f..ef3d418547ecfa 100644 --- a/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/MVPCTRefreshPartitioner.java +++ b/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/MVPCTRefreshPartitioner.java @@ -272,7 +272,7 @@ protected void dropPartition(Database db, MaterializedView materializedView, Str AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(materializedView); analyzer.analyze(new ConnectContext(), dropPartitionClause); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropPartition(db, materializedView, dropPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropPartition(db, materializedView, dropPartitionClause); } catch (Exception e) { throw new DmlException("Expression add partition failed: %s, db: %s, table: %s", e, e.getMessage(), db.getFullName(), materializedView.getName()); diff --git a/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/MVPCTRefreshRangePartitioner.java b/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/MVPCTRefreshRangePartitioner.java index e7e788fd2c0c53..ab9857f07728e8 100644 --- a/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/MVPCTRefreshRangePartitioner.java +++ b/fe/fe-core/src/main/java/com/starrocks/scheduler/mv/MVPCTRefreshRangePartitioner.java @@ -465,7 +465,7 @@ private void addRangePartitions(Database database, MaterializedView materialized AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(materializedView); analyzer.analyze(mvContext.getCtx(), alterPartition); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().addPartitions(mvContext.getCtx(), + GlobalStateMgr.getCurrentState().getStarRocksMetadata().addPartitions(mvContext.getCtx(), database, materializedView.getName(), alterPartition); } catch (Exception e) { throw new DmlException("Expression add partition failed: %s, db: %s, table: %s", e, e.getMessage(), diff --git 
a/fe/fe-core/src/main/java/com/starrocks/server/ElasticSearchTableFactory.java b/fe/fe-core/src/main/java/com/starrocks/server/ElasticSearchTableFactory.java index 0f383d5fdb5e75..d79d6416398e4d 100644 --- a/fe/fe-core/src/main/java/com/starrocks/server/ElasticSearchTableFactory.java +++ b/fe/fe-core/src/main/java/com/starrocks/server/ElasticSearchTableFactory.java @@ -55,7 +55,7 @@ public Table createTable(LocalMetastore metastore, Database database, CreateTabl .collect(Collectors.toList()); // metastore is null when external table if (null != metastore) { - metastore.validateColumns(baseSchema); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().validateColumns(baseSchema); } // create partition info @@ -65,7 +65,7 @@ public Table createTable(LocalMetastore metastore, Database database, CreateTabl if (partitionDesc != null) { partitionInfo = partitionDesc.toPartitionInfo(baseSchema, partitionNameToId, false); } else if (null != metastore) { - long partitionId = metastore.getNextId(); + long partitionId = GlobalStateMgr.getCurrentState().getNextId(); // use table name as single partition name partitionNameToId.put(tableName, partitionId); partitionInfo = new SinglePartitionInfo(); diff --git a/fe/fe-core/src/main/java/com/starrocks/server/GlobalStateMgr.java b/fe/fe-core/src/main/java/com/starrocks/server/GlobalStateMgr.java index 8074bfc836ee0b..ebc9e83be466bf 100644 --- a/fe/fe-core/src/main/java/com/starrocks/server/GlobalStateMgr.java +++ b/fe/fe-core/src/main/java/com/starrocks/server/GlobalStateMgr.java @@ -127,6 +127,7 @@ import com.starrocks.journal.JournalInconsistentException; import com.starrocks.journal.JournalTask; import com.starrocks.journal.JournalWriter; +import com.starrocks.journal.bdbje.BDBJEJournal; import com.starrocks.journal.bdbje.Timestamp; import com.starrocks.lake.ShardManager; import com.starrocks.lake.StarMgrMetaSyncer; @@ -157,8 +158,10 @@ import com.starrocks.memory.MemoryUsageTracker; import com.starrocks.memory.ProcProfileCollector; import com.starrocks.meta.MetaContext; +import com.starrocks.meta.MetadataHandler; +import com.starrocks.meta.StarRocksMetadata; +import com.starrocks.meta.TabletManager; import com.starrocks.metric.MetricRepo; -import com.starrocks.persist.BackendIdsUpdateInfo; import com.starrocks.persist.EditLog; import com.starrocks.persist.ImageFormatVersion; import com.starrocks.persist.ImageHeader; @@ -346,6 +349,8 @@ public class GlobalStateMgr { private EditLog editLog; private Journal journal; + private MetadataHandler metadataHandler; + // For checkpoint and observer memory replayed marker private final AtomicLong replayedJournalId; @@ -358,6 +363,8 @@ public class GlobalStateMgr { private final JournalObservable journalObservable; private final TabletInvertedIndex tabletInvertedIndex; + private final TabletManager tabletManager; + private ColocateTableIndex colocateTableIndex; private final CatalogRecycleBin recycleBin; @@ -439,6 +446,7 @@ public class GlobalStateMgr { private final InsertOverwriteJobMgr insertOverwriteJobMgr; private final LocalMetastore localMetastore; + private final StarRocksMetadata starRocksMetadata; private final GlobalFunctionMgr globalFunctionMgr; @Deprecated @@ -534,10 +542,13 @@ public TabletInvertedIndex getTabletInvertedIndex() { return this.tabletInvertedIndex; } + public TabletManager getTabletManager() { + return tabletManager; + } + // only for test public void setColocateTableIndex(ColocateTableIndex colocateTableIndex) { this.colocateTableIndex = colocateTableIndex; - 
localMetastore.setColocateTableIndex(colocateTableIndex); } public ColocateTableIndex getColocateTableIndex() { @@ -564,6 +575,10 @@ public LocalMetastore getLocalMetastore() { return localMetastore; } + public StarRocksMetadata getStarRocksMetadata() { + return starRocksMetadata; + } + public TemporaryTableMgr getTemporaryTableMgr() { return temporaryTableMgr; } @@ -644,6 +659,8 @@ private GlobalStateMgr(boolean isCkptGlobalState, NodeMgr nodeMgr) { this.journalObservable = new JournalObservable(); this.tabletInvertedIndex = new TabletInvertedIndex(); + this.tabletManager = new TabletManager(); + this.colocateTableIndex = new ColocateTableIndex(); this.recycleBin = new CatalogRecycleBin(); this.functionSet = new FunctionSet(); @@ -704,12 +721,13 @@ private GlobalStateMgr(boolean isCkptGlobalState, NodeMgr nodeMgr) { this.pluginMgr = new PluginMgr(); this.auditEventProcessor = new AuditEventProcessor(this.pluginMgr); this.analyzeMgr = new AnalyzeMgr(); - this.localMetastore = new LocalMetastore(this, recycleBin, colocateTableIndex); + this.localMetastore = new LocalMetastore(this); + this.starRocksMetadata = new StarRocksMetadata(); this.temporaryTableMgr = new TemporaryTableMgr(); this.warehouseMgr = new WarehouseManager(); this.connectorMgr = new ConnectorMgr(); this.connectorTblMetaInfoMgr = new ConnectorTblMetaInfoMgr(); - this.metadataMgr = new MetadataMgr(localMetastore, temporaryTableMgr, connectorMgr, connectorTblMetaInfoMgr); + this.metadataMgr = new MetadataMgr(starRocksMetadata, temporaryTableMgr, connectorMgr, connectorTblMetaInfoMgr); this.catalogMgr = new CatalogMgr(connectorMgr); this.connectorTableTriggerAnalyzeMgr = new ConnectorTableTriggerAnalyzeMgr(); @@ -1150,6 +1168,8 @@ protected void initJournal() throws JournalException, InterruptedException { journalWriter = new JournalWriter(journal, journalQueue); editLog = new EditLog(journalQueue); + + metadataHandler = new MetadataHandler(((BDBJEJournal) journal).getBdbEnvironment()); } // wait until FE is ready. @@ -2062,6 +2082,10 @@ public Journal getJournal() { return journal; } + public MetadataHandler getMetadataHandler() { + return metadataHandler; + } + // Get the next available, lock-free because nextId is atomic. 
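Alongside LocalMetastore, GlobalStateMgr now exposes accessors for the subsystems wired up in this hunk: getStarRocksMetadata(), getTabletManager(), and getMetadataHandler() (created from the BDBJE environment during initJournal). A rough orientation sketch, assuming the StarRocks FE classpath; GlobalStateMgrAccessSketch and describeDatabase are hypothetical illustration names, not code from the patch:

import com.starrocks.catalog.Database;
import com.starrocks.meta.StarRocksMetadata;
import com.starrocks.meta.TabletManager;
import com.starrocks.server.GlobalStateMgr;

import java.util.Optional;

public final class GlobalStateMgrAccessSketch {
    // Hypothetical illustration: DDL-facing metadata is reached through
    // getStarRocksMetadata(), while LocalMetastore keeps the in-memory maps.
    static Optional<Long> describeDatabase(String dbName) {
        GlobalStateMgr stateMgr = GlobalStateMgr.getCurrentState();
        StarRocksMetadata metadata = stateMgr.getStarRocksMetadata();
        // Obtained only to show the new accessor; its API is not visible in this hunk.
        TabletManager tabletManager = stateMgr.getTabletManager();
        // Id allocation stays on GlobalStateMgr (see the JDBCTableFactory hunk below).
        long nextId = stateMgr.getNextId();
        Optional<Database> db = metadata.mayGetDb(dbName);
        return db.map(Database::getId);
    }

    private GlobalStateMgrAccessSketch() {
    }
}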
public long getNextId() { return idGenerator.getNextId(); @@ -2429,10 +2453,6 @@ public void initDefaultWarehouse() { isDefaultWarehouseCreated = true; } - public void replayUpdateClusterAndBackends(BackendIdsUpdateInfo info) { - localMetastore.replayUpdateClusterAndBackends(info); - } - public String dumpImage() { LOG.info("begin to dump meta data"); String dumpFilePath; diff --git a/fe/fe-core/src/main/java/com/starrocks/server/JDBCTableFactory.java b/fe/fe-core/src/main/java/com/starrocks/server/JDBCTableFactory.java index 9120b4f03e10c5..f89488651ae66b 100644 --- a/fe/fe-core/src/main/java/com/starrocks/server/JDBCTableFactory.java +++ b/fe/fe-core/src/main/java/com/starrocks/server/JDBCTableFactory.java @@ -37,7 +37,7 @@ public Table createTable(LocalMetastore metastore, Database database, CreateTabl String tableName = stmt.getTableName(); List columns = stmt.getColumns(); Map properties = stmt.getProperties(); - long tableId = metastore.getNextId(); + long tableId = GlobalStateMgr.getCurrentState().getNextId(); return new JDBCTable(tableId, tableName, columns, properties); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/server/LocalMetastore.java b/fe/fe-core/src/main/java/com/starrocks/server/LocalMetastore.java index e16c3488f982ad..fb3843b0331edc 100644 --- a/fe/fe-core/src/main/java/com/starrocks/server/LocalMetastore.java +++ b/fe/fe-core/src/main/java/com/starrocks/server/LocalMetastore.java @@ -37,60 +37,27 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; -import com.google.common.collect.HashMultimap; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import com.google.common.collect.Multimap; -import com.google.common.collect.Range; import com.google.common.collect.Sets; -import com.staros.proto.FilePathInfo; -import com.starrocks.alter.AlterJobExecutor; -import com.starrocks.alter.AlterMVJobExecutor; -import com.starrocks.alter.MaterializedViewHandler; -import com.starrocks.analysis.Expr; -import com.starrocks.analysis.FunctionCallExpr; -import com.starrocks.analysis.HintNode; -import com.starrocks.analysis.IntLiteral; -import com.starrocks.analysis.SetVarHint; -import com.starrocks.analysis.SlotRef; -import com.starrocks.analysis.StringLiteral; -import com.starrocks.analysis.TableName; -import com.starrocks.analysis.TableRef; -import com.starrocks.analysis.UserVariableHint; -import com.starrocks.binlog.BinlogConfig; -import com.starrocks.catalog.CatalogRecycleBin; -import com.starrocks.catalog.CatalogUtils; -import com.starrocks.catalog.ColocateGroupSchema; -import com.starrocks.catalog.ColocateTableIndex; +import com.starrocks.alter.AlterJobException; +import com.starrocks.authentication.AuthenticationMgr; +import com.starrocks.catalog.BaseTableInfo; import com.starrocks.catalog.Column; -import com.starrocks.catalog.DataProperty; import com.starrocks.catalog.Database; -import com.starrocks.catalog.DistributionInfo; -import com.starrocks.catalog.FunctionSet; -import com.starrocks.catalog.HashDistributionInfo; import com.starrocks.catalog.HiveTable; -import com.starrocks.catalog.Index; -import com.starrocks.catalog.KeysType; import com.starrocks.catalog.ListPartitionInfo; import com.starrocks.catalog.LocalTablet; import com.starrocks.catalog.MaterializedIndex; -import com.starrocks.catalog.MaterializedIndex.IndexExtState; -import com.starrocks.catalog.MaterializedIndexMeta; import 
com.starrocks.catalog.MaterializedView; -import com.starrocks.catalog.MvId; import com.starrocks.catalog.OlapTable; import com.starrocks.catalog.Partition; import com.starrocks.catalog.PartitionInfo; -import com.starrocks.catalog.PartitionKey; import com.starrocks.catalog.PartitionType; import com.starrocks.catalog.PhysicalPartition; -import com.starrocks.catalog.PhysicalPartitionImpl; -import com.starrocks.catalog.PrimitiveType; -import com.starrocks.catalog.RandomDistributionInfo; import com.starrocks.catalog.RangePartitionInfo; import com.starrocks.catalog.Replica; -import com.starrocks.catalog.SinglePartitionInfo; import com.starrocks.catalog.Table; import com.starrocks.catalog.TableProperty; import com.starrocks.catalog.Tablet; @@ -100,72 +67,62 @@ import com.starrocks.catalog.system.information.InfoSchemaDb; import com.starrocks.catalog.system.sys.SysDb; import com.starrocks.cluster.ClusterNamespace; -import com.starrocks.common.AlreadyExistsException; import com.starrocks.common.AnalysisException; import com.starrocks.common.Config; import com.starrocks.common.DdlException; -import com.starrocks.common.ErrorCode; -import com.starrocks.common.ErrorReport; -import com.starrocks.common.ErrorReportException; -import com.starrocks.common.InvalidOlapTableStateException; +import com.starrocks.common.FeConstants; import com.starrocks.common.MaterializedViewExceptions; -import com.starrocks.common.MetaNotFoundException; import com.starrocks.common.Pair; import com.starrocks.common.UserException; import com.starrocks.common.util.DynamicPartitionUtil; import com.starrocks.common.util.PropertyAnalyzer; -import com.starrocks.common.util.TimeUtils; import com.starrocks.common.util.UUIDUtil; -import com.starrocks.common.util.Util; -import com.starrocks.common.util.concurrent.CountingLatch; +import com.starrocks.common.util.WriteQuorum; +import com.starrocks.common.util.concurrent.MarkedCountDownLatch; +import com.starrocks.common.util.concurrent.lock.AutoCloseableLock; import com.starrocks.common.util.concurrent.lock.LockType; import com.starrocks.common.util.concurrent.lock.Locker; -import com.starrocks.connector.ConnectorMetadata; import com.starrocks.connector.exception.StarRocksConnectorException; -import com.starrocks.lake.DataCacheInfo; -import com.starrocks.lake.LakeMaterializedView; -import com.starrocks.lake.LakeTable; -import com.starrocks.lake.LakeTablet; -import com.starrocks.lake.StorageInfo; -import com.starrocks.load.pipe.PipeManager; import com.starrocks.memory.MemoryTrackable; +import com.starrocks.meta.LocalMetastoreInterface; +import com.starrocks.meta.StarRocksMetadata; import com.starrocks.mv.MVMetaVersionRepairer; import com.starrocks.mv.MVRepairHandler; -import com.starrocks.mv.analyzer.MVPartitionExprResolver; import com.starrocks.persist.AddPartitionsInfoV2; import com.starrocks.persist.AddSubPartitionsInfoV2; +import com.starrocks.persist.AlterMaterializedViewBaseTableInfosLog; +import com.starrocks.persist.AlterMaterializedViewStatusLog; +import com.starrocks.persist.AlterViewInfo; import com.starrocks.persist.AutoIncrementInfo; -import com.starrocks.persist.BackendIdsUpdateInfo; import com.starrocks.persist.BackendTabletsInfo; import com.starrocks.persist.BatchDeleteReplicaInfo; -import com.starrocks.persist.ColocatePersistInfo; +import com.starrocks.persist.BatchDropInfo; +import com.starrocks.persist.ChangeMaterializedViewRefreshSchemeLog; import com.starrocks.persist.ColumnRenameInfo; +import com.starrocks.persist.ConsistencyCheckInfo; import 
com.starrocks.persist.CreateDbInfo; import com.starrocks.persist.CreateTableInfo; import com.starrocks.persist.DatabaseInfo; -import com.starrocks.persist.DisablePartitionRecoveryInfo; -import com.starrocks.persist.DisableTableRecoveryInfo; import com.starrocks.persist.DropDbInfo; +import com.starrocks.persist.DropInfo; import com.starrocks.persist.DropPartitionInfo; import com.starrocks.persist.DropPartitionsInfo; -import com.starrocks.persist.EditLog; import com.starrocks.persist.ImageWriter; import com.starrocks.persist.ListPartitionPersistInfo; import com.starrocks.persist.ModifyPartitionInfo; import com.starrocks.persist.ModifyTableColumnOperationLog; import com.starrocks.persist.ModifyTablePropertyOperationLog; -import com.starrocks.persist.MultiEraseTableInfo; import com.starrocks.persist.OperationType; -import com.starrocks.persist.PartitionPersistInfo; import com.starrocks.persist.PartitionPersistInfoV2; import com.starrocks.persist.PartitionVersionRecoveryInfo; -import com.starrocks.persist.PartitionVersionRecoveryInfo.PartitionVersion; import com.starrocks.persist.PhysicalPartitionPersistInfoV2; import com.starrocks.persist.RangePartitionPersistInfo; import com.starrocks.persist.RecoverInfo; +import com.starrocks.persist.RenameMaterializedViewLog; import com.starrocks.persist.ReplacePartitionOperationLog; import com.starrocks.persist.ReplicaPersistInfo; import com.starrocks.persist.SetReplicaStatusOperationLog; +import com.starrocks.persist.SwapTableOperationLog; import com.starrocks.persist.TableInfo; import com.starrocks.persist.TruncateTableInfo; import com.starrocks.persist.metablock.SRMetaBlockEOFException; @@ -173,85 +130,41 @@ import com.starrocks.persist.metablock.SRMetaBlockID; import com.starrocks.persist.metablock.SRMetaBlockReader; import com.starrocks.persist.metablock.SRMetaBlockWriter; -import com.starrocks.privilege.AccessDeniedException; -import com.starrocks.privilege.ObjectType; -import com.starrocks.privilege.PrivilegeType; +import com.starrocks.privilege.PrivilegeBuiltinConstants; import com.starrocks.qe.ConnectContext; -import com.starrocks.qe.SessionVariable; -import com.starrocks.qe.VariableMgr; -import com.starrocks.scheduler.Constants; -import com.starrocks.scheduler.ExecuteOption; import com.starrocks.scheduler.Task; import com.starrocks.scheduler.TaskBuilder; -import com.starrocks.scheduler.TaskManager; -import com.starrocks.scheduler.TaskRun; -import com.starrocks.sql.analyzer.AnalyzerUtils; -import com.starrocks.sql.analyzer.Authorizer; -import com.starrocks.sql.ast.AddPartitionClause; -import com.starrocks.sql.ast.AdminCheckTabletsStmt; -import com.starrocks.sql.ast.AdminSetPartitionVersionStmt; -import com.starrocks.sql.ast.AdminSetReplicaStatusStmt; +import com.starrocks.sql.analyzer.Analyzer; +import com.starrocks.sql.analyzer.MaterializedViewAnalyzer; +import com.starrocks.sql.analyzer.SemanticException; import com.starrocks.sql.ast.AlterDatabaseQuotaStmt; -import com.starrocks.sql.ast.AlterDatabaseRenameStatement; -import com.starrocks.sql.ast.AlterMaterializedViewStmt; -import com.starrocks.sql.ast.AlterTableCommentClause; -import com.starrocks.sql.ast.AlterTableStmt; -import com.starrocks.sql.ast.AlterViewStmt; -import com.starrocks.sql.ast.AsyncRefreshSchemeDesc; -import com.starrocks.sql.ast.CancelAlterTableStmt; -import com.starrocks.sql.ast.CancelRefreshMaterializedViewStmt; -import com.starrocks.sql.ast.ColumnRenameClause; +import com.starrocks.sql.ast.AlterMaterializedViewStatusClause; import 
com.starrocks.sql.ast.CreateMaterializedViewStatement; -import com.starrocks.sql.ast.CreateMaterializedViewStmt; -import com.starrocks.sql.ast.CreateTableLikeStmt; -import com.starrocks.sql.ast.CreateTableStmt; -import com.starrocks.sql.ast.CreateTemporaryTableStmt; -import com.starrocks.sql.ast.CreateViewStmt; -import com.starrocks.sql.ast.DistributionDesc; -import com.starrocks.sql.ast.DropMaterializedViewStmt; -import com.starrocks.sql.ast.DropPartitionClause; -import com.starrocks.sql.ast.DropTableStmt; -import com.starrocks.sql.ast.ExpressionPartitionDesc; -import com.starrocks.sql.ast.IntervalLiteral; import com.starrocks.sql.ast.PartitionDesc; -import com.starrocks.sql.ast.PartitionRangeDesc; -import com.starrocks.sql.ast.PartitionRenameClause; -import com.starrocks.sql.ast.RecoverDbStmt; -import com.starrocks.sql.ast.RecoverPartitionStmt; -import com.starrocks.sql.ast.RecoverTableStmt; -import com.starrocks.sql.ast.RefreshMaterializedViewStatement; -import com.starrocks.sql.ast.RefreshSchemeClause; -import com.starrocks.sql.ast.ReplacePartitionClause; -import com.starrocks.sql.ast.RollupRenameClause; -import com.starrocks.sql.ast.ShowAlterStmt; +import com.starrocks.sql.ast.QueryStatement; import com.starrocks.sql.ast.SingleRangePartitionDesc; -import com.starrocks.sql.ast.SystemVariable; -import com.starrocks.sql.ast.TableRenameClause; -import com.starrocks.sql.ast.TruncateTableStmt; -import com.starrocks.sql.common.MetaUtils; -import com.starrocks.sql.common.SyncPartitionUtils; -import com.starrocks.sql.optimizer.Utils; +import com.starrocks.sql.ast.StatementBase; +import com.starrocks.sql.ast.UserIdentity; +import com.starrocks.sql.optimizer.rule.transformation.materialization.MvUtils; import com.starrocks.sql.optimizer.statistics.IDictManager; -import com.starrocks.system.Backend; -import com.starrocks.system.SystemInfoService; -import com.starrocks.task.TabletTaskExecutor; -import com.starrocks.thrift.TGetTasksParams; +import com.starrocks.sql.parser.SqlParser; +import com.starrocks.task.AgentBatchTask; +import com.starrocks.task.AgentTaskExecutor; +import com.starrocks.task.AgentTaskQueue; +import com.starrocks.task.TabletMetadataUpdateAgentTask; +import com.starrocks.task.TabletMetadataUpdateAgentTaskFactory; import com.starrocks.thrift.TStorageMedium; -import com.starrocks.thrift.TStorageType; import com.starrocks.thrift.TTabletMetaType; -import com.starrocks.thrift.TTabletType; -import com.starrocks.warehouse.Warehouse; -import org.apache.commons.collections.CollectionUtils; +import com.starrocks.thrift.TTaskType; +import com.starrocks.thrift.TWriteQuorumType; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import java.io.IOException; -import java.time.LocalDateTime; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; -import java.util.HashMap; +import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -259,15 +172,15 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import javax.validation.constraints.NotNull; +import static com.starrocks.meta.StarRocksMetadata.inactiveRelatedMaterializedView; import static com.starrocks.server.GlobalStateMgr.NEXT_ID_INIT_VALUE; import static com.starrocks.server.GlobalStateMgr.isCheckpointThread; -public class 
LocalMetastore implements ConnectorMetadata, MVRepairHandler, MemoryTrackable { +public class LocalMetastore implements MVRepairHandler, MemoryTrackable, LocalMetastoreInterface { private static final Logger LOG = LogManager.getLogger(LocalMetastore.class); private final ConcurrentHashMap idToDb = new ConcurrentHashMap<>(); @@ -275,21 +188,9 @@ public class LocalMetastore implements ConnectorMetadata, MVRepairHandler, Memor private final ConcurrentHashMap tableIdToIncrementId = new ConcurrentHashMap<>(); private final GlobalStateMgr stateMgr; - private final CatalogRecycleBin recycleBin; - private ColocateTableIndex colocateTableIndex; - /** - * Concurrent colocate table creation process have dependency on each other - * (even in different databases), but we do not want to affect the performance - * of non-colocate table creation, so here we use a separate latch to - * synchronize only the creation of colocate tables. - */ - private final CountingLatch colocateTableCreateSyncer = new CountingLatch(0); - public LocalMetastore(GlobalStateMgr globalStateMgr, CatalogRecycleBin recycleBin, - ColocateTableIndex colocateTableIndex) { + public LocalMetastore(GlobalStateMgr globalStateMgr) { this.stateMgr = globalStateMgr; - this.recycleBin = recycleBin; - this.colocateTableIndex = colocateTableIndex; // put built-in database into local metastore InfoSchemaDb infoSchemaDb = new InfoSchemaDb(); @@ -305,90 +206,10 @@ public LocalMetastore(GlobalStateMgr globalStateMgr, CatalogRecycleBin recycleBi fullNameToDb.put(starRocksDb.getFullName(), starRocksDb); } - boolean tryLock(boolean mustLock) { - return stateMgr.tryLock(mustLock); - } - - void unlock() { - stateMgr.unlock(); - } - - long getNextId() { - return stateMgr.getNextId(); - } - - GlobalStateMgr getStateMgr() { - return stateMgr; - } - - public void recreateTabletInvertIndex() { - if (isCheckpointThread()) { - return; - } - - // create inverted index - TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); - for (Database db : this.fullNameToDb.values()) { - long dbId = db.getId(); - for (Table table : db.getTables()) { - if (!table.isNativeTableOrMaterializedView()) { - continue; - } - - OlapTable olapTable = (OlapTable) table; - long tableId = olapTable.getId(); - for (PhysicalPartition partition : olapTable.getAllPhysicalPartitions()) { - long physicalPartitionId = partition.getId(); - TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty( - partition.getParentId()).getStorageMedium(); - for (MaterializedIndex index : partition - .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { - long indexId = index.getId(); - int schemaHash = olapTable.getSchemaHashByIndexId(indexId); - TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partition.getParentId(), physicalPartitionId, - indexId, schemaHash, medium, table.isCloudNativeTableOrMaterializedView()); - for (Tablet tablet : index.getTablets()) { - long tabletId = tablet.getId(); - invertedIndex.addTablet(tabletId, tabletMeta); - if (table.isOlapTableOrMaterializedView()) { - for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { - invertedIndex.addReplica(tabletId, replica); - } - } - } - } // end for indices - } // end for partitions - } // end for tables - } // end for dbs - } - @Override - public void createDb(String dbName, Map properties) throws DdlException, AlreadyExistsException { - long id = 0L; - if (!tryLock(false)) { - throw new DdlException("Failed to acquire globalStateMgr lock. 
Try again"); - } - try { - if (fullNameToDb.containsKey(dbName)) { - throw new AlreadyExistsException("Database Already Exists"); - } else { - id = getNextId(); - Database db = new Database(id, dbName); - String volume = StorageVolumeMgr.DEFAULT; - if (properties != null && properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_VOLUME)) { - volume = properties.remove(PropertyAnalyzer.PROPERTIES_STORAGE_VOLUME); - } - if (!GlobalStateMgr.getCurrentState().getStorageVolumeMgr().bindDbToStorageVolume(volume, id)) { - throw new DdlException(String.format("Storage volume %s not exists", volume)); - } - unprotectCreateDb(db); - String storageVolumeId = GlobalStateMgr.getCurrentState().getStorageVolumeMgr().getStorageVolumeIdOfDb(id); - GlobalStateMgr.getCurrentState().getEditLog().logCreateDb(db, storageVolumeId); - } - } finally { - unlock(); - } - LOG.info("createDb dbName = " + dbName + ", id = " + id); + public void createDb(Database db, String storageVolumeId) { + unprotectCreateDb(db); + GlobalStateMgr.getCurrentState().getEditLog().logCreateDb(db, storageVolumeId); } // For replay edit log, needn't lock metadata @@ -402,104 +223,35 @@ public void unprotectCreateDb(Database db) { stateMgr.getGlobalTransactionMgr().addDatabaseTransactionMgr(db.getId()); } - public ConcurrentHashMap getIdToDb() { - return idToDb; - } - - public void replayCreateDb(Database db) { - tryLock(true); - try { - unprotectCreateDb(db); - LOG.info("finish replay create db, name: {}, id: {}", db.getOriginName(), db.getId()); - } finally { - unlock(); - } - } - public void replayCreateDb(CreateDbInfo createDbInfo) { - tryLock(true); + GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); + LocalMetastore localMetastore = globalStateMgr.getLocalMetastore(); + + globalStateMgr.tryLock(true); try { Database db = new Database(createDbInfo.getId(), createDbInfo.getDbName()); - unprotectCreateDb(db); + localMetastore.unprotectCreateDb(db); + GlobalStateMgr.getCurrentState().getGlobalTransactionMgr().addDatabaseTransactionMgr(db.getId()); + // If user upgrades from 3.0, the storage volume id will be null if (createDbInfo.getStorageVolumeId() != null) { - stateMgr.getStorageVolumeMgr().replayBindDbToStorageVolume(createDbInfo.getStorageVolumeId(), db.getId()); + globalStateMgr.getStorageVolumeMgr().replayBindDbToStorageVolume(createDbInfo.getStorageVolumeId(), db.getId()); } LOG.info("finish replay create db, name: {}, id: {}", db.getOriginName(), db.getId()); } finally { - unlock(); + globalStateMgr.unlock(); } } @Override - public void dropDb(String dbName, boolean isForceDrop) throws DdlException, MetaNotFoundException { - // 1. check if database exists - Database db; - if (!tryLock(false)) { - throw new DdlException("Failed to acquire globalStateMgr lock. Try again"); - } - try { - if (!fullNameToDb.containsKey(dbName)) { - throw new MetaNotFoundException("Database not found"); - } - db = this.fullNameToDb.get(dbName); - if (!isForceDrop && !db.getTemporaryTables().isEmpty()) { - throw new DdlException("The database [" + dbName + "] " + - "cannot be dropped because there are still some temporary tables in it. " + - "If you want to forcibly drop, please use \"DROP DATABASE FORCE.\""); - } - } finally { - unlock(); - } - - // 2. 
drop tables in db - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - if (!db.isExist()) { - throw new MetaNotFoundException("Database '" + dbName + "' not found"); - } - if (!isForceDrop && stateMgr.getGlobalTransactionMgr().existCommittedTxns(db.getId(), null, null)) { - throw new DdlException( - "There are still some transactions in the COMMITTED state waiting to be completed. " + - "The database [" + dbName + - "] cannot be dropped. If you want to forcibly drop(cannot be recovered)," + - " please use \"DROP DATABASE FORCE\"."); - } - - // save table names for recycling - Set tableNames = new HashSet<>(db.getTableNamesViewWithLock()); - unprotectDropDb(db, isForceDrop, false); - if (!isForceDrop) { - recycleBin.recycleDatabase(db, tableNames); - } else { - stateMgr.getLocalMetastore().onEraseDatabase(db.getId()); - } - db.setExist(false); - - // 3. remove db from globalStateMgr - idToDb.remove(db.getId()); - fullNameToDb.remove(db.getFullName()); - - // 4. drop mv task - TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); - TGetTasksParams tasksParams = new TGetTasksParams(); - tasksParams.setDb(dbName); - List dropTaskIdList = taskManager.filterTasks(tasksParams) - .stream().map(Task::getId).collect(Collectors.toList()); - taskManager.dropTasks(dropTaskIdList, false); - - DropDbInfo info = new DropDbInfo(db.getFullName(), isForceDrop); - GlobalStateMgr.getCurrentState().getEditLog().logDropDb(info); - - // 5. Drop Pipes - PipeManager pipeManager = GlobalStateMgr.getCurrentState().getPipeManager(); - pipeManager.dropPipesOfDb(dbName, db.getId()); + public void dropDb(Database db, boolean isForceDrop) { + // 3. remove db from globalStateMgr + idToDb.remove(db.getId()); + fullNameToDb.remove(db.getFullName()); + unprotectDropDb(db, isForceDrop, false); - LOG.info("finish drop database[{}], id: {}, is force : {}", dbName, db.getId(), isForceDrop); - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } + DropDbInfo info = new DropDbInfo(db.getFullName(), isForceDrop); + GlobalStateMgr.getCurrentState().getEditLog().logDropDb(info); } @NotNull @@ -517,8 +269,8 @@ public void unprotectDropDb(Database db, boolean isForeDrop, boolean isReplay) { } } - public void replayDropDb(String dbName, boolean isForceDrop) throws DdlException { - tryLock(true); + public void replayDropDb(String dbName, boolean isForceDrop) { + GlobalStateMgr.getCurrentState().tryLock(true); try { Database db = fullNameToDb.get(dbName); Locker locker = new Locker(); @@ -526,11 +278,7 @@ public void replayDropDb(String dbName, boolean isForceDrop) throws DdlException try { Set tableNames = new HashSet(db.getTableNamesViewWithLock()); unprotectDropDb(db, isForceDrop, true); - if (!isForceDrop) { - recycleBin.recycleDatabase(db, tableNames); - } else { - stateMgr.getLocalMetastore().onEraseDatabase(db.getId()); - } + GlobalStateMgr.getCurrentState().getRecycleBin().recycleDatabase(db, tableNames, isForceDrop); db.setExist(false); } finally { locker.unLockDatabase(db.getId(), LockType.WRITE); @@ -541,144 +289,37 @@ public void replayDropDb(String dbName, boolean isForceDrop) throws DdlException LOG.info("finish replay drop db, name: {}, id: {}", dbName, db.getId()); } finally { - unlock(); - } - } - - public void recoverDatabase(RecoverDbStmt recoverStmt) throws DdlException { - // check is new db with same name already exist - if (getDb(recoverStmt.getDbName()) != null) { - throw new DdlException("Database[" + recoverStmt.getDbName() + "] already 
exist."); - } - - Database db = recycleBin.recoverDatabase(recoverStmt.getDbName()); - - // add db to globalStateMgr - if (!tryLock(false)) { - throw new DdlException("Failed to acquire globalStateMgr lock. Try again"); - } - try { - if (fullNameToDb.containsKey(db.getFullName())) { - throw new DdlException("Database[" + db.getOriginName() + "] already exist."); - // it's ok that we do not put db back to CatalogRecycleBin - // cause this db cannot recover anymore - } - - fullNameToDb.put(db.getFullName(), db); - idToDb.put(db.getId(), db); - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - db.setExist(true); - locker.unLockDatabase(db.getId(), LockType.WRITE); - - List materializedViews = db.getMaterializedViews(); - TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); - for (MaterializedView materializedView : materializedViews) { - MaterializedView.RefreshType refreshType = materializedView.getRefreshScheme().getType(); - if (refreshType != MaterializedView.RefreshType.SYNC) { - Task task = TaskBuilder.buildMvTask(materializedView, db.getFullName()); - TaskBuilder.updateTaskInfo(task, materializedView); - taskManager.createTask(task, false); - } - } - - // log - RecoverInfo recoverInfo = new RecoverInfo(db.getId(), -1L, -1L); - GlobalStateMgr.getCurrentState().getEditLog().logRecoverDb(recoverInfo); - } finally { - unlock(); + GlobalStateMgr.getCurrentState().unlock(); } - - LOG.info("finish recover database, name: {}, id: {}", recoverStmt.getDbName(), db.getId()); } - public void recoverTable(RecoverTableStmt recoverStmt) throws DdlException { - String dbName = recoverStmt.getDbName(); - - Database db = null; - if ((db = getDb(dbName)) == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); - } - - String tableName = recoverStmt.getTableName(); - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - Table table = getTable(db.getFullName(), tableName); - if (table != null) { - ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName); - } - - if (!recycleBin.recoverTable(db, tableName)) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName); - } - - Table recoverTable = getTable(db.getFullName(), tableName); - if (recoverTable instanceof OlapTable) { - DynamicPartitionUtil.registerOrRemovePartitionScheduleInfo(db.getId(), (OlapTable) recoverTable); - } - - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } + @Override + public void recoverDatabase(Database db) { + unprotectCreateDb(db); + // log + RecoverInfo recoverInfo = new RecoverInfo(db.getId(), -1L, -1L); + GlobalStateMgr.getCurrentState().getEditLog().logRecoverDb(recoverInfo); } - public void recoverPartition(RecoverPartitionStmt recoverStmt) throws DdlException { - String dbName = recoverStmt.getDbName(); - - Database db = null; - if ((db = getDb(dbName)) == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); - } + public void replayRecoverDatabase(RecoverInfo info) { + GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); - String tableName = recoverStmt.getTableName(); - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); + long dbId = info.getDbId(); + Database db = GlobalStateMgr.getCurrentState().getRecycleBin().replayRecoverDatabase(dbId); + globalStateMgr.tryLock(true); try { - Table table = getTable(db.getFullName(), tableName); - if (table == null) { - 
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName); - } - - if (!table.isOlapOrCloudNativeTable()) { - throw new DdlException("table[" + tableName + "] is not OLAP table or LAKE table"); - } - OlapTable olapTable = (OlapTable) table; - - String partitionName = recoverStmt.getPartitionName(); - if (olapTable.getPartition(partitionName) != null) { - throw new DdlException("partition[" + partitionName + "] already exist in table[" + tableName + "]"); - } - - recycleBin.recoverPartition(db.getId(), olapTable, partitionName); + unprotectCreateDb(db); + LOG.info("finish replay create db, name: {}, id: {}", db.getOriginName(), db.getId()); } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); + globalStateMgr.unlock(); } - } - - public void replayEraseDatabase(long dbId) { - recycleBin.replayEraseDatabase(dbId); - } - - public void replayRecoverDatabase(RecoverInfo info) { - long dbId = info.getDbId(); - Database db = recycleBin.replayRecoverDatabase(dbId); - - // add db to globalStateMgr - replayCreateDb(db); LOG.info("replay recover db[{}], name: {}", dbId, db.getOriginName()); } - public void alterDatabaseQuota(AlterDatabaseQuotaStmt stmt) throws DdlException { - String dbName = stmt.getDbName(); - Database db = getDb(dbName); - if (db == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); - } - - DatabaseInfo dbInfo = new DatabaseInfo(db.getFullName(), "", stmt.getQuota(), stmt.getQuotaType()); - GlobalStateMgr.getCurrentState().getEditLog().logAlterDb(dbInfo); + @Override + public void alterDatabaseQuota(DatabaseInfo dbInfo) { + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbInfo.getDbName()); Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.WRITE); @@ -687,6 +328,8 @@ public void alterDatabaseQuota(AlterDatabaseQuotaStmt stmt) throws DdlException } finally { locker.unLockDatabase(db.getId(), LockType.WRITE); } + + GlobalStateMgr.getCurrentState().getEditLog().logAlterDb(dbInfo); } public void replayAlterDatabaseQuota(DatabaseInfo dbInfo) { @@ -704,54 +347,15 @@ public void replayAlterDatabaseQuota(DatabaseInfo dbInfo) { } } - public void renameDatabase(AlterDatabaseRenameStatement stmt) throws DdlException { - String fullDbName = stmt.getDbName(); - String newFullDbName = stmt.getNewDbName(); - - if (fullDbName.equals(newFullDbName)) { - throw new DdlException("Same database name"); - } - - Database db; - if (!tryLock(false)) { - throw new DdlException("Failed to acquire globalStateMgr lock. Try again"); - } - try { - // check if db exists - db = fullNameToDb.get(fullDbName); - if (db == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, fullDbName); - } - - // check if name is already used - if (fullNameToDb.get(newFullDbName) != null) { - throw new DdlException("Database name[" + newFullDbName + "] is already used"); - } - // 1. rename db - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - db.setName(newFullDbName); - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - - // 2. add to meta. 
check again - fullNameToDb.remove(fullDbName); - fullNameToDb.put(newFullDbName, db); - - DatabaseInfo dbInfo = - new DatabaseInfo(fullDbName, newFullDbName, -1L, AlterDatabaseQuotaStmt.QuotaType.NONE); - GlobalStateMgr.getCurrentState().getEditLog().logDatabaseRename(dbInfo); - } finally { - unlock(); - } - - LOG.info("rename database[{}] to [{}], id: {}", fullDbName, newFullDbName, db.getId()); + @Override + public void renameDatabase(String dbName, String newDbName) { + replayRenameDatabase(dbName, newDbName); + DatabaseInfo dbInfo = new DatabaseInfo(dbName, newDbName, -1L, AlterDatabaseQuotaStmt.QuotaType.NONE); + GlobalStateMgr.getCurrentState().getEditLog().logDatabaseRename(dbInfo); } public void replayRenameDatabase(String dbName, String newDbName) { - tryLock(true); + GlobalStateMgr.getCurrentState().tryLock(true); try { Database db = fullNameToDb.get(dbName); db.setName(newDbName); @@ -760,3829 +364,1647 @@ public void replayRenameDatabase(String dbName, String newDbName) { LOG.info("replay rename database {} to {}, id: {}", dbName, newDbName, db.getId()); } finally { - unlock(); + GlobalStateMgr.getCurrentState().unlock(); } } - /** - * Following is the step to create an olap table: - * 1. create columns - * 2. create partition info - * 3. create distribution info - * 4. set table id and base index id - * 5. set bloom filter columns - * 6. set and build TableProperty includes: - * 6.1. dynamicProperty - * 6.2. replicationNum - * 6.3. inMemory - * 7. set index meta - * 8. check colocation properties - * 9. create tablet in BE - * 10. add this table to FE's meta - * 11. add this table to ColocateGroup if necessary - * - * @return whether the table is created - */ @Override - public boolean createTable(CreateTableStmt stmt) throws DdlException { - // check if db exists - Database db = getDb(stmt.getDbName()); - if (db == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, stmt.getDbName()); - } - - boolean isTemporaryTable = (stmt instanceof CreateTemporaryTableStmt); - // perform the existence check which is cheap before any further heavy operations. - // NOTE: don't even check the quota if already exists. 
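// Illustrative sketch (not taken from this patch): the createTable flow that the numbered step
// comment above outlines, condensed to calls that appear in the surrounding createTable body.
// Local variable names are illustrative only.
//
//     Database db = getDb(stmt.getDbName());                          // resolve db or fail fast
//     // cheap existence check under the db READ lock before any heavy work
//     if (!stmt.isExternal()) {
//         GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().checkClusterCapacity();
//         db.checkQuota();                                            // only internal tables consume quota
//     }
//     Table table = TableFactoryProvider.getFactory(stmt.getEngineName())
//             .createTable(this, db, stmt);                           // columns, partitions, tablets
//     onCreate(db, table, storageVolumeId, stmt.isSetIfNotExists());  // register in FE meta + edit log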
- Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.READ); - try { - String tableName = stmt.getTableName(); - if (!isTemporaryTable && getTable(db.getFullName(), tableName) != null) { - if (!stmt.isSetIfNotExists()) { - ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName); - } - LOG.info("create table[{}] which already exists", tableName); - return false; - } - } finally { - locker.unLockDatabase(db.getId(), LockType.READ); - } - - // only internal table should check quota and cluster capacity - if (!stmt.isExternal()) { - // check cluster capacity - GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().checkClusterCapacity(); - // check db quota - db.checkQuota(); - } - - AbstractTableFactory tableFactory = TableFactoryProvider.getFactory(stmt.getEngineName()); - if (tableFactory == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_UNKNOWN_STORAGE_ENGINE, stmt.getEngineName()); - } - - Table table = tableFactory.createTable(this, db, stmt); - String storageVolumeId = GlobalStateMgr.getCurrentState().getStorageVolumeMgr() - .getStorageVolumeIdOfTable(table.getId()); - - try { - onCreate(db, table, storageVolumeId, stmt.isSetIfNotExists()); - } catch (DdlException e) { - if (table.isCloudNativeTable()) { - GlobalStateMgr.getCurrentState().getStorageVolumeMgr().unbindTableToStorageVolume(table.getId()); - } - throw e; - } - return true; + public List listDbNames() { + return Lists.newArrayList(fullNameToDb.keySet()); } @Override - public void createTableLike(CreateTableLikeStmt stmt) throws DdlException { - createTable(stmt.getCreateTableStmt()); + public ConcurrentHashMap getIdToDb() { + return idToDb; } @Override - public void addPartitions(ConnectContext ctx, Database db, String tableName, AddPartitionClause addPartitionClause) - throws DdlException { - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.READ); - try { - Table table = getTable(db.getFullName(), tableName); - CatalogUtils.checkTableExist(db, tableName); - CatalogUtils.checkNativeTable(db, table); - } finally { - locker.unLockDatabase(db.getId(), LockType.READ); - } - addPartitions(ctx, db, tableName, - addPartitionClause.getResolvedPartitionDescList(), - addPartitionClause.isTempPartition(), - addPartitionClause.getDistributionDesc()); - } - - private OlapTable checkTable(Database db, String tableName) throws DdlException { - CatalogUtils.checkTableExist(db, tableName); - Table table = getTable(db.getFullName(), tableName); - CatalogUtils.checkNativeTable(db, table); - OlapTable olapTable = (OlapTable) table; - CatalogUtils.checkTableState(olapTable, tableName); - return olapTable; + public List getDbIds() { + return Lists.newArrayList(idToDb.keySet()); } - private OlapTable checkTable(Database db, Long tableId) throws DdlException { - Table table = getTable(db.getId(), tableId); - if (table == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableId); - } - CatalogUtils.checkNativeTable(db, table); - OlapTable olapTable = (OlapTable) table; - CatalogUtils.checkTableState(olapTable, table.getName()); - return olapTable; + @Override + public ConcurrentHashMap getFullNameToDb() { + return fullNameToDb; } - private void checkPartitionType(PartitionInfo partitionInfo) throws DdlException { - PartitionType partitionType = partitionInfo.getType(); - if (!partitionInfo.isRangePartition() && partitionType != PartitionType.LIST) { - throw new DdlException("Only support adding partition to range/list partitioned table"); + 
@Override + public Database getDb(String name) { + if (name == null) { + return null; } - } - - private DistributionInfo getDistributionInfo(OlapTable olapTable, DistributionDesc distributionDesc) - throws DdlException { - DistributionInfo distributionInfo; - List baseSchema = olapTable.getBaseSchema(); - DistributionInfo defaultDistributionInfo = olapTable.getDefaultDistributionInfo(); - if (distributionDesc != null) { - distributionInfo = distributionDesc.toDistributionInfo(baseSchema); - // for now. we only support modify distribution's bucket num - if (distributionInfo.getType() != defaultDistributionInfo.getType()) { - throw new DdlException("Cannot assign different distribution type. default is: " - + defaultDistributionInfo.getType()); - } - - if (distributionInfo.getType() == DistributionInfo.DistributionInfoType.HASH) { - HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo; - List newDistriCols = MetaUtils.getColumnsByColumnIds(olapTable, - hashDistributionInfo.getDistributionColumns()); - List defaultDistriCols = MetaUtils.getColumnsByColumnIds(olapTable, - defaultDistributionInfo.getDistributionColumns()); - if (!newDistriCols.equals(defaultDistriCols)) { - throw new DdlException("Cannot assign hash distribution with different distribution cols. " - + "default is: " + defaultDistriCols); - } - if (hashDistributionInfo.getBucketNum() < 0) { - throw new DdlException("Cannot assign hash distribution buckets less than 0"); - } - } - if (distributionInfo.getType() == DistributionInfo.DistributionInfoType.RANDOM) { - RandomDistributionInfo randomDistributionInfo = (RandomDistributionInfo) distributionInfo; - if (randomDistributionInfo.getBucketNum() < 0) { - throw new DdlException("Cannot assign random distribution buckets less than 0"); - } - } + if (fullNameToDb.containsKey(name)) { + return fullNameToDb.get(name); } else { - distributionInfo = defaultDistributionInfo; - } - return distributionInfo; - } - - private void checkColocation(Database db, OlapTable olapTable, DistributionInfo distributionInfo, - List partitionDescs) - throws DdlException { - if (colocateTableIndex.isColocateTable(olapTable.getId())) { - String fullGroupName = db.getId() + "_" + olapTable.getColocateGroup(); - ColocateGroupSchema groupSchema = colocateTableIndex.getGroupSchema(fullGroupName); - Preconditions.checkNotNull(groupSchema); - groupSchema.checkDistribution(olapTable.getIdToColumn(), distributionInfo); - for (PartitionDesc partitionDesc : partitionDescs) { - groupSchema.checkReplicationNum(partitionDesc.getReplicationNum()); + // This maybe an information_schema db request, and information_schema db name is case-insensitive. + // So, we first extract db name to check if it is information_schema. + // Then we reassemble the origin cluster name with lower case db name, + // and finally get information_schema db from the name map. 
+ String dbName = ClusterNamespace.getNameFromFullName(name); + if (dbName.equalsIgnoreCase(InfoSchemaDb.DATABASE_NAME) || + dbName.equalsIgnoreCase(SysDb.DATABASE_NAME)) { + return fullNameToDb.get(dbName.toLowerCase()); } } + return null; } - private void checkDataProperty(List partitionDescs) { - for (PartitionDesc partitionDesc : partitionDescs) { - DataProperty dataProperty = partitionDesc.getPartitionDataProperty(); - Preconditions.checkNotNull(dataProperty); - } + @Override + public Database getDb(long dbId) { + return idToDb.get(dbId); } - private List> createPartitionMap(Database db, OlapTable copiedTable, - List partitionDescs, - HashMap> partitionNameToTabletSet, - Set tabletIdSetForAll, - Set existPartitionNameSet, - long warehouseId) - throws DdlException { - List> partitionList = Lists.newArrayList(); - for (PartitionDesc partitionDesc : partitionDescs) { - long partitionId = getNextId(); - DataProperty dataProperty = partitionDesc.getPartitionDataProperty(); - String partitionName = partitionDesc.getPartitionName(); - if (existPartitionNameSet.contains(partitionName)) { - continue; - } - Long version = partitionDesc.getVersionInfo(); - Set tabletIdSet = Sets.newHashSet(); - - copiedTable.getPartitionInfo().setDataProperty(partitionId, dataProperty); - copiedTable.getPartitionInfo().setTabletType(partitionId, partitionDesc.getTabletType()); - copiedTable.getPartitionInfo().setReplicationNum(partitionId, partitionDesc.getReplicationNum()); - copiedTable.getPartitionInfo().setIsInMemory(partitionId, partitionDesc.isInMemory()); - copiedTable.getPartitionInfo().setDataCacheInfo(partitionId, partitionDesc.getDataCacheInfo()); - - Partition partition = - createPartition(db, copiedTable, partitionId, partitionName, version, tabletIdSet, warehouseId); - - partitionList.add(Pair.create(partition, partitionDesc)); - tabletIdSetForAll.addAll(tabletIdSet); - partitionNameToTabletSet.put(partitionName, tabletIdSet); - } - return partitionList; + @Override + public void createTable(CreateTableInfo createTableInfo) { + GlobalStateMgr.getCurrentState().getEditLog().logCreateTable(createTableInfo); } - private void checkIfMetaChange(OlapTable olapTable, OlapTable copiedTable, String tableName) throws DdlException { - // rollup index may be added or dropped during add partition operation. - // schema may be changed during add partition operation. - boolean metaChanged = false; - if (olapTable.getIndexNameToId().size() != copiedTable.getIndexNameToId().size()) { - metaChanged = true; - } else { - // compare schemaHash - for (Map.Entry entry : olapTable.getIndexIdToMeta().entrySet()) { - long indexId = entry.getKey(); - if (!copiedTable.getIndexIdToMeta().containsKey(indexId)) { - metaChanged = true; - break; - } - if (copiedTable.getIndexIdToMeta().get(indexId).getSchemaHash() != - entry.getValue().getSchemaHash()) { - metaChanged = true; - break; - } - } - } - - if (olapTable.getDefaultDistributionInfo().getType() != - copiedTable.getDefaultDistributionInfo().getType()) { - metaChanged = true; - } - - if (metaChanged) { - throw new DdlException("Table[" + tableName + "]'s meta has been changed. 
try again."); - } - } - - private void updatePartitionInfo(PartitionInfo partitionInfo, List> partitionList, - Set existPartitionNameSet, boolean isTempPartition, - OlapTable olapTable) - throws DdlException { - if (partitionInfo instanceof RangePartitionInfo) { - RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo; - rangePartitionInfo.handleNewRangePartitionDescs(olapTable.getIdToColumn(), - partitionList, existPartitionNameSet, isTempPartition); - } else if (partitionInfo instanceof ListPartitionInfo) { - ListPartitionInfo listPartitionInfo = (ListPartitionInfo) partitionInfo; - listPartitionInfo.handleNewListPartitionDescs(olapTable.getIdToColumn(), - partitionList, existPartitionNameSet, isTempPartition); - } else { - throw new DdlException("Only support adding partition to range/list partitioned table"); - } - - if (isTempPartition) { - for (Pair entry : partitionList) { - Partition partition = entry.first; - if (!existPartitionNameSet.contains(partition.getName())) { - olapTable.addTempPartition(partition); - } - } - } else { - for (Pair entry : partitionList) { - Partition partition = entry.first; - if (!existPartitionNameSet.contains(partition.getName())) { - olapTable.addPartition(partition); - } - } - } - } - - private void addRangePartitionLog(Database db, OlapTable olapTable, List partitionDescs, - boolean isTempPartition, PartitionInfo partitionInfo, - List partitionList, Set existPartitionNameSet) { - int partitionLen = partitionList.size(); - List partitionInfoV2List = Lists.newArrayListWithCapacity(partitionLen); - if (partitionLen == 1) { - Partition partition = partitionList.get(0); - if (existPartitionNameSet.contains(partition.getName())) { - LOG.info("add partition[{}] which already exists", partition.getName()); - return; - } - PartitionPersistInfoV2 info = new RangePartitionPersistInfo(db.getId(), olapTable.getId(), partition, - partitionDescs.get(0).getPartitionDataProperty(), - partitionInfo.getReplicationNum(partition.getId()), - partitionInfo.getIsInMemory(partition.getId()), isTempPartition, - ((RangePartitionInfo) partitionInfo).getRange(partition.getId()), - ((SingleRangePartitionDesc) partitionDescs.get(0)).getDataCacheInfo()); - partitionInfoV2List.add(info); - AddPartitionsInfoV2 infos = new AddPartitionsInfoV2(partitionInfoV2List); - GlobalStateMgr.getCurrentState().getEditLog().logAddPartitions(infos); - - LOG.info("succeed in creating partition[{}], name: {}, temp: {}", partition.getId(), - partition.getName(), isTempPartition); - } else { - for (int i = 0; i < partitionLen; i++) { - Partition partition = partitionList.get(i); - if (!existPartitionNameSet.contains(partition.getName())) { - PartitionPersistInfoV2 info = new RangePartitionPersistInfo(db.getId(), olapTable.getId(), - partition, partitionDescs.get(i).getPartitionDataProperty(), - partitionInfo.getReplicationNum(partition.getId()), - partitionInfo.getIsInMemory(partition.getId()), isTempPartition, - ((RangePartitionInfo) partitionInfo).getRange(partition.getId()), - ((SingleRangePartitionDesc) partitionDescs.get(i)).getDataCacheInfo()); - - partitionInfoV2List.add(info); - } - } - - AddPartitionsInfoV2 infos = new AddPartitionsInfoV2(partitionInfoV2List); - GlobalStateMgr.getCurrentState().getEditLog().logAddPartitions(infos); - - for (PartitionPersistInfoV2 infoV2 : partitionInfoV2List) { - LOG.info("succeed in creating partition[{}], name: {}, temp: {}", infoV2.getPartition().getId(), - infoV2.getPartition().getName(), isTempPartition); - } - } - } - - 
@VisibleForTesting - public void addListPartitionLog(Database db, OlapTable olapTable, List partitionDescs, - boolean isTempPartition, PartitionInfo partitionInfo, - List partitionList, Set existPartitionNameSet) - throws DdlException { - if (partitionList == null) { - throw new DdlException("partitionList should not null"); - } else if (partitionList.size() == 0) { - return; - } - - // TODO: add only 1 log for multi list partition - int i = 0; - for (Partition partition : partitionList) { - if (existPartitionNameSet.contains(partition.getName())) { - LOG.info("add partition[{}] which already exists", partition.getName()); - continue; - } - long partitionId = partition.getId(); - PartitionPersistInfoV2 info = new ListPartitionPersistInfo(db.getId(), olapTable.getId(), partition, - partitionDescs.get(i).getPartitionDataProperty(), - partitionInfo.getReplicationNum(partitionId), - partitionInfo.getIsInMemory(partitionId), - isTempPartition, - ((ListPartitionInfo) partitionInfo).getIdToValues().get(partitionId), - ((ListPartitionInfo) partitionInfo).getIdToMultiValues().get(partitionId), - partitionDescs.get(i).getDataCacheInfo()); - GlobalStateMgr.getCurrentState().getEditLog().logAddPartition(info); - LOG.info("succeed in creating list partition[{}], name: {}, temp: {}", partitionId, - partition.getName(), isTempPartition); - i++; - } - } - - private void addPartitionLog(Database db, OlapTable olapTable, List partitionDescs, - boolean isTempPartition, PartitionInfo partitionInfo, - List partitionList, Set existPartitionNameSet) - throws DdlException { - PartitionType partitionType = partitionInfo.getType(); - if (partitionInfo.isRangePartition()) { - addRangePartitionLog(db, olapTable, partitionDescs, isTempPartition, partitionInfo, partitionList, - existPartitionNameSet); - } else if (partitionType == PartitionType.LIST) { - addListPartitionLog(db, olapTable, partitionDescs, isTempPartition, partitionInfo, partitionList, - existPartitionNameSet); - } else { - throw new DdlException("Only support adding partition log to range/list partitioned table"); - } - } - - private void addSubPartitionLog(Database db, OlapTable olapTable, Partition partition, - List subPartitioins) throws DdlException { - List partitionInfoV2List = Lists.newArrayList(); - for (PhysicalPartition subPartition : subPartitioins) { - if (subPartition instanceof PhysicalPartitionImpl) { - PhysicalPartitionPersistInfoV2 info = new PhysicalPartitionPersistInfoV2(db.getId(), olapTable.getId(), - partition.getId(), (PhysicalPartitionImpl) subPartition); - partitionInfoV2List.add(info); - } - } - - AddSubPartitionsInfoV2 infos = new AddSubPartitionsInfoV2(partitionInfoV2List); - GlobalStateMgr.getCurrentState().getEditLog().logAddSubPartitions(infos); - - for (PhysicalPartition subPartition : subPartitioins) { - LOG.info("succeed in creating sub partitions[{}]", subPartition); - } - - } - - private void cleanExistPartitionNameSet(Set existPartitionNameSet, - HashMap> partitionNameToTabletSet) { - for (String partitionName : existPartitionNameSet) { - Set existPartitionTabletSet = partitionNameToTabletSet.get(partitionName); - if (existPartitionTabletSet == null) { - // should not happen - continue; - } - for (Long tabletId : existPartitionTabletSet) { - // createPartitionWithIndices create duplicate tablet that if not exists scenario - // so here need to clean up those created tablets which partition already exists from invert index - GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteTablet(tabletId); - } - } - } - 
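// Illustrative sketch (not taken from this patch): why cleanExistPartitionNameSet above touches
// the inverted index. Tablets are created optimistically before the final under-lock existence
// check, so tablets of a partition that turned out to already exist must be unregistered again
// (cleanTabletIdSetForAll below does the same after a failed creation):
//
//     for (Long tabletId : partitionNameToTabletSet.get(partitionName)) {
//         GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteTablet(tabletId);
//     }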
- private void cleanTabletIdSetForAll(Set tabletIdSetForAll) { - // Cleanup of shards for LakeTable is taken care by ShardDeleter - for (Long tabletId : tabletIdSetForAll) { - GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteTablet(tabletId); - } - } - - private void checkPartitionNum(OlapTable olapTable) throws DdlException { - if (olapTable.getNumberOfPartitions() > Config.max_partition_number_per_table) { - throw new DdlException("Table " + olapTable.getName() + " created partitions exceeded the maximum limit: " + - Config.max_partition_number_per_table + ". You can modify this restriction on by setting" + - " max_partition_number_per_table larger."); - } - } - - private void addPartitions(ConnectContext ctx, Database db, String tableName, List partitionDescs, - boolean isTempPartition, DistributionDesc distributionDesc) throws DdlException { - DistributionInfo distributionInfo; - OlapTable olapTable; - OlapTable copiedTable; - - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.READ); - Set checkExistPartitionName = Sets.newConcurrentHashSet(); - try { - olapTable = checkTable(db, tableName); - - // get partition info - PartitionInfo partitionInfo = olapTable.getPartitionInfo(); - - // check partition type - checkPartitionType(partitionInfo); - - // check partition num - checkPartitionNum(olapTable); - - // get distributionInfo - distributionInfo = getDistributionInfo(olapTable, distributionDesc).copy(); - olapTable.inferDistribution(distributionInfo); - - // check colocation - checkColocation(db, olapTable, distributionInfo, partitionDescs); - copiedTable = getShadowCopyTable(olapTable); - copiedTable.setDefaultDistributionInfo(distributionInfo); - checkExistPartitionName = CatalogUtils.checkPartitionNameExistForAddPartitions(olapTable, partitionDescs); - } finally { - locker.unLockDatabase(db.getId(), LockType.READ); - } - - Preconditions.checkNotNull(distributionInfo); - Preconditions.checkNotNull(olapTable); - Preconditions.checkNotNull(copiedTable); - - // create partition outside db lock - checkDataProperty(partitionDescs); - - Set tabletIdSetForAll = Sets.newHashSet(); - HashMap> partitionNameToTabletSet = Maps.newHashMap(); - try { - // create partition list - List> newPartitions = - createPartitionMap(db, copiedTable, partitionDescs, partitionNameToTabletSet, tabletIdSetForAll, - checkExistPartitionName, ctx.getCurrentWarehouseId()); - - // build partitions - List partitionList = newPartitions.stream().map(x -> x.first).collect(Collectors.toList()); - buildPartitions(db, copiedTable, partitionList.stream().map(Partition::getSubPartitions) - .flatMap(p -> p.stream()).collect(Collectors.toList()), ctx.getCurrentWarehouseId()); - - // check again - if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) { - throw new DdlException("db " + db.getFullName() - + "(" + db.getId() + ") has been dropped"); - } - Set existPartitionNameSet = Sets.newHashSet(); - try { - olapTable = checkTable(db, tableName); - existPartitionNameSet = CatalogUtils.checkPartitionNameExistForAddPartitions(olapTable, - partitionDescs); - if (existPartitionNameSet.size() > 0) { - for (String partitionName : existPartitionNameSet) { - LOG.info("add partition[{}] which already exists", partitionName); - } - } - - // check if meta changed - checkIfMetaChange(olapTable, copiedTable, tableName); - - // get partition info - PartitionInfo partitionInfo = olapTable.getPartitionInfo(); - - // check partition type - checkPartitionType(partitionInfo); - - // update partition info 
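// Illustrative sketch (not taken from this patch): the three lock phases of addPartitions, of
// which the surrounding block is the final one. Only names from this diff are used.
//
//     locker.lockDatabase(db.getId(), LockType.READ);               // 1. cheap validation under READ lock
//     try { olapTable = checkTable(db, tableName); /* type, count, colocation checks */ }
//     finally { locker.unLockDatabase(db.getId(), LockType.READ); }
//
//     createPartitionMap(...); buildPartitions(...);                // 2. heavy tablet creation, no db lock held
//
//     locker.lockDatabaseAndCheckExist(db, LockType.WRITE);         // 3. re-validate, then commit + log
//     try { checkIfMetaChange(olapTable, copiedTable, tableName); updatePartitionInfo(...); addPartitionLog(...); }
//     finally { locker.unLockDatabase(db.getId(), LockType.WRITE); }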
- updatePartitionInfo(partitionInfo, newPartitions, existPartitionNameSet, isTempPartition, olapTable); - - try { - colocateTableIndex.updateLakeTableColocationInfo(olapTable, true /* isJoin */, - null /* expectGroupId */); - } catch (DdlException e) { - LOG.info("table {} update colocation info failed when add partition, {}", olapTable.getId(), e.getMessage()); - } - - // add partition log - addPartitionLog(db, olapTable, partitionDescs, isTempPartition, partitionInfo, partitionList, - existPartitionNameSet); - } finally { - cleanExistPartitionNameSet(existPartitionNameSet, partitionNameToTabletSet); - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } catch (DdlException e) { - cleanTabletIdSetForAll(tabletIdSetForAll); - throw e; - } - } - - public void replayAddPartition(PartitionPersistInfoV2 info) throws DdlException { - Database db = this.getDb(info.getDbId()); - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - OlapTable olapTable = (OlapTable) getTable(db.getId(), info.getTableId()); - Partition partition = info.getPartition(); - - PartitionInfo partitionInfo = olapTable.getPartitionInfo(); - if (info.isTempPartition()) { - olapTable.addTempPartition(partition); - } else { - olapTable.addPartition(partition); - } - - PartitionType partitionType = partitionInfo.getType(); - if (partitionType == PartitionType.LIST) { - try { - ((ListPartitionInfo) partitionInfo).unprotectHandleNewPartitionDesc( - olapTable.getIdToColumn(), info.asListPartitionPersistInfo()); - } catch (AnalysisException e) { - throw new DdlException(e.getMessage()); - } - } else if (partitionInfo.isRangePartition()) { - ((RangePartitionInfo) partitionInfo).unprotectHandleNewSinglePartitionDesc( - info.asRangePartitionPersistInfo()); - } else if (partitionType == PartitionType.UNPARTITIONED) { - // insert overwrite job will create temp partition and replace the single partition. 
- partitionInfo.addPartition(partition.getId(), info.getDataProperty(), info.getReplicationNum(), - info.isInMemory(), info.getDataCacheInfo()); - } else { - throw new DdlException("Unsupported partition type: " + partitionType.name()); - } - - if (!isCheckpointThread()) { - // add to inverted index - TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { - long indexId = index.getId(); - int schemaHash = olapTable.getSchemaHashByIndexId(indexId); - TabletMeta tabletMeta = new TabletMeta(info.getDbId(), info.getTableId(), partition.getId(), - index.getId(), schemaHash, info.getDataProperty().getStorageMedium()); - for (Tablet tablet : index.getTablets()) { - long tabletId = tablet.getId(); - invertedIndex.addTablet(tabletId, tabletMeta); - // modify some logic - if (tablet instanceof LocalTablet) { - for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { - invertedIndex.addReplica(tabletId, replica); - } - } - } - } - } - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } - - public void replayAddPartition(PartitionPersistInfo info) throws DdlException { - Database db = this.getDb(info.getDbId()); - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - OlapTable olapTable = (OlapTable) getTable(db.getId(), info.getTableId()); - Partition partition = info.getPartition(); - - PartitionInfo partitionInfo = olapTable.getPartitionInfo(); - if (info.isTempPartition()) { - olapTable.addTempPartition(partition); - } else { - olapTable.addPartition(partition); - } - - if (partitionInfo.isRangePartition()) { - ((RangePartitionInfo) partitionInfo).unprotectHandleNewSinglePartitionDesc(partition.getId(), - info.isTempPartition(), info.getRange(), info.getDataProperty(), info.getReplicationNum(), - info.isInMemory()); - } else { - partitionInfo.addPartition( - partition.getId(), info.getDataProperty(), info.getReplicationNum(), info.isInMemory()); - } - if (!isCheckpointThread()) { - // add to inverted index - TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { - long indexId = index.getId(); - int schemaHash = olapTable.getSchemaHashByIndexId(indexId); - TabletMeta tabletMeta = new TabletMeta(info.getDbId(), info.getTableId(), partition.getId(), - index.getId(), schemaHash, info.getDataProperty().getStorageMedium()); - for (Tablet tablet : index.getTablets()) { - long tabletId = tablet.getId(); - invertedIndex.addTablet(tabletId, tabletMeta); - for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { - invertedIndex.addReplica(tabletId, replica); - } - } - } - } - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } - - @Override - public void dropPartition(Database db, Table table, DropPartitionClause clause) throws DdlException { - CatalogUtils.checkTableExist(db, table.getName()); - Locker locker = new Locker(); - OlapTable olapTable = (OlapTable) table; - Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); - PartitionInfo partitionInfo = olapTable.getPartitionInfo(); - - if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { - throw InvalidOlapTableStateException.of(olapTable.getState(), olapTable.getName()); - } - if (!partitionInfo.isRangePartition() && 
partitionInfo.getType() != PartitionType.LIST) { - throw new DdlException("Alter table [" + olapTable.getName() + "] failed. Not a partitioned table"); - } - boolean isTempPartition = clause.isTempPartition(); - - List existPartitions = Lists.newArrayList(); - List notExistPartitions = Lists.newArrayList(); - for (String partitionName : clause.getResolvedPartitionNames()) { - if (olapTable.checkPartitionNameExist(partitionName, isTempPartition)) { - existPartitions.add(partitionName); - } else { - notExistPartitions.add(partitionName); - } - } - if (CollectionUtils.isNotEmpty(notExistPartitions)) { - if (clause.isSetIfExists()) { - LOG.info("drop partition[{}] which does not exist", notExistPartitions); - } else { - ErrorReport.reportDdlException(ErrorCode.ERR_DROP_PARTITION_NON_EXISTENT, notExistPartitions); - } - } - if (CollectionUtils.isEmpty(existPartitions)) { - return; - } - for (String partitionName : existPartitions) { - // drop - if (isTempPartition) { - olapTable.dropTempPartition(partitionName, true); - } else { - Partition partition = olapTable.getPartition(partitionName); - if (!clause.isForceDrop()) { - if (partition != null) { - if (stateMgr.getGlobalTransactionMgr() - .existCommittedTxns(db.getId(), olapTable.getId(), partition.getId())) { - throw new DdlException( - "There are still some transactions in the COMMITTED state waiting to be completed." + - " The partition [" + partitionName + - "] cannot be dropped. If you want to forcibly drop(cannot be recovered)," + - " please use \"DROP PARTITION FORCE\"."); - } - } - } - Range partitionRange = null; - if (partition != null) { - GlobalStateMgr.getCurrentState().getAnalyzeMgr().recordDropPartition(partition.getId()); - if (partitionInfo instanceof RangePartitionInfo) { - partitionRange = ((RangePartitionInfo) partitionInfo).getRange(partition.getId()); - } - } - - olapTable.dropPartition(db.getId(), partitionName, clause.isForceDrop()); - if (olapTable instanceof MaterializedView) { - MaterializedView mv = (MaterializedView) olapTable; - SyncPartitionUtils.dropBaseVersionMeta(mv, partitionName, partitionRange); - } - } - } - if (!isTempPartition) { - try { - for (MvId mvId : olapTable.getRelatedMaterializedViews()) { - MaterializedView materializedView = (MaterializedView) getTable(db.getId(), mvId.getId()); - if (materializedView != null && materializedView.isLoadTriggeredRefresh()) { - GlobalStateMgr.getCurrentState().getLocalMetastore().refreshMaterializedView( - db.getFullName(), materializedView.getName(), false, null, - Constants.TaskRunPriority.NORMAL.value(), true, false); - } - } - } catch (MetaNotFoundException e) { - throw new DdlException("fail to refresh materialized views when dropping partition", e); - } - } - long dbId = db.getId(); - long tableId = olapTable.getId(); - EditLog editLog = GlobalStateMgr.getCurrentState().getEditLog(); - - if (clause.getPartitionName() != null) { - String partitionName = clause.getPartitionName(); - DropPartitionInfo info = new DropPartitionInfo(dbId, tableId, partitionName, isTempPartition, clause.isForceDrop()); - editLog.logDropPartition(info); - LOG.info("succeed in dropping partition[{}], is temp : {}, is force : {}", partitionName, isTempPartition, - clause.isForceDrop()); - } else { - DropPartitionsInfo info = - new DropPartitionsInfo(dbId, tableId, isTempPartition, clause.isForceDrop(), existPartitions); - editLog.logDropPartitions(info); - LOG.info("succeed in dropping partitions[{}], is temp : {}, is force : {}", existPartitions, isTempPartition, - 
clause.isForceDrop()); - } - - } - - public void replayDropPartition(DropPartitionInfo info) { - Database db = this.getDb(info.getDbId()); - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - OlapTable olapTable = (OlapTable) getTable(db.getId(), info.getTableId()); - if (info.isTempPartition()) { - olapTable.dropTempPartition(info.getPartitionName(), true); - } else { - olapTable.dropPartition(info.getDbId(), info.getPartitionName(), info.isForceDrop()); - } - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } - - public void replayDropPartitions(DropPartitionsInfo info) { - Database db = this.getDb(info.getDbId()); - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - LOG.info("Begin to unprotect drop partitions. db = " + info.getDbId() - + " table = " + info.getTableId() - + " partitionNames = " + info.getPartitionNames()); - List partitionNames = info.getPartitionNames(); - OlapTable olapTable = (OlapTable) getTable(db.getId(), info.getTableId()); - boolean isTempPartition = info.isTempPartition(); - long dbId = info.getDbId(); - boolean isForceDrop = info.isForceDrop(); - partitionNames.stream().forEach(partitionName -> { - if (isTempPartition) { - olapTable.dropTempPartition(partitionName, true); - } else { - olapTable.dropPartition(dbId, partitionName, isForceDrop); - } - }); - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } - - public void replayErasePartition(long partitionId) throws DdlException { - recycleBin.replayErasePartition(partitionId); - } - - public void replayRecoverPartition(RecoverInfo info) { - long dbId = info.getDbId(); - Database db = getDb(dbId); - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - Table table = getTable(db.getId(), info.getTableId()); - recycleBin.replayRecoverPartition((OlapTable) table, info.getPartitionId()); - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } - - private PhysicalPartition createPhysicalPartition(String name, Database db, OlapTable olapTable, - Partition partition, long warehouseId) throws DdlException { - long partitionId = partition.getId(); - DistributionInfo distributionInfo = olapTable.getDefaultDistributionInfo().copy(); - olapTable.inferDistribution(distributionInfo); - // create sub partition - Map indexMap = new HashMap<>(); - for (long indexId : olapTable.getIndexIdToMeta().keySet()) { - MaterializedIndex rollup = new MaterializedIndex(indexId, MaterializedIndex.IndexState.NORMAL); - indexMap.put(indexId, rollup); - } - - Long id = GlobalStateMgr.getCurrentState().getNextId(); - // physical partitions in the same logical partition use the same shard_group_id, - // so that the shards of this logical partition are more evenly distributed. 
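// Illustrative sketch (not taken from this patch): the shape shared by the replay methods above
// (replayDropPartition, replayDropPartitions, replayRecoverPartition). A replay applies the
// already-logged change under the db WRITE lock and never writes the edit log again:
//
//     Database db = this.getDb(info.getDbId());
//     Locker locker = new Locker();
//     locker.lockDatabase(db.getId(), LockType.WRITE);
//     try {
//         OlapTable olapTable = (OlapTable) getTable(db.getId(), info.getTableId());
//         olapTable.dropPartition(info.getDbId(), info.getPartitionName(), info.isForceDrop());
//     } finally {
//         locker.unLockDatabase(db.getId(), LockType.WRITE);
//     }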
- long shardGroupId = partition.getShardGroupId(); - - if (name == null) { - name = partition.generatePhysicalPartitionName(id); - } - PhysicalPartitionImpl physicalParition = new PhysicalPartitionImpl( - id, name, partition.getId(), shardGroupId, indexMap.get(olapTable.getBaseIndexId())); - - PartitionInfo partitionInfo = olapTable.getPartitionInfo(); - short replicationNum = partitionInfo.getReplicationNum(partitionId); - TStorageMedium storageMedium = partitionInfo.getDataProperty(partitionId).getStorageMedium(); - for (Map.Entry entry : indexMap.entrySet()) { - long indexId = entry.getKey(); - MaterializedIndex index = entry.getValue(); - MaterializedIndexMeta indexMeta = olapTable.getIndexIdToMeta().get(indexId); - Set tabletIdSet = new HashSet<>(); - - // create tablets - TabletMeta tabletMeta = - new TabletMeta(db.getId(), olapTable.getId(), id, indexId, indexMeta.getSchemaHash(), - storageMedium, olapTable.isCloudNativeTableOrMaterializedView()); - - if (olapTable.isCloudNativeTableOrMaterializedView()) { - createLakeTablets(olapTable, id, shardGroupId, index, distributionInfo, - tabletMeta, tabletIdSet, warehouseId); - } else { - createOlapTablets(olapTable, index, Replica.ReplicaState.NORMAL, distributionInfo, - physicalParition.getVisibleVersion(), replicationNum, tabletMeta, tabletIdSet); - } - if (index.getId() != olapTable.getBaseIndexId()) { - // add rollup index to partition - physicalParition.createRollupIndex(index); - } - } - - return physicalParition; - } - - public void addSubPartitions(Database db, OlapTable table, Partition partition, - int numSubPartition, long warehouseId) throws DdlException { - try { - table.setAutomaticBucketing(true); - addSubPartitions(db, table, partition, numSubPartition, null, warehouseId); - } finally { - table.setAutomaticBucketing(false); - } - } - - private void addSubPartitions(Database db, OlapTable table, Partition partition, - int numSubPartition, String[] subPartitionNames, long warehouseId) throws DdlException { - OlapTable olapTable; - OlapTable copiedTable; - - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.READ); - try { - olapTable = checkTable(db, table.getId()); - - if (partition.getDistributionInfo().getType() != DistributionInfo.DistributionInfoType.RANDOM) { - throw new DdlException("Only support adding physical partition to random distributed table"); - } - - copiedTable = getShadowCopyTable(olapTable); - } finally { - locker.unLockDatabase(db.getId(), LockType.READ); - } - - Preconditions.checkNotNull(olapTable); - Preconditions.checkNotNull(copiedTable); - - List subPartitions = new ArrayList<>(); - // create physical partition - for (int i = 0; i < numSubPartition; i++) { - String name = subPartitionNames != null && subPartitionNames.length > i ? 
subPartitionNames[i] : null; - PhysicalPartition subPartition = createPhysicalPartition(name, db, copiedTable, partition, warehouseId); - subPartitions.add(subPartition); - } - - // build partitions - buildPartitions(db, copiedTable, subPartitions, warehouseId); - - // check again - if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) { - throw new DdlException("db " + db.getFullName() - + "(" + db.getId() + ") has been dropped"); - } - try { - // check if meta changed - checkIfMetaChange(olapTable, copiedTable, table.getName()); - - for (PhysicalPartition subPartition : subPartitions) { - // add sub partition - partition.addSubPartition(subPartition); - olapTable.addPhysicalPartition(subPartition); - } - - olapTable.setShardGroupChanged(true); - - // add partition log - addSubPartitionLog(db, olapTable, partition, subPartitions); - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } - - public void replayAddSubPartition(PhysicalPartitionPersistInfoV2 info) throws DdlException { - Database db = this.getDb(info.getDbId()); - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - OlapTable olapTable = (OlapTable) getTable(db.getId(), info.getTableId()); - Partition partition = olapTable.getPartition(info.getPartitionId()); - PhysicalPartition physicalPartition = info.getPhysicalPartition(); - partition.addSubPartition(physicalPartition); - olapTable.addPhysicalPartition(physicalPartition); - - if (!isCheckpointThread()) { - // add to inverted index - TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); - for (MaterializedIndex index : physicalPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { - long indexId = index.getId(); - int schemaHash = olapTable.getSchemaHashByIndexId(indexId); - TabletMeta tabletMeta = new TabletMeta(info.getDbId(), info.getTableId(), info.getPartitionId(), - physicalPartition.getId(), index.getId(), schemaHash, olapTable.getPartitionInfo().getDataProperty( - info.getPartitionId()).getStorageMedium(), false); - for (Tablet tablet : index.getTablets()) { - long tabletId = tablet.getId(); - invertedIndex.addTablet(tabletId, tabletMeta); - // modify some logic - if (tablet instanceof LocalTablet) { - for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { - invertedIndex.addReplica(tabletId, replica); - } - } - } - } - } - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } - - Partition createPartition(Database db, OlapTable table, long partitionId, String partitionName, - Long version, Set tabletIdSet, long warehouseId) throws DdlException { - DistributionInfo distributionInfo = table.getDefaultDistributionInfo().copy(); - table.inferDistribution(distributionInfo); - - return createPartition(db, table, partitionId, partitionName, version, tabletIdSet, distributionInfo, warehouseId); - } - - Partition createPartition(Database db, OlapTable table, long partitionId, String partitionName, - Long version, Set tabletIdSet, DistributionInfo distributionInfo, - long warehouseId) throws DdlException { - PartitionInfo partitionInfo = table.getPartitionInfo(); - Map indexMap = new HashMap<>(); - for (long indexId : table.getIndexIdToMeta().keySet()) { - MaterializedIndex rollup = new MaterializedIndex(indexId, MaterializedIndex.IndexState.NORMAL); - indexMap.put(indexId, rollup); - } - - // create shard group - long shardGroupId = 0; - if (table.isCloudNativeTableOrMaterializedView()) { - shardGroupId = 
GlobalStateMgr.getCurrentState().getStarOSAgent(). - createShardGroup(db.getId(), table.getId(), partitionId); - } - - Partition partition = - new Partition(partitionId, partitionName, indexMap.get(table.getBaseIndexId()), - distributionInfo, shardGroupId); - // version - if (version != null) { - partition.updateVisibleVersion(version); - } - - short replicationNum = partitionInfo.getReplicationNum(partitionId); - TStorageMedium storageMedium = partitionInfo.getDataProperty(partitionId).getStorageMedium(); - for (Map.Entry entry : indexMap.entrySet()) { - long indexId = entry.getKey(); - MaterializedIndex index = entry.getValue(); - MaterializedIndexMeta indexMeta = table.getIndexIdToMeta().get(indexId); - - // create tablets - TabletMeta tabletMeta = - new TabletMeta(db.getId(), table.getId(), partitionId, indexId, indexMeta.getSchemaHash(), - storageMedium, table.isCloudNativeTableOrMaterializedView()); - - if (table.isCloudNativeTableOrMaterializedView()) { - createLakeTablets(table, partitionId, shardGroupId, index, distributionInfo, - tabletMeta, tabletIdSet, warehouseId); - } else { - createOlapTablets(table, index, Replica.ReplicaState.NORMAL, distributionInfo, - partition.getVisibleVersion(), replicationNum, tabletMeta, tabletIdSet); - } - if (index.getId() != table.getBaseIndexId()) { - // add rollup index to partition - partition.createRollupIndex(index); - } - } - return partition; - } - - void buildPartitions(Database db, OlapTable table, List partitions, long warehouseId) - throws DdlException { - if (partitions.isEmpty()) { - return; - } - int numAliveNodes = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getAliveBackendNumber(); - - if (RunMode.isSharedDataMode()) { - numAliveNodes = 0; - List computeNodeIds = GlobalStateMgr.getCurrentState().getWarehouseMgr().getAllComputeNodeIds(warehouseId); - for (long nodeId : computeNodeIds) { - if (GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackendOrComputeNode(nodeId).isAlive()) { - ++numAliveNodes; - } - } - } - if (numAliveNodes == 0) { - if (RunMode.isSharedDataMode()) { - throw new DdlException("no alive compute nodes"); - } else { - throw new DdlException("no alive backends"); - } - } - - int numReplicas = 0; - for (PhysicalPartition partition : partitions) { - numReplicas += partition.storageReplicaCount(); - } - - try { - GlobalStateMgr.getCurrentState().getConsistencyChecker().addCreatingTableId(table.getId()); - if (numReplicas > Config.create_table_max_serial_replicas) { - LOG.info("start to build {} partitions concurrently for table {}.{} with {} replicas", - partitions.size(), db.getFullName(), table.getName(), numReplicas); - TabletTaskExecutor.buildPartitionsConcurrently( - db.getId(), table, partitions, numReplicas, numAliveNodes, warehouseId); - } else { - LOG.info("start to build {} partitions sequentially for table {}.{} with {} replicas", - partitions.size(), db.getFullName(), table.getName(), numReplicas); - TabletTaskExecutor.buildPartitionsSequentially( - db.getId(), table, partitions, numReplicas, numAliveNodes, warehouseId); - } - } finally { - GlobalStateMgr.getCurrentState().getConsistencyChecker().deleteCreatingTableId(table.getId()); - } - } - - /* - * generate and check columns' order and key's existence - */ - void validateColumns(List columns) throws DdlException { - if (columns.isEmpty()) { - ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_MUST_HAVE_COLUMNS); - } - - boolean encounterValue = false; - boolean hasKey = false; - for (Column column : columns) { - 
if (column.isKey()) { - if (encounterValue) { - ErrorReport.reportDdlException(ErrorCode.ERR_OLAP_KEY_MUST_BEFORE_VALUE); - } - hasKey = true; - } else { - encounterValue = true; - } - } - - if (!hasKey) { - ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_MUST_HAVE_KEYS); - } - } - - // only for test - public void setColocateTableIndex(ColocateTableIndex colocateTableIndex) { - this.colocateTableIndex = colocateTableIndex; - } - - public ColocateTableIndex getColocateTableIndex() { - return colocateTableIndex; - } - - public void setLakeStorageInfo(Database db, OlapTable table, String storageVolumeId, Map properties) - throws DdlException { - DataCacheInfo dataCacheInfo = null; - try { - dataCacheInfo = PropertyAnalyzer.analyzeDataCacheInfo(properties); - } catch (AnalysisException e) { - throw new DdlException(e.getMessage()); - } - - // get service shard storage info from StarMgr - FilePathInfo pathInfo = !storageVolumeId.isEmpty() ? - stateMgr.getStarOSAgent().allocateFilePath(storageVolumeId, db.getId(), table.getId()) : - stateMgr.getStarOSAgent().allocateFilePath(db.getId(), table.getId()); - table.setStorageInfo(pathInfo, dataCacheInfo); - } - - public void onCreate(Database db, Table table, String storageVolumeId, boolean isSetIfNotExists) - throws DdlException { - // check database exists again, because database can be dropped when creating table - if (!tryLock(false)) { - throw new DdlException("Failed to acquire globalStateMgr lock. " + - "Try again or increasing value of `catalog_try_lock_timeout_ms` configuration."); - } - - try { - /* - * When creating table or mv, we need to create the tablets and prepare some of the - * metadata first before putting this new table or mv in the database. So after the - * first step, we need to acquire the global lock and double check whether the db still - * exists because it maybe dropped by other concurrent client. And if we don't use the lock - * protection and handle the concurrency properly, the replay of table/mv creation may fail - * on restart or on follower. - * - * After acquire the db lock, we also need to acquire the db lock and write edit log. Since the - * db lock maybe under high contention and IO is busy, current thread can hold the global lock - * for quite a long time and make the other operation waiting for the global lock fail. - * - * So here after the confirmation of existence of modifying database, we release the global lock - * When dropping database, we will set the `exist` field of db object to false. And in the following - * creation process, we will double-check the `exist` field. - */ - if (getDb(db.getId()) == null) { - throw new DdlException("Database has been dropped when creating table/mv/view"); - } - } finally { - unlock(); - } - - if (db.isSystemDatabase()) { - ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, table.getName(), - "cannot create table in system database"); - } - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - if (!db.isExist()) { - throw new DdlException("Database has been dropped when creating table/mv/view"); - } - - if (!db.registerTableUnlocked(table)) { - if (!isSetIfNotExists) { - table.delete(db.getId(), false); - ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, table.getName(), - "table already exists"); - } else { - LOG.info("Create table[{}] which already exists", table.getName()); - return; - } - } - - // NOTE: The table has been added to the database, and the following procedure cannot throw exception. 
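// Illustrative sketch (not taken from this patch): the ordering that onCreate above is built
// around. Only names from this diff are used.
//
//     tryLock(false);                                       // global lock: confirm the db still exists
//     try { if (getDb(db.getId()) == null) throw new DdlException(...); }
//     finally { unlock(); }                                 // released early to limit global-lock contention
//
//     locker.lockDatabase(db.getId(), LockType.WRITE);
//     try {
//         if (!db.isExist()) throw new DdlException(...);   // double-check under the db lock
//         db.registerTableUnlocked(table);                  // table becomes visible here
//         GlobalStateMgr.getCurrentState().getEditLog().logCreateTable(createTableInfo);
//         table.onCreate(db);                               // after registration, must not throw
//     } finally {
//         locker.unLockDatabase(db.getId(), LockType.WRITE);
//     }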
- LOG.info("Successfully create table: {}-{}, in database: {}-{}", - table.getName(), table.getId(), db.getFullName(), db.getId()); - - CreateTableInfo createTableInfo = new CreateTableInfo(db.getFullName(), table, storageVolumeId); - GlobalStateMgr.getCurrentState().getEditLog().logCreateTable(createTableInfo); - table.onCreate(db); - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } - - public void replayCreateTable(CreateTableInfo info) { - Table table = info.getTable(); - Database db = this.fullNameToDb.get(info.getDbName()); - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - db.registerTableUnlocked(table); - if (table.isTemporaryTable()) { - TemporaryTableMgr temporaryTableMgr = GlobalStateMgr.getCurrentState().getTemporaryTableMgr(); - UUID sessionId = ((OlapTable) table).getSessionId(); - temporaryTableMgr.addTemporaryTable(sessionId, db.getId(), table.getName(), table.getId()); - } - table.onReload(); - } catch (Throwable e) { - LOG.error("replay create table failed: {}", table, e); - // Rethrow, we should not eat the exception when replaying editlog. - throw e; - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - - if (!isCheckpointThread()) { - // add to inverted index - if (table.isOlapOrCloudNativeTable() || table.isMaterializedView()) { - TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); - OlapTable olapTable = (OlapTable) table; - long dbId = db.getId(); - long tableId = table.getId(); - for (PhysicalPartition partition : olapTable.getAllPhysicalPartitions()) { - long physicalPartitionId = partition.getId(); - TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty( - partition.getParentId()).getStorageMedium(); - for (MaterializedIndex mIndex : partition - .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { - long indexId = mIndex.getId(); - int schemaHash = olapTable.getSchemaHashByIndexId(indexId); - TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partition.getParentId(), physicalPartitionId, - indexId, schemaHash, medium, table.isCloudNativeTableOrMaterializedView()); - for (Tablet tablet : mIndex.getTablets()) { - long tabletId = tablet.getId(); - invertedIndex.addTablet(tabletId, tabletMeta); - if (tablet instanceof LocalTablet) { - for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { - invertedIndex.addReplica(tabletId, replica); - } - } - } - } - } // end for partitions - - DynamicPartitionUtil.registerOrRemovePartitionScheduleInfo(db.getId(), olapTable); - } - } - - // If user upgrades from 3.0, the storage volume id will be null - if (table.isCloudNativeTableOrMaterializedView() && info.getStorageVolumeId() != null) { - GlobalStateMgr.getCurrentState().getStorageVolumeMgr() - .replayBindTableToStorageVolume(info.getStorageVolumeId(), table.getId()); - } - } - - private void createLakeTablets(OlapTable table, long partitionId, long shardGroupId, MaterializedIndex index, - DistributionInfo distributionInfo, TabletMeta tabletMeta, - Set tabletIdSet, long warehouseId) - throws DdlException { - Preconditions.checkArgument(table.isCloudNativeTableOrMaterializedView()); - - DistributionInfo.DistributionInfoType distributionInfoType = distributionInfo.getType(); - if (distributionInfoType != DistributionInfo.DistributionInfoType.HASH - && distributionInfoType != DistributionInfo.DistributionInfoType.RANDOM) { - throw new DdlException("Unknown distribution type: " + distributionInfoType); - } - - 
Map properties = new HashMap<>(); - properties.put(LakeTablet.PROPERTY_KEY_TABLE_ID, Long.toString(table.getId())); - properties.put(LakeTablet.PROPERTY_KEY_PARTITION_ID, Long.toString(partitionId)); - properties.put(LakeTablet.PROPERTY_KEY_INDEX_ID, Long.toString(index.getId())); - int bucketNum = distributionInfo.getBucketNum(); - WarehouseManager warehouseManager = GlobalStateMgr.getCurrentState().getWarehouseMgr(); - Optional workerGroupId = warehouseManager.selectWorkerGroupByWarehouseId(warehouseId); - if (workerGroupId.isEmpty()) { - Warehouse warehouse = warehouseManager.getWarehouse(warehouseId); - throw ErrorReportException.report(ErrorCode.ERR_NO_NODES_IN_WAREHOUSE, warehouse.getName()); - } - List shardIds = stateMgr.getStarOSAgent().createShards(bucketNum, - table.getPartitionFilePathInfo(partitionId), table.getPartitionFileCacheInfo(partitionId), shardGroupId, - null, properties, workerGroupId.get()); - for (long shardId : shardIds) { - Tablet tablet = new LakeTablet(shardId); - index.addTablet(tablet, tabletMeta); - tabletIdSet.add(tablet.getId()); - } - } - - private void createOlapTablets(OlapTable table, MaterializedIndex index, Replica.ReplicaState replicaState, - DistributionInfo distributionInfo, long version, short replicationNum, - TabletMeta tabletMeta, Set tabletIdSet) throws DdlException { - Preconditions.checkArgument(replicationNum > 0); - - DistributionInfo.DistributionInfoType distributionInfoType = distributionInfo.getType(); - if (distributionInfoType != DistributionInfo.DistributionInfoType.HASH - && distributionInfoType != DistributionInfo.DistributionInfoType.RANDOM) { - throw new DdlException("Unknown distribution type: " + distributionInfoType); - } - - List> backendsPerBucketSeq = null; - ColocateTableIndex.GroupId groupId = null; - boolean initBucketSeqWithSameOrigNameGroup = false; - boolean isColocateTable = colocateTableIndex.isColocateTable(tabletMeta.getTableId()); - // chooseBackendsArbitrary is true, means this may be the first table of colocation group, - // or this is just a normal table, and we can choose backends arbitrary. - // otherwise, backends should be chosen from backendsPerBucketSeq; - boolean chooseBackendsArbitrary; - - // We should synchronize the creation of colocate tables, otherwise it can have concurrent issues. - // Considering the following situation, - // T1: P1 issues `create colocate table` and finds that there isn't a bucket sequence associated - // with the colocate group, so it will initialize the bucket sequence for the first time - // T2: P2 do the same thing as P1 - // T3: P1 set the bucket sequence for colocate group stored in `ColocateTableIndex` - // T4: P2 also set the bucket sequence, hence overwrite what P1 just wrote - // T5: After P1 creates the colocate table, the actual tablet distribution won't match the bucket sequence - // of the colocate group, and balancer will create a lot of COLOCATE_MISMATCH tasks which shouldn't exist. - if (isColocateTable) { - try { - // Optimization: wait first time, before global lock - colocateTableCreateSyncer.awaitZero(); - // Since we have supported colocate tables in different databases, - // we should use global lock, not db lock. - tryLock(false); - try { - // Wait again, for safety - // We are in global lock, we should have timeout in case holding lock for too long - colocateTableCreateSyncer.awaitZero(Config.catalog_try_lock_timeout_ms, TimeUnit.MILLISECONDS); - // if this is a colocate table, try to get backend seqs from colocation index. 
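// Illustrative sketch (not taken from this patch): the barrier that the T1..T5 comment above
// describes. The await/increment/decrement semantics are inferred from the calls used in this
// diff; colocateTableCreateSyncer itself is defined elsewhere.
//
//     colocateTableCreateSyncer.awaitZero();                         // wait for any in-flight first creator
//     tryLock(false);                                                // global lock: groups may span databases
//     try {
//         colocateTableCreateSyncer.awaitZero(Config.catalog_try_lock_timeout_ms, TimeUnit.MILLISECONDS);
//         backendsPerBucketSeq = colocateTableIndex.getBackendsPerBucketSeq(groupId);
//         if (backendsPerBucketSeq.isEmpty()) {
//             colocateTableCreateSyncer.increment();                 // this thread will define the bucket seq
//         }
//     } finally {
//         unlock();
//     }
//     // ... choose backends, persist the bucket sequence ...
//     colocateTableCreateSyncer.decrement();                         // in a finally block further below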
- groupId = colocateTableIndex.getGroup(tabletMeta.getTableId()); - backendsPerBucketSeq = colocateTableIndex.getBackendsPerBucketSeq(groupId); - if (backendsPerBucketSeq.isEmpty()) { - List colocateWithGroupsInOtherDb = - colocateTableIndex.getColocateWithGroupsInOtherDb(groupId); - if (!colocateWithGroupsInOtherDb.isEmpty()) { - backendsPerBucketSeq = - colocateTableIndex.getBackendsPerBucketSeq(colocateWithGroupsInOtherDb.get(0)); - initBucketSeqWithSameOrigNameGroup = true; - } - } - chooseBackendsArbitrary = backendsPerBucketSeq == null || backendsPerBucketSeq.isEmpty(); - if (chooseBackendsArbitrary) { - colocateTableCreateSyncer.increment(); - } - } finally { - unlock(); - } - } catch (InterruptedException e) { - LOG.warn("wait for concurrent colocate table creation finish failed, msg: {}", - e.getMessage(), e); - Thread.currentThread().interrupt(); - throw new DdlException("wait for concurrent colocate table creation finish failed", e); - } - } else { - chooseBackendsArbitrary = true; - } - - try { - if (chooseBackendsArbitrary) { - backendsPerBucketSeq = Lists.newArrayList(); - } - for (int i = 0; i < distributionInfo.getBucketNum(); ++i) { - // create a new tablet with random chosen backends - LocalTablet tablet = new LocalTablet(getNextId()); - - // add tablet to inverted index first - index.addTablet(tablet, tabletMeta); - tabletIdSet.add(tablet.getId()); - - // get BackendIds - List chosenBackendIds; - if (chooseBackendsArbitrary) { - // This is the first colocate table in the group, or just a normal table, - // randomly choose backends - if (Config.enable_strict_storage_medium_check) { - chosenBackendIds = - chosenBackendIdBySeq(replicationNum, table.getLocation(), tabletMeta.getStorageMedium()); - } else { - try { - chosenBackendIds = chosenBackendIdBySeq(replicationNum, table.getLocation()); - } catch (DdlException ex) { - throw new DdlException(String.format("%s, table=%s, default_replication_num=%d", - ex.getMessage(), table.getName(), Config.default_replication_num)); - } - } - backendsPerBucketSeq.add(chosenBackendIds); - } else { - // get backends from existing backend sequence - chosenBackendIds = backendsPerBucketSeq.get(i); - } - - // create replicas - for (long backendId : chosenBackendIds) { - long replicaId = getNextId(); - Replica replica = new Replica(replicaId, backendId, replicaState, version, - tabletMeta.getOldSchemaHash()); - tablet.addReplica(replica); - } - Preconditions.checkState(chosenBackendIds.size() == replicationNum, - chosenBackendIds.size() + " vs. " + replicationNum); - } - - // In the following two situations, we should set the bucket seq for colocate group and persist the info, - // 1. This is the first time we add a table to colocate group, and it doesn't have the same original name - // with colocate group in other database. - // 2. It's indeed the first time, but it should colocate with group in other db - // (because of having the same original name), we should use the bucket - // seq of other group to initialize our own. 
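// Condensed sketch of the synchronization protocol described in the comments above
// (reuses the surrounding method's fields, so it is illustrative rather than standalone):
//
//     colocateTableCreateSyncer.awaitZero();                     // cheap wait before the global lock
//     tryLock(false);
//     try {
//         colocateTableCreateSyncer.awaitZero(Config.catalog_try_lock_timeout_ms, TimeUnit.MILLISECONDS);
//         boolean bucketSeqUninitialized =
//                 colocateTableIndex.getBackendsPerBucketSeq(groupId).isEmpty();
//         if (bucketSeqUninitialized) {
//             colocateTableCreateSyncer.increment();             // this creator owns the initialization
//         }
//     } finally {
//         unlock();
//     }
//     // ...choose backends, persist the bucket sequence, and decrement() in a finally block,
//     // so a concurrent creator (P2 in the scenario above) cannot overwrite what P1 wrote.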
- if ((groupId != null && chooseBackendsArbitrary) || initBucketSeqWithSameOrigNameGroup) { - colocateTableIndex.addBackendsPerBucketSeq(groupId, backendsPerBucketSeq); - ColocatePersistInfo info = - ColocatePersistInfo.createForBackendsPerBucketSeq(groupId, backendsPerBucketSeq); - GlobalStateMgr.getCurrentState().getEditLog().logColocateBackendsPerBucketSeq(info); - } - } finally { - if (isColocateTable && chooseBackendsArbitrary) { - colocateTableCreateSyncer.decrement(); - } - } - } - - // create replicas for tablet with random chosen backends - private List chosenBackendIdBySeq(int replicationNum, Multimap locReq, - TStorageMedium storageMedium) - throws DdlException { - List chosenBackendIds = - GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getNodeSelector() - .seqChooseBackendIdsByStorageMedium(replicationNum, - true, true, locReq, storageMedium); - if (CollectionUtils.isEmpty(chosenBackendIds)) { - throw new DdlException( - "Failed to find enough hosts with storage medium " + storageMedium + - " at all backends, number of replicas needed: " + - replicationNum + ". Storage medium check failure can be forcefully ignored by executing " + - "'ADMIN SET FRONTEND CONFIG (\"enable_strict_storage_medium_check\" = \"false\");', " + - "but incompatible medium type can cause balance problem, so we strongly recommend" + - " creating table with compatible 'storage_medium' property set."); - } - return chosenBackendIds; - } - - private List chosenBackendIdBySeq(int replicationNum, Multimap locReq) throws DdlException { - SystemInfoService systemInfoService = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(); - List chosenBackendIds = systemInfoService.getNodeSelector() - .seqChooseBackendIds(replicationNum, true, true, locReq); - if (!CollectionUtils.isEmpty(chosenBackendIds)) { - return chosenBackendIds; - } else if (replicationNum > 1) { - List backendIds = systemInfoService.getBackendIds(true); - throw new DdlException( - String.format("Table replication num should be less than or equal to the number of available BE nodes. " - + "You can change this default by setting the replication_num table properties. " - + "Current alive backend is [%s]. 
", Joiner.on(",").join(backendIds))); - } else { - throw new DdlException("No alive nodes"); - } - } - - // Drop table - public void dropTable(DropTableStmt stmt) throws DdlException { - String dbName = stmt.getDbName(); - String tableName = stmt.getTableName(); - - // check database - Database db = getDb(dbName); - if (db == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); - } - if (db.isSystemDatabase()) { - ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, - "cannot drop table in system database: " + db.getOriginName()); - } - db.dropTable(tableName, stmt.isSetIfExists(), stmt.isForceDrop()); - } - - public void dropTemporaryTable(String dbName, long tableId, String tableName, - boolean isSetIfExsists, boolean isForce) throws DdlException { - Database db = getDb(dbName); - if (db == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); - } - db.dropTemporaryTable(tableId, tableName, isSetIfExsists, isForce); - } - - public void replayDropTable(Database db, long tableId, boolean isForceDrop) { - Table table; + public void replayCreateTable(CreateTableInfo info) { + Table table = info.getTable(); + Database db = this.fullNameToDb.get(info.getDbName()); Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.WRITE); try { - table = getTable(db.getId(), tableId); + db.registerTableUnlocked(table); if (table.isTemporaryTable()) { - table = db.unprotectDropTemporaryTable(tableId, isForceDrop, false); - UUID sessionId = ((OlapTable) table).getSessionId(); TemporaryTableMgr temporaryTableMgr = GlobalStateMgr.getCurrentState().getTemporaryTableMgr(); - temporaryTableMgr.dropTemporaryTable(sessionId, db.getId(), table.getName()); - } else { - table = db.unprotectDropTable(tableId, isForceDrop, true); - } - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - if (table != null && isForceDrop) { - table.delete(db.getId(), true); - } - } - - public void replayEraseTable(long tableId) { - recycleBin.replayEraseTable(Collections.singletonList(tableId)); - } - - public void replayEraseMultiTables(MultiEraseTableInfo multiEraseTableInfo) { - List tableIds = multiEraseTableInfo.getTableIds(); - recycleBin.replayEraseTable(tableIds); - } - - public void replayDisableTableRecovery(DisableTableRecoveryInfo disableTableRecoveryInfo) { - recycleBin.replayDisableTableRecovery(disableTableRecoveryInfo.getTableIds()); - } - - public void replayDisablePartitionRecovery(DisablePartitionRecoveryInfo disablePartitionRecoveryInfo) { - recycleBin.replayDisablePartitionRecovery(disablePartitionRecoveryInfo.getPartitionId()); - } - - public void replayRecoverTable(RecoverInfo info) { - long dbId = info.getDbId(); - Database db = getDb(dbId); - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - recycleBin.replayRecoverTable(db, info.getTableId()); - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } - - public void replayAddReplica(ReplicaPersistInfo info) { - Database db = getDbIncludeRecycleBin(info.getDbId()); - if (db == null) { - LOG.warn("replay add replica failed, db is null, info: {}", info); - return; - } - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - OlapTable olapTable = (OlapTable) getTableIncludeRecycleBin(db, info.getTableId()); - if (olapTable == null) { - LOG.warn("replay add replica failed, table is null, info: {}", info); - return; - } - PhysicalPartition partition = 
getPhysicalPartitionIncludeRecycleBin(olapTable, info.getPartitionId()); - if (partition == null) { - LOG.warn("replay add replica failed, partition is null, info: {}", info); - return; - } - MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId()); - if (materializedIndex == null) { - LOG.warn("replay add replica failed, materializedIndex is null, info: {}", info); - return; - } - LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(info.getTabletId()); - if (tablet == null) { - LOG.warn("replay add replica failed, tablet is null, info: {}", info); - return; - } - - // for compatibility - int schemaHash = info.getSchemaHash(); - if (schemaHash == -1) { - schemaHash = olapTable.getSchemaHashByIndexId(info.getIndexId()); - } - - Replica replica = new Replica(info.getReplicaId(), info.getBackendId(), info.getVersion(), - schemaHash, info.getDataSize(), info.getRowCount(), - Replica.ReplicaState.NORMAL, - info.getLastFailedVersion(), - info.getLastSuccessVersion(), - info.getMinReadableVersion()); - tablet.addReplica(replica); - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } - - public void replayUpdateReplica(ReplicaPersistInfo info) { - Database db = getDbIncludeRecycleBin(info.getDbId()); - if (db == null) { - LOG.warn("replay update replica failed, db is null, info: {}", info); - return; - } - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - OlapTable olapTable = (OlapTable) getTableIncludeRecycleBin(db, info.getTableId()); - if (olapTable == null) { - LOG.warn("replay update replica failed, table is null, info: {}", info); - return; - } - PhysicalPartition partition = getPhysicalPartitionIncludeRecycleBin(olapTable, info.getPartitionId()); - if (partition == null) { - LOG.warn("replay update replica failed, partition is null, info: {}", info); - return; - } - MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId()); - if (materializedIndex == null) { - LOG.warn("replay update replica failed, materializedIndex is null, info: {}", info); - return; - } - LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(info.getTabletId()); - if (tablet == null) { - LOG.warn("replay update replica failed, tablet is null, info: {}", info); - return; - } - Replica replica = tablet.getReplicaByBackendId(info.getBackendId()); - if (replica == null) { - LOG.warn("replay update replica failed, replica is null, info: {}", info); - return; - } - replica.updateRowCount(info.getVersion(), info.getMinReadableVersion(), info.getDataSize(), info.getRowCount()); - replica.setBad(false); - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - } - - public void replayDeleteReplica(ReplicaPersistInfo info) { - Database db = getDbIncludeRecycleBin(info.getDbId()); - if (db == null) { - LOG.warn("replay delete replica failed, db is null, info: {}", info); - return; - } - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - OlapTable olapTable = (OlapTable) getTableIncludeRecycleBin(db, info.getTableId()); - if (olapTable == null) { - LOG.warn("replay delete replica failed, table is null, info: {}", info); - return; - } - PhysicalPartition partition = getPhysicalPartitionIncludeRecycleBin(olapTable, info.getPartitionId()); - if (partition == null) { - LOG.warn("replay delete replica failed, partition is null, info: {}", info); - return; - } - MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId()); - if (materializedIndex == 
null) { - LOG.warn("replay delete replica failed, materializedIndex is null, info: {}", info); - return; - } - LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(info.getTabletId()); - if (tablet == null) { - LOG.warn("replay delete replica failed, tablet is null, info: {}", info); - return; + UUID sessionId = ((OlapTable) table).getSessionId(); + temporaryTableMgr.addTemporaryTable(sessionId, db.getId(), table.getName(), table.getId()); } - tablet.deleteReplicaByBackendId(info.getBackendId()); + table.onReload(); + } catch (Throwable e) { + LOG.error("replay create table failed: {}", table, e); + // Rethrow, we should not eat the exception when replaying editlog. + throw e; } finally { locker.unLockDatabase(db.getId(), LockType.WRITE); } - } - public void replayBatchDeleteReplica(BatchDeleteReplicaInfo info) { - if (info.getReplicaInfoList() != null) { - for (ReplicaPersistInfo persistInfo : info.getReplicaInfoList()) { - replayDeleteReplica(persistInfo); - } - } else { - LOG.warn("invalid BatchDeleteReplicaInfo, replicaInfoList is null"); - } - } + if (!isCheckpointThread()) { + // add to inverted index + if (table.isOlapOrCloudNativeTable() || table.isMaterializedView()) { + TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); + OlapTable olapTable = (OlapTable) table; + long dbId = db.getId(); + long tableId = table.getId(); + for (PhysicalPartition partition : olapTable.getAllPhysicalPartitions()) { + long physicalPartitionId = partition.getId(); + TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty( + partition.getParentId()).getStorageMedium(); + for (MaterializedIndex mIndex : partition + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + long indexId = mIndex.getId(); + int schemaHash = olapTable.getSchemaHashByIndexId(indexId); + TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partition.getParentId(), physicalPartitionId, + indexId, schemaHash, medium, table.isCloudNativeTableOrMaterializedView()); + for (Tablet tablet : mIndex.getTablets()) { + long tabletId = tablet.getId(); + invertedIndex.addTablet(tabletId, tabletMeta); + if (tablet instanceof LocalTablet) { + for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { + invertedIndex.addReplica(tabletId, replica); + } + } + } + } + } // end for partitions - @Override - public Database getDb(String name) { - if (name == null) { - return null; - } - if (fullNameToDb.containsKey(name)) { - return fullNameToDb.get(name); - } else { - // This maybe an information_schema db request, and information_schema db name is case-insensitive. - // So, we first extract db name to check if it is information_schema. - // Then we reassemble the origin cluster name with lower case db name, - // and finally get information_schema db from the name map. - String dbName = ClusterNamespace.getNameFromFullName(name); - if (dbName.equalsIgnoreCase(InfoSchemaDb.DATABASE_NAME) || - dbName.equalsIgnoreCase(SysDb.DATABASE_NAME)) { - return fullNameToDb.get(dbName.toLowerCase()); + DynamicPartitionUtil.registerOrRemovePartitionScheduleInfo(db.getId(), olapTable); } } - return null; - } - - @Override - public Database getDb(long dbId) { - return idToDb.get(dbId); - } - - public Optional mayGetDb(String name) { - return Optional.ofNullable(getDb(name)); - } - - public Optional mayGetDb(long dbId) { - return Optional.ofNullable(getDb(dbId)); - } - - public Optional
mayGetTable(long dbId, long tableId) { - return mayGetDb(dbId).flatMap(db -> Optional.ofNullable(db.getTable(tableId))); - } - - public Optional<Table>
mayGetTable(String dbName, String tableName) { - return mayGetDb(dbName).flatMap(db -> Optional.ofNullable(db.getTable(tableName))); - } - - public ConcurrentHashMap getFullNameToDb() { - return fullNameToDb; - } - - public Database getDbIncludeRecycleBin(long dbId) { - Database db = idToDb.get(dbId); - if (db == null) { - db = recycleBin.getDatabase(dbId); - } - return db; - } - @Override - public boolean tableExists(String dbName, String tblName) { - Database database = getDb(dbName); - if (database == null) { - return false; + // If user upgrades from 3.0, the storage volume id will be null + if (table.isCloudNativeTableOrMaterializedView() && info.getStorageVolumeId() != null) { + GlobalStateMgr.getCurrentState().getStorageVolumeMgr() + .replayBindTableToStorageVolume(info.getStorageVolumeId(), table.getId()); } - return database.getTable(tblName) != null; } @Override - public Table getTable(String dbName, String tblName) { - Database database = getDb(dbName); - if (database == null) { - return null; - } - return database.getTable(tblName); + public void dropTable(DropInfo dropInfo) { + GlobalStateMgr.getCurrentState().getEditLog().logDropTable(dropInfo); } - public Table getTable(Long dbId, Long tableId) { - Database database = getDb(dbId); - if (database == null) { - return null; + public void replayDropTable(Database db, long tableId, boolean isForceDrop) { + Table table; + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + table = getTable(db.getId(), tableId); + if (table.isTemporaryTable()) { + table = db.unprotectDropTemporaryTable(tableId, isForceDrop, false); + UUID sessionId = ((OlapTable) table).getSessionId(); + TemporaryTableMgr temporaryTableMgr = GlobalStateMgr.getCurrentState().getTemporaryTableMgr(); + temporaryTableMgr.dropTemporaryTable(sessionId, db.getId(), table.getName()); + } else { + table = db.unprotectDropTable(tableId, isForceDrop, true); + } + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); } - return database.getTable(tableId); - } - - public List
getTables(Long dbId) { - Database database = getDb(dbId); - if (database == null) { - return Collections.emptyList(); - } else { - return database.getTables(); + if (table != null && isForceDrop) { + table.delete(db.getId(), true); } } @Override - public Pair getMaterializedViewIndex(String dbName, String indexName) { - Database database = getDb(dbName); - if (database == null) { - return null; - } - return database.getMaterializedViewIndex(indexName); - } - - public Table getTableIncludeRecycleBin(Database db, long tableId) { - Table table = getTable(db.getId(), tableId); - if (table == null) { - table = recycleBin.getTable(db.getId(), tableId); - } - return table; + public void renameTable(TableInfo tableInfo) { + GlobalStateMgr.getCurrentState().getEditLog().logTableRename(tableInfo); } - public List
getTablesIncludeRecycleBin(Database db) { - List<Table>
tables = db.getTables(); - tables.addAll(recycleBin.getTables(db.getId())); - return tables; - } + public void replayRenameTable(TableInfo tableInfo) { + long dbId = tableInfo.getDbId(); + long tableId = tableInfo.getTableId(); + String newTableName = tableInfo.getNewTableName(); - public Partition getPartitionIncludeRecycleBin(OlapTable table, long partitionId) { - Partition partition = table.getPartition(partitionId); - if (partition == null) { - partition = recycleBin.getPartition(partitionId); - } - return partition; - } + Database db = getDb(dbId); + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + OlapTable table = (OlapTable) getTable(db.getId(), tableId); + String tableName = table.getName(); + db.dropTable(tableName); + table.setName(newTableName); + db.registerTableUnlocked(table); + inactiveRelatedMaterializedView(db, table, + MaterializedViewExceptions.inactiveReasonForBaseTableRenamed(tableName)); - public PhysicalPartition getPhysicalPartitionIncludeRecycleBin(OlapTable table, long physicalPartitionId) { - PhysicalPartition partition = table.getPhysicalPartition(physicalPartitionId); - if (partition == null) { - partition = recycleBin.getPhysicalPartition(physicalPartitionId); + LOG.info("replay rename table[{}] to {}, tableId: {}", tableName, newTableName, table.getId()); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); } - return partition; } - public Collection getPartitionsIncludeRecycleBin(OlapTable table) { - Collection partitions = new ArrayList<>(table.getPartitions()); - partitions.addAll(recycleBin.getPartitions(table.getId())); - return partitions; - } - - public Collection getAllPartitionsIncludeRecycleBin(OlapTable table) { - Collection partitions = table.getAllPartitions(); - partitions.addAll(recycleBin.getPartitions(table.getId())); - return partitions; + @Override + public void truncateTable(TruncateTableInfo info) { + GlobalStateMgr.getCurrentState().getEditLog().logTruncateTable(info); } - // NOTE: result can be null, cause partition erase is not in db lock - public DataProperty getDataPropertyIncludeRecycleBin(PartitionInfo info, long partitionId) { - DataProperty dataProperty = info.getDataProperty(partitionId); - if (dataProperty == null) { - dataProperty = recycleBin.getPartitionDataProperty(partitionId); + public void truncateTableInternal(OlapTable olapTable, List newPartitions, + boolean isEntireTable, boolean isReplay) { + // use new partitions to replace the old ones. 
+ Set oldTablets = Sets.newHashSet(); + for (Partition newPartition : newPartitions) { + Partition oldPartition = olapTable.replacePartition(newPartition); + for (PhysicalPartition physicalPartition : oldPartition.getSubPartitions()) { + // save old tablets to be removed + for (MaterializedIndex index : physicalPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + // let HashSet do the deduplicate work + oldTablets.addAll(index.getTablets()); + } + } } - return dataProperty; - } - // NOTE: result can be -1, cause partition erase is not in db lock - public short getReplicationNumIncludeRecycleBin(PartitionInfo info, long partitionId) { - short replicaNum = info.getReplicationNum(partitionId); - if (replicaNum == (short) -1) { - replicaNum = recycleBin.getPartitionReplicationNum(partitionId); + if (isEntireTable) { + // drop all temp partitions + olapTable.dropAllTempPartitions(); } - return replicaNum; - } - - @Override - public List listDbNames() { - return Lists.newArrayList(fullNameToDb.keySet()); - } - @Override - public List listTableNames(String dbName) { - Database database = getDb(dbName); - if (database != null) { - return database.getTables().stream() - .map(Table::getName).collect(Collectors.toList()); - } else { - throw new StarRocksConnectorException("Database " + dbName + " doesn't exist"); + // remove the tablets in old partitions + for (Tablet tablet : oldTablets) { + TabletInvertedIndex index = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); + index.deleteTablet(tablet.getId()); + // Ensure that only the leader records truncate information. + // TODO(yangzaorang): the information will be lost when failover occurs. The probability of this case + // happening is small, and the trash data will be deleted by BE anyway, but we need to find a better + // solution. + if (!isReplay) { + index.markTabletForceDelete(tablet); + } } } - @Override - public List getDbIds() { - return Lists.newArrayList(idToDb.keySet()); - } - - public List getDbIdsIncludeRecycleBin() { - List dbIds = getDbIds(); - dbIds.addAll(recycleBin.getAllDbIds()); - return dbIds; - } - - public HashMap getPartitionIdToStorageMediumMap() { - HashMap storageMediumMap = new HashMap<>(); - - // record partition which need to change storage medium - // dbId -> (tableId -> partitionId) - HashMap> changedPartitionsMap = new HashMap<>(); - long currentTimeMs = System.currentTimeMillis(); - List dbIds = getDbIds(); - - for (long dbId : dbIds) { - Database db = getDb(dbId); - if (db == null) { - LOG.warn("db {} does not exist while doing backend report", dbId); - continue; - } - - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.READ); - try { - for (Table table : db.getTables()) { - if (!table.isOlapTableOrMaterializedView()) { - continue; - } - - long tableId = table.getId(); - OlapTable olapTable = (OlapTable) table; - PartitionInfo partitionInfo = olapTable.getPartitionInfo(); - for (Partition partition : olapTable.getAllPartitions()) { - long partitionId = partition.getId(); - DataProperty dataProperty = partitionInfo.getDataProperty(partition.getId()); - Preconditions.checkNotNull(dataProperty, - partition.getName() + ", pId:" + partitionId + ", db: " + dbId + ", tbl: " + tableId); - // only normal state table can migrate. - // PRIMARY_KEYS table does not support local migration. 
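// Sketch of the two-phase flow implemented below (simplified over the same types; the
// variable names are those of the surrounding method, so this is illustrative only):
//
//     // phase 1: under the db READ lock, only record partitions whose SSD cooldown expired
//     if (dataProperty.getStorageMedium() == TStorageMedium.SSD
//             && dataProperty.getCooldownTimeMs() < currentTimeMs
//             && olapTable.getState() == OlapTable.OlapTableState.NORMAL) {
//         changedPartitionsMap.computeIfAbsent(dbId, k -> HashMultimap.<Long, Long>create())
//                 .put(tableId, partitionId);
//     } else {
//         storageMediumMap.put(partitionId, dataProperty.getStorageMedium());
//     }
//
//     // phase 2: re-acquire the db WRITE lock (tryLockDatabase with a timeout), switch the
//     // partition's data property to HDD and log a ModifyPartitionInfo so followers replay it
//     partitionInfo.setDataProperty(partitionId, new DataProperty(TStorageMedium.HDD));
//     GlobalStateMgr.getCurrentState().getEditLog().logModifyPartition(info);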
- if (dataProperty.getStorageMedium() == TStorageMedium.SSD - && dataProperty.getCooldownTimeMs() < currentTimeMs - && olapTable.getState() == OlapTable.OlapTableState.NORMAL) { - // expire. change to HDD. - // record and change when holding write lock - Multimap multimap = changedPartitionsMap.get(dbId); - if (multimap == null) { - multimap = HashMultimap.create(); - changedPartitionsMap.put(dbId, multimap); - } - multimap.put(tableId, partitionId); - } else { - storageMediumMap.put(partitionId, dataProperty.getStorageMedium()); - } - } // end for partitions - } // end for tables - } finally { - locker.unLockDatabase(db.getId(), LockType.READ); - } - } // end for dbs - - // handle data property changed - for (Long dbId : changedPartitionsMap.keySet()) { - Database db = getDb(dbId); - if (db == null) { - LOG.warn("db {} does not exist while checking backend storage medium", dbId); - continue; - } - Multimap tableIdToPartitionIds = changedPartitionsMap.get(dbId); - - // use try lock to avoid blocking a long time. - // if block too long, backend report rpc will timeout. - Locker locker = new Locker(); - if (!locker.tryLockDatabase(db.getId(), LockType.WRITE, Database.TRY_LOCK_TIMEOUT_MS, TimeUnit.MILLISECONDS)) { - LOG.warn("try get db {}-{} write lock but failed when checking backend storage medium", - db.getFullName(), dbId); - continue; - } - Preconditions.checkState(locker.isDbWriteLockHeldByCurrentThread(db)); - try { - for (Long tableId : tableIdToPartitionIds.keySet()) { - Table table = getTable(db.getId(), tableId); - if (table == null) { - continue; - } - OlapTable olapTable = (OlapTable) table; - PartitionInfo partitionInfo = olapTable.getPartitionInfo(); + public void replayTruncateTable(TruncateTableInfo info) { + Database db = getDb(info.getDbId()); + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + OlapTable olapTable = (OlapTable) getTable(db.getId(), info.getTblId()); + truncateTableInternal(olapTable, info.getPartitions(), info.isEntireTable(), true); - Collection partitionIds = tableIdToPartitionIds.get(tableId); - for (Long partitionId : partitionIds) { - Partition partition = olapTable.getPartition(partitionId); - if (partition == null) { - continue; - } - DataProperty dataProperty = partitionInfo.getDataProperty(partition.getId()); - if (dataProperty.getStorageMedium() == TStorageMedium.SSD - && dataProperty.getCooldownTimeMs() < currentTimeMs) { - // expire. change to HDD. 
- DataProperty hdd = new DataProperty(TStorageMedium.HDD); - partitionInfo.setDataProperty(partition.getId(), hdd); - storageMediumMap.put(partitionId, TStorageMedium.HDD); - LOG.debug("partition[{}-{}-{}] storage medium changed from SSD to HDD", - dbId, tableId, partitionId); - - // log - ModifyPartitionInfo info = - new ModifyPartitionInfo(db.getId(), olapTable.getId(), - partition.getId(), - hdd, - (short) -1, - partitionInfo.getIsInMemory(partition.getId())); - GlobalStateMgr.getCurrentState().getEditLog().logModifyPartition(info); + if (!GlobalStateMgr.isCheckpointThread()) { + // add tablet to inverted index + TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); + for (Partition partition : info.getPartitions()) { + long partitionId = partition.getId(); + TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty( + partitionId).getStorageMedium(); + for (PhysicalPartition physicalPartition : partition.getSubPartitions()) { + for (MaterializedIndex mIndex : physicalPartition.getMaterializedIndices( + MaterializedIndex.IndexExtState.ALL)) { + long indexId = mIndex.getId(); + int schemaHash = olapTable.getSchemaHashByIndexId(indexId); + TabletMeta tabletMeta = new TabletMeta(db.getId(), olapTable.getId(), + physicalPartition.getId(), indexId, schemaHash, medium, + olapTable.isCloudNativeTableOrMaterializedView()); + for (Tablet tablet : mIndex.getTablets()) { + long tabletId = tablet.getId(); + invertedIndex.addTablet(tabletId, tabletMeta); + if (olapTable.isOlapTable()) { + for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { + invertedIndex.addReplica(tabletId, replica); + } + } + } } - } // end for partitions - } // end for tables - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); + } + } } - } // end for dbs - return storageMediumMap; + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); + } } - /* - * used for handling AlterTableStmt (for client is the ALTER TABLE command). - * including SchemaChangeHandler and RollupHandler - */ @Override - public void alterTable(ConnectContext context, AlterTableStmt stmt) throws UserException { - AlterJobExecutor alterJobExecutor = new AlterJobExecutor(); - alterJobExecutor.process(stmt, context); + public void swapTable(SwapTableOperationLog log) { + swapTableInternal(log); + GlobalStateMgr.getCurrentState().getEditLog().logSwapTable(log); + } + + public void replaySwapTable(SwapTableOperationLog log) { + swapTableInternal(log); + long dbId = log.getDbId(); + long origTblId = log.getOrigTblId(); + long newTblId = log.getNewTblId(); + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); + OlapTable origTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), origTblId); + OlapTable newTbl = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), newTblId); + LOG.debug("finish replay swap table {}-{} with table {}-{}", origTblId, origTable.getName(), newTblId, + newTbl.getName()); } /** - * used for handling AlterViewStmt (the ALTER VIEW command). + * The swap table operation works as follow: + * For example, SWAP TABLE A WITH TABLE B. 
+ * must pre check A can be renamed to B and B can be renamed to A */ - @Override - public void alterView(AlterViewStmt stmt) throws UserException { - new AlterJobExecutor().process(stmt, ConnectContext.get()); - } + public void swapTableInternal(SwapTableOperationLog log) { + long dbId = log.getDbId(); + long origTblId = log.getOrigTblId(); + long newTblId = log.getNewTblId(); + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); + OlapTable origTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), origTblId); + OlapTable newTbl = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), newTblId); - @Override - public void createMaterializedView(CreateMaterializedViewStmt stmt) - throws AnalysisException, DdlException { - MaterializedViewHandler materializedViewHandler = - GlobalStateMgr.getCurrentState().getAlterJobMgr().getMaterializedViewHandler(); - String tableName = stmt.getBaseIndexName(); - // check db - String dbName = stmt.getDBName(); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbName); - if (db == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); - } - // check cluster capacity - GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().checkClusterCapacity(); - // check db quota - db.checkQuota(); + String origTblName = origTable.getName(); + String newTblName = newTbl.getName(); - Locker locker = new Locker(); - if (!locker.lockDatabaseAndCheckExist(db, LockType.WRITE)) { - throw new DdlException("create materialized failed. database:" + db.getFullName() + " not exist"); + // drop origin table and new table + db.dropTable(origTblName); + db.dropTable(newTblName); + + // rename new table name to origin table name and add it to database + newTbl.checkAndSetName(origTblName, false); + db.registerTableUnlocked(newTbl); + + // rename origin table name to new table name and add it to database + origTable.checkAndSetName(newTblName, false); + db.registerTableUnlocked(origTable); + + // swap dependencies of base table + if (origTable.isMaterializedView()) { + MaterializedView oldMv = (MaterializedView) origTable; + MaterializedView newMv = (MaterializedView) newTbl; + updateTaskDefinition(oldMv); + updateTaskDefinition(newMv); } - try { - Table table = getTable(db.getFullName(), tableName); - if (table == null) { - throw new DdlException("create materialized failed. 
table:" + tableName + " not exist"); - } - if (table.isCloudNativeTable()) { - throw new DdlException("Creating synchronous materialized view(rollup) is not supported in " + - "shared data clusters.\nPlease use asynchronous materialized view instead.\n" + - "Refer to https://docs.starrocks.io/en-us/latest/sql-reference/sql-statements" + - "/data-definition/CREATE%20MATERIALIZED%20VIEW#asynchronous-materialized-view for details."); - } - if (!table.isOlapTable()) { - throw new DdlException("Do not support create synchronous materialized view(rollup) on " + - table.getType().name() + " table[" + tableName + "]"); - } - OlapTable olapTable = (OlapTable) table; - if (olapTable.getKeysType() == KeysType.PRIMARY_KEYS) { - throw new DdlException( - "Do not support create materialized view on primary key table[" + tableName + "]"); - } - if (GlobalStateMgr.getCurrentState().getInsertOverwriteJobMgr().hasRunningOverwriteJob(olapTable.getId())) { - throw new DdlException("Table[" + olapTable.getName() + "] is doing insert overwrite job, " + - "please start to create materialized view after insert overwrite"); - } - olapTable.checkStableAndNormal(); + } - materializedViewHandler.processCreateMaterializedView(stmt, db, olapTable); - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); + private void updateTaskDefinition(MaterializedView materializedView) { + Task currentTask = GlobalStateMgr.getCurrentState().getTaskManager().getTask( + TaskBuilder.getMvTaskName(materializedView.getId())); + if (currentTask != null) { + currentTask.setDefinition(materializedView.getTaskDefinition()); + currentTask.setPostRun(TaskBuilder.getAnalyzeMVStmt(materializedView.getName())); } } - // TODO(murphy) refactor it into MVManager @Override - public void createMaterializedView(CreateMaterializedViewStatement stmt) + public void updateTableMeta(Database db, String tableName, Map properties, TTabletMetaType metaType) throws DdlException { - // check mv exists,name must be different from view/mv/table which exists in metadata - String mvName = stmt.getTableName().getTbl(); - String dbName = stmt.getTableName().getDb(); - LOG.debug("Begin create materialized view: {}", mvName); - // check if db exists - Database db = this.getDb(dbName); - if (db == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); - } + List partitions = Lists.newArrayList(); + OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(db.getFullName(), tableName); - // check if table exists in db Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.READ); + locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.READ); try { - if (getTable(db.getFullName(), mvName) != null) { - if (stmt.isIfNotExists()) { - LOG.info("Create materialized view [{}] which already exists", mvName); - return; - } else { - ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, mvName); - } - } + partitions.addAll(olapTable.getPartitions()); } finally { - locker.unLockDatabase(db.getId(), LockType.READ); - } - // create columns - List baseSchema = stmt.getMvColumnItems(); - validateColumns(baseSchema); - - Map properties = stmt.getProperties(); - if (properties == null) { - properties = Maps.newHashMap(); + locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.READ); } - // create partition info - PartitionInfo partitionInfo = buildPartitionInfo(stmt); - // create distribution info - 
DistributionDesc distributionDesc = stmt.getDistributionDesc(); - Preconditions.checkNotNull(distributionDesc); - DistributionInfo baseDistribution = distributionDesc.toDistributionInfo(baseSchema); - // create refresh scheme - MaterializedView.MvRefreshScheme mvRefreshScheme; - RefreshSchemeClause refreshSchemeDesc = stmt.getRefreshSchemeDesc(); - if (refreshSchemeDesc.getType() == MaterializedView.RefreshType.ASYNC) { - mvRefreshScheme = new MaterializedView.MvRefreshScheme(); - AsyncRefreshSchemeDesc asyncRefreshSchemeDesc = (AsyncRefreshSchemeDesc) refreshSchemeDesc; - MaterializedView.AsyncRefreshContext asyncRefreshContext = mvRefreshScheme.getAsyncRefreshContext(); - asyncRefreshContext.setDefineStartTime(asyncRefreshSchemeDesc.isDefineStartTime()); - int randomizeStart = 0; - if (properties.containsKey(PropertyAnalyzer.PROPERTY_MV_RANDOMIZE_START)) { - try { - randomizeStart = Integer.parseInt(properties.get((PropertyAnalyzer.PROPERTY_MV_RANDOMIZE_START))); - } catch (NumberFormatException e) { - ErrorReport.reportSemanticException(ErrorCode.ERR_INVALID_PARAMETER, - PropertyAnalyzer.PROPERTY_MV_RANDOMIZE_START + " only accept integer as parameter"); + boolean metaValue = false; + switch (metaType) { + case INMEMORY: + metaValue = Boolean.parseBoolean(properties.get(PropertyAnalyzer.PROPERTIES_INMEMORY)); + if (metaValue == olapTable.isInMemory()) { + return; } - // remove this transient variable - properties.remove(PropertyAnalyzer.PROPERTY_MV_RANDOMIZE_START); - } - - long random = getRandomStart(asyncRefreshSchemeDesc.getIntervalLiteral(), randomizeStart); - if (asyncRefreshSchemeDesc.isDefineStartTime() || randomizeStart == -1) { - long definedStartTime = Utils.getLongFromDateTime(asyncRefreshSchemeDesc.getStartTime()); - // Add random set only if mv_random_start > 0 when user has already set the start time - if (randomizeStart > 0) { - definedStartTime += random; + break; + case ENABLE_PERSISTENT_INDEX: + metaValue = Boolean.parseBoolean(properties.get(PropertyAnalyzer.PROPERTIES_ENABLE_PERSISTENT_INDEX)); + if (metaValue == olapTable.enablePersistentIndex()) { + return; } - asyncRefreshContext.setStartTime(definedStartTime); - } else if (asyncRefreshSchemeDesc.getIntervalLiteral() != null) { - long currentTimeSecond = Utils.getLongFromDateTime(LocalDateTime.now()); - long randomizedStart = currentTimeSecond + random; - asyncRefreshContext.setStartTime(randomizedStart); - } - if (asyncRefreshSchemeDesc.getIntervalLiteral() != null) { - long intervalStep = ((IntLiteral) asyncRefreshSchemeDesc.getIntervalLiteral().getValue()).getValue(); - String refreshTimeUnit = asyncRefreshSchemeDesc.getIntervalLiteral().getUnitIdentifier().getDescription(); - asyncRefreshContext.setStep(intervalStep); - asyncRefreshContext.setTimeUnit(refreshTimeUnit); - - // Check the interval time should not be less than the min allowed config time. 
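// Worked sketch of the minimum-interval check below, with assumed example values
// (an ASYNC schedule of INTERVAL 10 SECOND against materialized_view_min_refresh_interval = 60):
//
//     long intervalStep = 10;                                           // from the INTERVAL literal
//     TimeUnit unit = TimeUtils.convertUnitIdentifierToTimeUnit("SECOND");
//     long periodSeconds = TimeUtils.convertTimeUnitValueToSecond(intervalStep, unit);
//     if (Config.materialized_view_min_refresh_interval > 0
//             && periodSeconds < Config.materialized_view_min_refresh_interval) {
//         // 10s < 60s, so the CREATE MATERIALIZED VIEW statement is rejected
//         throw new DdlException("Refresh schedule interval " + periodSeconds
//                 + "s is smaller than materialized_view_min_refresh_interval");
//     }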
- if (Config.materialized_view_min_refresh_interval > 0) { - TimeUnit intervalTimeUnit = TimeUtils.convertUnitIdentifierToTimeUnit(refreshTimeUnit); - long periodSeconds = TimeUtils.convertTimeUnitValueToSecond(intervalStep, intervalTimeUnit); - if (periodSeconds < Config.materialized_view_min_refresh_interval) { - throw new DdlException(String.format("Refresh schedule interval %s is too small which may cost " + - "a lot of memory/cpu resources to refresh the asynchronous materialized view, " + - "please config an interval larger than " + - "Config.materialized_view_min_refresh_interval(%ss).", - periodSeconds, - Config.materialized_view_min_refresh_interval)); - } + break; + case WRITE_QUORUM: + TWriteQuorumType writeQuorum = WriteQuorum + .findTWriteQuorumByName(properties.get(PropertyAnalyzer.PROPERTIES_WRITE_QUORUM)); + if (writeQuorum == olapTable.writeQuorum()) { + return; } - } - - // task which type is EVENT_TRIGGERED can not use external table as base table now. - if (asyncRefreshContext.getTimeUnit() == null) { - // asyncRefreshContext's timeUnit is null means this task's type is EVENT_TRIGGERED - Map tableNameTableMap = AnalyzerUtils.collectAllTable(stmt.getQueryStatement()); - if (tableNameTableMap.values().stream().anyMatch(table -> !table.isNativeTableOrMaterializedView())) { - throw new DdlException( - "Materialized view which type is ASYNC need to specify refresh interval for " + - "external table"); + break; + case REPLICATED_STORAGE: + metaValue = Boolean.parseBoolean(properties.get(PropertyAnalyzer.PROPERTIES_REPLICATED_STORAGE)); + if (metaValue == olapTable.enableReplicatedStorage()) { + return; } - } - } else if (refreshSchemeDesc.getType() == MaterializedView.RefreshType.SYNC) { - mvRefreshScheme = new MaterializedView.MvRefreshScheme(); - mvRefreshScheme.setType(MaterializedView.RefreshType.SYNC); - } else if (refreshSchemeDesc.getType().equals(MaterializedView.RefreshType.MANUAL)) { - mvRefreshScheme = new MaterializedView.MvRefreshScheme(); - mvRefreshScheme.setType(MaterializedView.RefreshType.MANUAL); - } else { - mvRefreshScheme = new MaterializedView.MvRefreshScheme(); - mvRefreshScheme.setType(MaterializedView.RefreshType.INCREMENTAL); + break; + case BUCKET_SIZE: + long bucketSize = Long.parseLong(properties.get(PropertyAnalyzer.PROPERTIES_BUCKET_SIZE)); + if (bucketSize == olapTable.getAutomaticBucketSize()) { + return; + } + break; + case MUTABLE_BUCKET_NUM: + long mutableBucketNum = Long.parseLong(properties.get(PropertyAnalyzer.PROPERTIES_MUTABLE_BUCKET_NUM)); + if (mutableBucketNum == olapTable.getMutableBucketNum()) { + return; + } + break; + case ENABLE_LOAD_PROFILE: + boolean enableLoadProfile = Boolean.parseBoolean(properties.get(PropertyAnalyzer.PROPERTIES_ENABLE_LOAD_PROFILE)); + if (enableLoadProfile == olapTable.enableLoadProfile()) { + return; + } + break; + case PRIMARY_INDEX_CACHE_EXPIRE_SEC: + int primaryIndexCacheExpireSec = Integer.parseInt(properties.get( + PropertyAnalyzer.PROPERTIES_PRIMARY_INDEX_CACHE_EXPIRE_SEC)); + if (primaryIndexCacheExpireSec == olapTable.primaryIndexCacheExpireSec()) { + return; + } + break; + default: + LOG.warn("meta type: {} does not support", metaType); + return; } - mvRefreshScheme.setMoment(refreshSchemeDesc.getMoment()); - // create mv - long mvId = GlobalStateMgr.getCurrentState().getNextId(); - MaterializedView materializedView; - if (RunMode.isSharedNothingMode()) { - if (refreshSchemeDesc.getType().equals(MaterializedView.RefreshType.INCREMENTAL)) { - materializedView = 
GlobalStateMgr.getCurrentState().getMaterializedViewMgr() - .createSinkTable(stmt, partitionInfo, mvId, db.getId()); - materializedView.setMaintenancePlan(stmt.getMaintenancePlan()); - } else { - materializedView = - new MaterializedView(mvId, db.getId(), mvName, baseSchema, stmt.getKeysType(), partitionInfo, - baseDistribution, mvRefreshScheme); - } - } else { - Preconditions.checkState(RunMode.isSharedDataMode()); - if (refreshSchemeDesc.getType().equals(MaterializedView.RefreshType.INCREMENTAL)) { - throw new DdlException("Incremental materialized view in shared_data mode is not supported"); - } - materializedView = - new LakeMaterializedView(mvId, db.getId(), mvName, baseSchema, stmt.getKeysType(), partitionInfo, - baseDistribution, mvRefreshScheme); + if (metaType == TTabletMetaType.INMEMORY || metaType == TTabletMetaType.ENABLE_PERSISTENT_INDEX) { + for (Partition partition : partitions) { + updatePartitionTabletMeta(db, olapTable.getName(), partition.getName(), metaValue, metaType); + } } - //bitmap indexes - List mvIndexes = stmt.getMvIndexes(); - materializedView.setIndexes(mvIndexes); + try (AutoCloseableLock ignore = + new AutoCloseableLock(new Locker(), db.getId(), Lists.newArrayList(olapTable.getId()), LockType.WRITE)) { + TableProperty tableProperty = olapTable.getTableProperty(); + if (tableProperty == null) { + tableProperty = new TableProperty(properties); + olapTable.setTableProperty(tableProperty); + } else { + tableProperty.modifyTableProperties(properties); + } - // sort keys - if (CollectionUtils.isNotEmpty(stmt.getSortKeys())) { - materializedView.setTableProperty(new TableProperty()); - materializedView.getTableProperty().setMvSortKeys(stmt.getSortKeys()); - } - // set comment - materializedView.setComment(stmt.getComment()); - // set baseTableIds - materializedView.setBaseTableInfos(stmt.getBaseTableInfos()); - // set viewDefineSql - materializedView.setViewDefineSql(stmt.getInlineViewDef()); - materializedView.setSimpleDefineSql(stmt.getSimpleViewDef()); - materializedView.setOriginalViewDefineSql(stmt.getOriginalViewDefineSql()); - // set partitionRefTableExprs - if (stmt.getPartitionRefTableExpr() != null) { - //avoid to get a list of null inside - materializedView.setPartitionRefTableExprs(Lists.newArrayList(stmt.getPartitionRefTableExpr())); - } - // set base index id - long baseIndexId = getNextId(); - materializedView.setBaseIndexId(baseIndexId); - // set query output indexes - materializedView.setQueryOutputIndices(stmt.getQueryOutputIndices()); - // set base index meta - int schemaVersion = 0; - int schemaHash = Util.schemaHash(schemaVersion, baseSchema, null, 0d); - short shortKeyColumnCount = GlobalStateMgr.calcShortKeyColumnCount(baseSchema, null); - TStorageType baseIndexStorageType = TStorageType.COLUMN; - materializedView.setIndexMeta(baseIndexId, mvName, baseSchema, schemaVersion, schemaHash, - shortKeyColumnCount, baseIndexStorageType, stmt.getKeysType()); - - // validate hint - Map optHints = Maps.newHashMap(); - if (stmt.isExistQueryScopeHint()) { - SessionVariable sessionVariable = VariableMgr.newSessionVariable(); - for (HintNode hintNode : stmt.getAllQueryScopeHints()) { - if (hintNode instanceof SetVarHint) { - for (Map.Entry entry : hintNode.getValue().entrySet()) { - VariableMgr.setSystemVariable(sessionVariable, - new SystemVariable(entry.getKey(), new StringLiteral(entry.getValue())), true); - optHints.put(entry.getKey(), entry.getValue()); + ModifyTablePropertyOperationLog info = + new ModifyTablePropertyOperationLog(db.getId(), 
olapTable.getId(), properties); + switch (metaType) { + case INMEMORY: + tableProperty.buildInMemory(); + // need to update partition info meta + for (Partition partition : olapTable.getPartitions()) { + olapTable.getPartitionInfo().setIsInMemory(partition.getId(), tableProperty.isInMemory()); } - } else if (hintNode instanceof UserVariableHint) { - throw new DdlException("unsupported user variable hint in Materialized view for now."); - } + GlobalStateMgr.getCurrentState().getEditLog().logModifyInMemory(info); + case ENABLE_PERSISTENT_INDEX: + tableProperty.buildEnablePersistentIndex(); + if (olapTable.isCloudNativeTable()) { + // now default to LOCAL + tableProperty.buildPersistentIndexType(); + } + GlobalStateMgr.getCurrentState().getEditLog().logModifyEnablePersistentIndex(info); + case WRITE_QUORUM: + tableProperty.buildWriteQuorum(); + GlobalStateMgr.getCurrentState().getEditLog().logModifyWriteQuorum(info); + case REPLICATED_STORAGE: + tableProperty.buildReplicatedStorage(); + GlobalStateMgr.getCurrentState().getEditLog().logModifyReplicatedStorage(info); + case BUCKET_SIZE: + tableProperty.buildBucketSize(); + GlobalStateMgr.getCurrentState().getEditLog().logModifyBucketSize(info); + case MUTABLE_BUCKET_NUM: + tableProperty.buildMutableBucketNum(); + GlobalStateMgr.getCurrentState().getEditLog().logModifyMutableBucketNum(info); + case ENABLE_LOAD_PROFILE: + tableProperty.buildEnableLoadProfile(); + GlobalStateMgr.getCurrentState().getEditLog().logModifyEnableLoadProfile(info); + case PRIMARY_INDEX_CACHE_EXPIRE_SEC: + tableProperty.buildPrimaryIndexCacheExpireSec(); + GlobalStateMgr.getCurrentState().getEditLog().logModifyPrimaryIndexCacheExpireSec(info); + default: + LOG.warn("meta type: {} does not support", metaType); + return; } } + } + + /** + * Update one specified partition's in-memory property by partition name of table + * This operation may return partial successfully, with a exception to inform user to retry + */ + public void updatePartitionTabletMeta(Database db, + String tableName, + String partitionName, + boolean metaValue, + TTabletMetaType metaType) throws DdlException { + // be id -> + Map> beIdToTabletSet = Maps.newHashMap(); + OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(db.getFullName(), tableName); - boolean isNonPartitioned = partitionInfo.isUnPartitioned(); - DataProperty dataProperty = PropertyAnalyzer.analyzeMVDataProperty(materializedView, properties); - PropertyAnalyzer.analyzeMVProperties(db, materializedView, properties, isNonPartitioned); + Locker locker = new Locker(); + locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.READ); try { - Set tabletIdSet = new HashSet<>(); - // process single partition info - if (isNonPartitioned) { - long partitionId = GlobalStateMgr.getCurrentState().getNextId(); - Preconditions.checkNotNull(dataProperty); - partitionInfo.setDataProperty(partitionId, dataProperty); - partitionInfo.setReplicationNum(partitionId, materializedView.getDefaultReplicationNum()); - partitionInfo.setIsInMemory(partitionId, false); - partitionInfo.setTabletType(partitionId, TTabletType.TABLET_TYPE_DISK); - StorageInfo storageInfo = materializedView.getTableProperty().getStorageInfo(); - partitionInfo.setDataCacheInfo(partitionId, - storageInfo == null ? 
null : storageInfo.getDataCacheInfo()); - Long version = Partition.PARTITION_INIT_VERSION; - Partition partition = createPartition(db, materializedView, partitionId, mvName, version, tabletIdSet, - materializedView.getWarehouseId()); - buildPartitions(db, materializedView, new ArrayList<>(partition.getSubPartitions()), - materializedView.getWarehouseId()); - materializedView.addPartition(partition); - } else { - Expr partitionExpr = stmt.getPartitionExpDesc().getExpr(); - Map partitionExprMaps = MVPartitionExprResolver.getMVPartitionExprsChecked(partitionExpr, - stmt.getQueryStatement(), stmt.getBaseTableInfos()); - LOG.info("Generate mv {} partition exprs: {}", mvName, partitionExprMaps); - materializedView.setPartitionExprMaps(partitionExprMaps); + Partition partition = olapTable.getPartition(partitionName); + if (partition == null) { + throw new DdlException( + "Partition[" + partitionName + "] does not exist in table[" + olapTable.getName() + "]"); } - GlobalStateMgr.getCurrentState().getMaterializedViewMgr().prepareMaintenanceWork(stmt, materializedView); - - String storageVolumeId = ""; - if (materializedView.isCloudNativeMaterializedView()) { - storageVolumeId = GlobalStateMgr.getCurrentState().getStorageVolumeMgr() - .getStorageVolumeIdOfTable(materializedView.getId()); + for (PhysicalPartition physicalPartition : partition.getSubPartitions()) { + for (MaterializedIndex index + : physicalPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { + for (Tablet tablet : index.getTablets()) { + for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { + Set tabletSet = beIdToTabletSet.computeIfAbsent(replica.getBackendId(), k -> Sets.newHashSet()); + tabletSet.add(tablet.getId()); + } + } + } } - onCreate(db, materializedView, storageVolumeId, stmt.isIfNotExists()); - } catch (DdlException e) { - if (materializedView.isCloudNativeMaterializedView()) { - GlobalStateMgr.getCurrentState().getStorageVolumeMgr().unbindTableToStorageVolume(materializedView.getId()); + } finally { + locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.READ); + } + + int totalTaskNum = beIdToTabletSet.keySet().size(); + MarkedCountDownLatch> countDownLatch = new MarkedCountDownLatch<>(totalTaskNum); + AgentBatchTask batchTask = new AgentBatchTask(); + for (Map.Entry> kv : beIdToTabletSet.entrySet()) { + countDownLatch.addMark(kv.getKey(), kv.getValue()); + long backendId = kv.getKey(); + Set tablets = kv.getValue(); + TabletMetadataUpdateAgentTask task = TabletMetadataUpdateAgentTaskFactory + .createGenericBooleanPropertyUpdateTask(backendId, tablets, metaValue, metaType); + Preconditions.checkState(task != null, "task is null"); + task.setLatch(countDownLatch); + batchTask.addTask(task); + } + if (!FeConstants.runningUnitTest) { + // send all tasks and wait them finished + AgentTaskQueue.addBatchTask(batchTask); + AgentTaskExecutor.submit(batchTask); + LOG.info("send update tablet meta task for table {}, partitions {}, number: {}", + tableName, partitionName, batchTask.getTaskNum()); + + // estimate timeout + long timeout = Config.tablet_create_timeout_second * 1000L * totalTaskNum; + timeout = Math.min(timeout, Config.max_create_table_timeout_second * 1000L); + boolean ok = false; + try { + ok = countDownLatch.await(timeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + LOG.warn("InterruptedException: ", e); } - throw e; - } - LOG.info("Successfully create materialized view [{}:{}]", mvName, 
materializedView.getMvId()); - // NOTE: The materialized view has been added to the database, and the following procedure cannot throw exception. - createTaskForMaterializedView(dbName, materializedView, optHints); - DynamicPartitionUtil.registerOrRemovePartitionTTLTable(db.getId(), materializedView); - } - - private long getRandomStart(IntervalLiteral interval, long randomizeStart) throws DdlException { - if (interval == null || randomizeStart == -1) { - return 0; - } - // randomize the start time if not specified manually, to avoid refresh conflicts - // default random interval is min(300s, INTERVAL/2) - // user could specify it through mv_randomize_start - long period = ((IntLiteral) interval.getValue()).getLongValue(); - TimeUnit timeUnit = - TimeUtils.convertUnitIdentifierToTimeUnit(interval.getUnitIdentifier().getDescription()); - long intervalSeconds = TimeUtils.convertTimeUnitValueToSecond(period, timeUnit); - long randomInterval = randomizeStart == 0 ? Math.min(300, intervalSeconds / 2) : randomizeStart; - return randomInterval > 0 ? ThreadLocalRandom.current().nextLong(randomInterval) : randomInterval; - } + if (!ok || !countDownLatch.getStatus().ok()) { + String errMsg = "Failed to update partition[" + partitionName + "]. tablet meta."; + // clear tasks + AgentTaskQueue.removeBatchTask(batchTask, TTaskType.UPDATE_TABLET_META_INFO); - public static PartitionInfo buildPartitionInfo(CreateMaterializedViewStatement stmt) throws DdlException { - ExpressionPartitionDesc expressionPartitionDesc = stmt.getPartitionExpDesc(); - if (expressionPartitionDesc != null) { - Expr expr = expressionPartitionDesc.getExpr(); - if (expr instanceof SlotRef) { - SlotRef slotRef = (SlotRef) expr; - if (slotRef.getType().getPrimitiveType() == PrimitiveType.VARCHAR) { - return new ListPartitionInfo(PartitionType.LIST, - Collections.singletonList(stmt.getPartitionColumn())); - } - } - if ((expr instanceof FunctionCallExpr)) { - FunctionCallExpr functionCallExpr = (FunctionCallExpr) expr; - if (functionCallExpr.getFnName().getFunction().equalsIgnoreCase(FunctionSet.STR2DATE)) { - Column partitionColumn = new Column(stmt.getPartitionColumn()); - partitionColumn.setType(com.starrocks.catalog.Type.DATE); - return expressionPartitionDesc.toPartitionInfo( - Collections.singletonList(partitionColumn), - Maps.newHashMap(), false); + if (!countDownLatch.getStatus().ok()) { + errMsg += " Error: " + countDownLatch.getStatus().getErrorMsg(); + } else { + List>> unfinishedMarks = countDownLatch.getLeftMarks(); + // only show at most 3 results + List>> subList = + unfinishedMarks.subList(0, Math.min(unfinishedMarks.size(), 3)); + if (!subList.isEmpty()) { + errMsg += " Unfinished mark: " + Joiner.on(", ").join(subList); + } } + errMsg += ". 
This operation maybe partial successfully, You should retry until success."; + LOG.warn(errMsg); + throw new DdlException(errMsg); } - return expressionPartitionDesc.toPartitionInfo( - Collections.singletonList(stmt.getPartitionColumn()), - Maps.newHashMap(), false); - } else { - return new SinglePartitionInfo(); } } - private void createTaskForMaterializedView(String dbName, MaterializedView materializedView, - Map optHints) throws DdlException { - MaterializedView.RefreshType refreshType = materializedView.getRefreshScheme().getType(); - MaterializedView.RefreshMoment refreshMoment = materializedView.getRefreshScheme().getMoment(); - - if (refreshType.equals(MaterializedView.RefreshType.INCREMENTAL)) { - GlobalStateMgr.getCurrentState().getMaterializedViewMgr().startMaintainMV(materializedView); - return; - } - - if (refreshType != MaterializedView.RefreshType.SYNC) { + public void replayModifyTableProperty(short opCode, ModifyTablePropertyOperationLog info) { + long dbId = info.getDbId(); + long tableId = info.getTableId(); + Map properties = info.getProperties(); + String comment = info.getComment(); - Task task = TaskBuilder.buildMvTask(materializedView, dbName); - TaskBuilder.updateTaskInfo(task, materializedView); + Database db = getDb(dbId); + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + OlapTable olapTable = (OlapTable) getTable(db.getId(), tableId); + if (opCode == OperationType.OP_SET_FORBIDDEN_GLOBAL_DICT) { + String enAble = properties.get(PropertyAnalyzer.ENABLE_LOW_CARD_DICT_TYPE); + Preconditions.checkState(enAble != null); + if (olapTable != null) { + if (enAble.equals(PropertyAnalyzer.DISABLE_LOW_CARD_DICT)) { + olapTable.setHasForbiddenGlobalDict(true); + IDictManager.getInstance().disableGlobalDict(olapTable.getId()); + } else { + olapTable.setHasForbiddenGlobalDict(false); + IDictManager.getInstance().enableGlobalDict(olapTable.getId()); + } + } + } else { + TableProperty tableProperty = olapTable.getTableProperty(); + if (tableProperty == null) { + tableProperty = new TableProperty(properties); + olapTable.setTableProperty(tableProperty.buildProperty(opCode)); + } else { + tableProperty.modifyTableProperties(properties); + tableProperty.buildProperty(opCode); + } - if (optHints != null) { - Map taskProperties = task.getProperties(); - taskProperties.putAll(optHints); - } + if (StringUtils.isNotEmpty(comment)) { + olapTable.setComment(comment); + } - TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); - taskManager.createTask(task, false); - if (refreshMoment.equals(MaterializedView.RefreshMoment.IMMEDIATE)) { - taskManager.executeTask(task.getName()); + // need to replay partition info meta + if (opCode == OperationType.OP_MODIFY_IN_MEMORY) { + for (Partition partition : olapTable.getPartitions()) { + olapTable.getPartitionInfo().setIsInMemory(partition.getId(), tableProperty.isInMemory()); + } + } else if (opCode == OperationType.OP_MODIFY_REPLICATION_NUM) { + // update partition replication num if this table is unpartitioned table + PartitionInfo partitionInfo = olapTable.getPartitionInfo(); + if (partitionInfo.getType() == PartitionType.UNPARTITIONED) { + String partitionName = olapTable.getName(); + Partition partition = olapTable.getPartition(partitionName); + if (partition != null) { + partitionInfo.setReplicationNum(partition.getId(), tableProperty.getReplicationNum()); + } + } + } else if (opCode == OperationType.OP_MODIFY_ENABLE_PERSISTENT_INDEX) { + 
olapTable.setEnablePersistentIndex(tableProperty.enablePersistentIndex()); + if (olapTable.isCloudNativeTable()) { + olapTable.setPersistentIndexType(tableProperty.getPersistentIndexType()); + } + } else if (opCode == OperationType.OP_MODIFY_PRIMARY_INDEX_CACHE_EXPIRE_SEC) { + olapTable.setPrimaryIndexCacheExpireSec(tableProperty.primaryIndexCacheExpireSec()); + } else if (opCode == OperationType.OP_MODIFY_BINLOG_CONFIG) { + if (!olapTable.isBinlogEnabled()) { + olapTable.clearBinlogAvailableVersion(); + } + } } - } - } - - /** - * Leave some clean up work to {@link MaterializedView#onDrop} - */ - @Override - public void dropMaterializedView(DropMaterializedViewStmt stmt) throws DdlException, MetaNotFoundException { - Database db = getDb(stmt.getDbName()); - if (db == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, stmt.getDbName()); - } - Table table; - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.READ); - try { - table = getTable(db.getFullName(), stmt.getMvName()); + } catch (Exception ex) { + LOG.warn("The replay log failed and this log was ignored.", ex); } finally { - locker.unLockDatabase(db.getId(), LockType.READ); - } - if (table instanceof MaterializedView) { - try { - Authorizer.checkMaterializedViewAction(ConnectContext.get().getCurrentUserIdentity(), - ConnectContext.get().getCurrentRoleIds(), stmt.getDbMvName(), PrivilegeType.DROP); - } catch (AccessDeniedException e) { - AccessDeniedException.reportAccessDenied( - stmt.getDbMvName().getCatalog(), - ConnectContext.get().getCurrentUserIdentity(), - ConnectContext.get().getCurrentRoleIds(), PrivilegeType.DROP.name(), ObjectType.MATERIALIZED_VIEW.name(), - stmt.getDbMvName().getTbl()); - } - - db.dropTable(table.getName(), stmt.isSetIfExists(), true); - } else { - stateMgr.getAlterJobMgr().processDropMaterializedView(stmt); + locker.unLockDatabase(db.getId(), LockType.WRITE); } } @Override - public void alterMaterializedView(AlterMaterializedViewStmt stmt) throws DdlException, MetaNotFoundException { - new AlterMVJobExecutor().process(stmt, ConnectContext.get()); + public void alterTable(ModifyTablePropertyOperationLog log) { + GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(log); } - private String executeRefreshMvTask(String dbName, MaterializedView materializedView, - ExecuteOption executeOption) - throws DdlException { - MaterializedView.RefreshType refreshType = materializedView.getRefreshScheme().getType(); - LOG.info("Start to execute refresh materialized view task, mv: {}, refreshType: {}, executionOption:{}", - materializedView.getName(), refreshType, executeOption); - - if (refreshType.equals(MaterializedView.RefreshType.INCREMENTAL)) { - GlobalStateMgr.getCurrentState().getMaterializedViewMgr().onTxnPublish(materializedView); - } else if (refreshType != MaterializedView.RefreshType.SYNC) { - TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); - final String mvTaskName = TaskBuilder.getMvTaskName(materializedView.getId()); - if (!taskManager.containTask(mvTaskName)) { - Task task = TaskBuilder.buildMvTask(materializedView, dbName); - TaskBuilder.updateTaskInfo(task, materializedView); - taskManager.createTask(task, false); - } - return taskManager.executeTask(mvTaskName, executeOption).getQueryId(); - } - return null; + @Override + public void renameColumn(ColumnRenameInfo columnRenameInfo) { + GlobalStateMgr.getCurrentState().getEditLog().logColumnRename(columnRenameInfo); } - private MaterializedView 
getMaterializedViewToRefresh(String dbName, String mvName) - throws DdlException, MetaNotFoundException { - Database db = this.getDb(dbName); - if (db == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); - } - final Table table = getTable(db.getFullName(), mvName); - MaterializedView materializedView = null; - if (table instanceof MaterializedView) { - materializedView = (MaterializedView) table; - } - if (materializedView == null) { - throw new MetaNotFoundException(mvName + " is not a materialized view"); + public void replayRenameColumn(ColumnRenameInfo columnRenameInfo) throws DdlException { + long dbId = columnRenameInfo.getDbId(); + long tableId = columnRenameInfo.getTableId(); + String colName = columnRenameInfo.getColumnName(); + String newColName = columnRenameInfo.getNewColumnName(); + Database db = getDb(dbId); + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + OlapTable olapTable = (OlapTable) getTable(db.getId(), tableId); + olapTable.renameColumn(colName, newColName); + LOG.info("replay rename column[{}] to {}", colName, newColName); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); } - return materializedView; - } - - public String refreshMaterializedView(String dbName, String mvName, boolean force, PartitionRangeDesc range, - int priority, boolean mergeRedundant, boolean isManual) - throws DdlException, MetaNotFoundException { - return refreshMaterializedView(dbName, mvName, force, range, priority, mergeRedundant, isManual, false); - } - - public String refreshMaterializedView(String dbName, String mvName, boolean force, PartitionRangeDesc range, - int priority, boolean mergeRedundant, boolean isManual, boolean isSync) - throws DdlException, MetaNotFoundException { - MaterializedView materializedView = getMaterializedViewToRefresh(dbName, mvName); - - HashMap taskRunProperties = new HashMap<>(); - taskRunProperties.put(TaskRun.PARTITION_START, range == null ? null : range.getPartitionStart()); - taskRunProperties.put(TaskRun.PARTITION_END, range == null ? 
null : range.getPartitionEnd()); - taskRunProperties.put(TaskRun.FORCE, Boolean.toString(force)); - - ExecuteOption executeOption = new ExecuteOption(priority, mergeRedundant, taskRunProperties); - executeOption.setManual(isManual); - executeOption.setSync(isSync); - return executeRefreshMvTask(dbName, materializedView, executeOption); } @Override - public String refreshMaterializedView(RefreshMaterializedViewStatement refreshMaterializedViewStatement) - throws DdlException, MetaNotFoundException { - String dbName = refreshMaterializedViewStatement.getMvName().getDb(); - String mvName = refreshMaterializedViewStatement.getMvName().getTbl(); - boolean force = refreshMaterializedViewStatement.isForceRefresh(); - PartitionRangeDesc range = refreshMaterializedViewStatement.getPartitionRangeDesc(); - return refreshMaterializedView(dbName, mvName, force, range, Constants.TaskRunPriority.HIGH.value(), - Config.enable_mv_refresh_sync_refresh_mergeable, true, refreshMaterializedViewStatement.isSync()); - } - - @Override - public void cancelRefreshMaterializedView( - CancelRefreshMaterializedViewStmt stmt) throws DdlException, MetaNotFoundException { - String dbName = stmt.getMvName().getDb(); - String mvName = stmt.getMvName().getTbl(); - MaterializedView materializedView = getMaterializedViewToRefresh(dbName, mvName); - TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); - Task refreshTask = taskManager.getTask(TaskBuilder.getMvTaskName(materializedView.getId())); - boolean isForce = stmt.isForce(); - if (refreshTask != null) { - taskManager.killTask(refreshTask.getName(), isForce); + public List listTableNames(String dbName) { + Database database = getDb(dbName); + if (database != null) { + return database.getTables().stream() + .map(Table::getName).collect(Collectors.toList()); + } else { + throw new StarRocksConnectorException("Database " + dbName + " doesn't exist"); } } - /* - * used for handling CacnelAlterStmt (for client is the CANCEL ALTER - * command). including SchemaChangeHandler and RollupHandler - */ - public void cancelAlter(CancelAlterTableStmt stmt, String reason) throws DdlException { - if (stmt.getAlterType() == ShowAlterStmt.AlterType.ROLLUP) { - stateMgr.getRollupHandler().cancel(stmt, reason); - } else if (stmt.getAlterType() == ShowAlterStmt.AlterType.COLUMN - || stmt.getAlterType() == ShowAlterStmt.AlterType.OPTIMIZE) { - stateMgr.getSchemaChangeHandler().cancel(stmt, reason); - } else if (stmt.getAlterType() == ShowAlterStmt.AlterType.MATERIALIZED_VIEW) { - stateMgr.getRollupHandler().cancelMV(stmt); + @Override + public List
getTables(Long dbId) { + Database database = getDb(dbId); + if (database == null) { + return Collections.emptyList(); } else { - throw new DdlException("Cancel " + stmt.getAlterType() + " does not implement yet"); + return database.getTables(); } } - public void cancelAlter(CancelAlterTableStmt stmt) throws DdlException { - cancelAlter(stmt, "user cancelled"); + @Override + public Table getTable(String dbName, String tblName) { + Database database = getDb(dbName); + if (database == null) { + return null; + } + return database.getTable(tblName); } - // entry of rename table operation @Override - public void renameTable(Database db, Table table, TableRenameClause tableRenameClause) throws DdlException { - OlapTable olapTable = (OlapTable) table; - if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { - throw new DdlException("Table[" + olapTable.getName() + "] is under " + olapTable.getState()); - } - - String oldTableName = olapTable.getName(); - String newTableName = tableRenameClause.getNewTableName(); - if (oldTableName.equals(newTableName)) { - throw new DdlException("Same table name"); - } - - // check if name is already used - if (getTable(db.getFullName(), newTableName) != null) { - throw new DdlException("Table name[" + newTableName + "] is already used"); + public Table getTable(Long dbId, Long tableId) { + Database database = getDb(dbId); + if (database == null) { + return null; } - - olapTable.checkAndSetName(newTableName, false); - - db.dropTable(oldTableName); - db.registerTableUnlocked(olapTable); - inactiveRelatedMaterializedView(db, olapTable, - MaterializedViewExceptions.inactiveReasonForBaseTableRenamed(oldTableName)); - - TableInfo tableInfo = TableInfo.createForTableRename(db.getId(), olapTable.getId(), newTableName); - GlobalStateMgr.getCurrentState().getEditLog().logTableRename(tableInfo); - LOG.info("rename table[{}] to {}, tableId: {}", oldTableName, newTableName, olapTable.getId()); + return database.getTable(tableId); } @Override - public void alterTableComment(Database db, Table table, AlterTableCommentClause clause) { - ModifyTablePropertyOperationLog log = new ModifyTablePropertyOperationLog(db.getId(), table.getId()); - log.setComment(clause.getNewComment()); - GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(log); - - table.setComment(clause.getNewComment()); + public void modifyViewDef(AlterViewInfo alterViewInfo) { + alterView(alterViewInfo); + GlobalStateMgr.getCurrentState().getEditLog().logModifyViewDef(alterViewInfo); } - public static void inactiveRelatedMaterializedView(Database db, Table olapTable, String reason) { - for (MvId mvId : olapTable.getRelatedMaterializedViews()) { - MaterializedView mv = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getId(), mvId.getId()); - if (mv != null) { - LOG.warn("Inactive MV {}/{} because {}", mv.getName(), mv.getId(), reason); - mv.setInactiveAndReason(reason); - - // recursive inactive - inactiveRelatedMaterializedView(db, mv, - MaterializedViewExceptions.inactiveReasonForBaseTableActive(mv.getName())); - } else { - LOG.info("Ignore materialized view {} does not exists", mvId); - } - } - } + public void alterView(AlterViewInfo alterViewInfo) { + long dbId = alterViewInfo.getDbId(); + long tableId = alterViewInfo.getTableId(); + String inlineViewDef = alterViewInfo.getInlineViewDef(); + List newFullSchema = alterViewInfo.getNewFullSchema(); + String comment = alterViewInfo.getComment(); - public void replayRenameTable(TableInfo tableInfo) { - 
long dbId = tableInfo.getDbId(); - long tableId = tableInfo.getTableId(); - String newTableName = tableInfo.getNewTableName(); + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); + View view = (View) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), tableId); - Database db = getDb(dbId); Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); + locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(view.getId()), LockType.WRITE); try { - OlapTable table = (OlapTable) getTable(db.getId(), tableId); - String tableName = table.getName(); - db.dropTable(tableName); - table.setName(newTableName); - db.registerTableUnlocked(table); - inactiveRelatedMaterializedView(db, table, - MaterializedViewExceptions.inactiveReasonForBaseTableRenamed(tableName)); + String viewName = view.getName(); + view.setInlineViewDefWithSqlMode(inlineViewDef, alterViewInfo.getSqlMode()); + try { + view.init(); + } catch (UserException e) { + throw new AlterJobException("failed to init view stmt", e); + } + view.setNewFullSchema(newFullSchema); + view.setComment(comment); + inactiveRelatedMaterializedView(db, view, + MaterializedViewExceptions.inactiveReasonForBaseViewChanged(viewName)); + db.dropTable(viewName); + db.registerTableUnlocked(view); - LOG.info("replay rename table[{}] to {}, tableId: {}", tableName, newTableName, table.getId()); + LOG.info("replay modify view[{}] definition to {}", viewName, inlineViewDef); } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); + locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(view.getId()), LockType.WRITE); } } @Override - public void renamePartition(Database db, Table table, PartitionRenameClause renameClause) throws DdlException { - OlapTable olapTable = (OlapTable) table; - if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { - throw new DdlException("Table[" + olapTable.getName() + "] is under " + olapTable.getState()); - } + public void renameMaterializedView(RenameMaterializedViewLog renameMaterializedViewLog) { + GlobalStateMgr.getCurrentState().getEditLog().logMvRename(renameMaterializedViewLog); + } - if (!olapTable.getPartitionInfo().isRangePartition()) { - throw new DdlException("Table[" + olapTable.getName() + "] is single partitioned. 
" - + "no need to rename partition name."); + public void replayRenameMaterializedView(RenameMaterializedViewLog log) { + long dbId = log.getDbId(); + long materializedViewId = log.getId(); + String newMaterializedViewName = log.getNewMaterializedViewName(); + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); + MaterializedView oldMaterializedView = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(db.getId(), materializedViewId); + if (oldMaterializedView != null) { + try (AutoCloseableLock ignore = new AutoCloseableLock(new Locker(), db.getId(), + Lists.newArrayList(oldMaterializedView.getId()), LockType.WRITE)) { + db.dropTable(oldMaterializedView.getName()); + oldMaterializedView.setName(newMaterializedViewName); + db.registerTableUnlocked(oldMaterializedView); + updateTaskDefinition(oldMaterializedView); + LOG.info("Replay rename materialized view [{}] to {}, id: {}", oldMaterializedView.getName(), + newMaterializedViewName, oldMaterializedView.getId()); + } catch (Throwable e) { + oldMaterializedView.setInactiveAndReason("replay rename failed: " + e.getMessage()); + LOG.warn("replay rename materialized-view failed: {}", oldMaterializedView.getName(), e); + } } + } - String partitionName = renameClause.getPartitionName(); - String newPartitionName = renameClause.getNewPartitionName(); - if (partitionName.equalsIgnoreCase(newPartitionName)) { - throw new DdlException("Same partition name"); - } + @Override + public void alterMvBaseTableInfos(AlterMaterializedViewBaseTableInfosLog alterMaterializedViewBaseTableInfos) { + GlobalStateMgr.getCurrentState().getEditLog().logAlterMvBaseTableInfos(alterMaterializedViewBaseTableInfos); + } - Partition partition = olapTable.getPartition(partitionName); - if (partition == null) { - throw new DdlException("Partition[" + partitionName + "] does not exists"); + public void replayAlterMaterializedViewBaseTableInfos(AlterMaterializedViewBaseTableInfosLog log) { + long dbId = log.getDbId(); + long mvId = log.getMvId(); + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); + MaterializedView mv = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), mvId); + if (mv == null) { + return; } - // check if name is already used - if (olapTable.checkPartitionNameExist(newPartitionName)) { - throw new DdlException("Partition name[" + newPartitionName + "] is already used"); + Locker locker = new Locker(); + locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(mv.getId()), LockType.WRITE); + try { + mv.replayAlterMaterializedViewBaseTableInfos(log); + } catch (Throwable e) { + LOG.warn("replay alter materialized-view status failed: {}", mv.getName(), e); + mv.setInactiveAndReason("replay alter status failed: " + e.getMessage()); + } finally { + locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(mv.getId()), LockType.WRITE); } + } - olapTable.renamePartition(partitionName, newPartitionName); - - // log - TableInfo tableInfo = TableInfo.createForPartitionRename(db.getId(), olapTable.getId(), partition.getId(), - newPartitionName); - GlobalStateMgr.getCurrentState().getEditLog().logPartitionRename(tableInfo); - LOG.info("rename partition[{}] to {}", partitionName, newPartitionName); + @Override + public void alterMvStatus(AlterMaterializedViewStatusLog log) { + GlobalStateMgr.getCurrentState().getEditLog().logAlterMvStatus(log); } - public void replayRenamePartition(TableInfo tableInfo) { - long dbId = 
tableInfo.getDbId(); - long tableId = tableInfo.getTableId(); - long partitionId = tableInfo.getPartitionId(); - String newPartitionName = tableInfo.getNewPartitionName(); + public void replayAlterMaterializedViewStatus(AlterMaterializedViewStatusLog log) { + long dbId = log.getDbId(); + long tableId = log.getTableId(); + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); + MaterializedView mv = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(db.getId(), tableId); + if (mv == null) { + return; + } - Database db = getDb(dbId); Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); + locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(mv.getId()), LockType.WRITE); try { - OlapTable table = (OlapTable) getTable(db.getId(), tableId); - Partition partition = table.getPartition(partitionId); - table.renamePartition(partition.getName(), newPartitionName); - LOG.info("replay rename partition[{}] to {}", partition.getName(), newPartitionName); + alterMaterializedViewStatus(mv, log.getStatus(), true); + } catch (Throwable e) { + LOG.warn("replay alter materialized-view status failed: {}", mv.getName(), e); + mv.setInactiveAndReason("replay alter status failed: " + e.getMessage()); } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); + locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(mv.getId()), LockType.WRITE); } } - public void renameRollup(Database db, OlapTable table, RollupRenameClause renameClause) throws DdlException { - if (table.getState() != OlapTable.OlapTableState.NORMAL) { - throw new DdlException("Table[" + table.getName() + "] is under " + table.getState()); - } - - String rollupName = renameClause.getRollupName(); - // check if it is base table name - if (rollupName.equals(table.getName())) { - throw new DdlException("Using ALTER TABLE RENAME to change table name"); - } - - String newRollupName = renameClause.getNewRollupName(); - if (rollupName.equals(newRollupName)) { - throw new DdlException("Same rollup name"); - } + public void alterMaterializedViewStatus(MaterializedView materializedView, String status, boolean isReplay) { + LOG.info("process change materialized view {} status to {}, isReplay: {}", + materializedView.getName(), status, isReplay); + if (AlterMaterializedViewStatusClause.ACTIVE.equalsIgnoreCase(status)) { + ConnectContext context = new ConnectContext(); + context.setGlobalStateMgr(GlobalStateMgr.getCurrentState()); + context.setQualifiedUser(AuthenticationMgr.ROOT_USER); + context.setCurrentUserIdentity(UserIdentity.ROOT); + context.setCurrentRoleIds(Sets.newHashSet(PrivilegeBuiltinConstants.ROOT_ROLE_ID)); - Map indexNameToIdMap = table.getIndexNameToId(); - if (indexNameToIdMap.get(rollupName) == null) { - throw new DdlException("Rollup index[" + rollupName + "] does not exists"); - } + String createMvSql = materializedView.getMaterializedViewDdlStmt(false, isReplay); + QueryStatement mvQueryStatement = null; + try { + mvQueryStatement = recreateMVQuery(materializedView, context, createMvSql); + } catch (SemanticException e) { + throw new SemanticException("Can not active materialized view [%s]" + + " because analyze materialized view define sql: \n\n%s" + + "\n\nCause an error: %s", materializedView.getName(), createMvSql, e.getDetailMsg()); + } - // check if name is already used - if (indexNameToIdMap.get(newRollupName) != null) { - throw new DdlException("Rollup name[" + newRollupName + "] is already used"); + // Skip 
checks to maintain eventual consistency when replay + List baseTableInfos = + Lists.newArrayList(MaterializedViewAnalyzer.getBaseTableInfos(mvQueryStatement, !isReplay)); + materializedView.setBaseTableInfos(baseTableInfos); + materializedView.getRefreshScheme().getAsyncRefreshContext().clearVisibleVersionMap(); + materializedView.onReload(); + materializedView.setActive(); + } else if (AlterMaterializedViewStatusClause.INACTIVE.equalsIgnoreCase(status)) { + materializedView.setInactiveAndReason("user use alter materialized view set status to inactive"); } + } - long indexId = indexNameToIdMap.remove(rollupName); - indexNameToIdMap.put(newRollupName, indexId); + /* + * Recreate the MV query and validate the correctness of syntax and schema + */ + public static QueryStatement recreateMVQuery(MaterializedView materializedView, + ConnectContext context, + String createMvSql) { + // If we could parse the MV sql successfully, and the schema of mv does not change, + // we could reuse the existing MV + Optional mayDb = GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetDb(materializedView.getDbId()); + + // check database existing + String dbName = mayDb.orElseThrow(() -> + new SemanticException("database " + materializedView.getDbId() + " not exists")).getFullName(); + context.setDatabase(dbName); + + // Try to parse and analyze the creation sql + List statementBaseList = SqlParser.parse(createMvSql, context.getSessionVariable()); + CreateMaterializedViewStatement createStmt = (CreateMaterializedViewStatement) statementBaseList.get(0); + Analyzer.analyze(createStmt, context); + + // validate the schema + List newColumns = createStmt.getMvColumnItems().stream() + .sorted(Comparator.comparing(Column::getName)) + .collect(Collectors.toList()); + List existedColumns = materializedView.getColumns().stream() + .sorted(Comparator.comparing(Column::getName)) + .collect(Collectors.toList()); + if (newColumns.size() != existedColumns.size()) { + throw new SemanticException(String.format("number of columns changed: %d != %d", + existedColumns.size(), newColumns.size())); + } + for (int i = 0; i < existedColumns.size(); i++) { + Column existed = existedColumns.get(i); + Column created = newColumns.get(i); + if (!existed.isSchemaCompatible(created)) { + String message = MaterializedViewExceptions.inactiveReasonForColumnNotCompatible( + existed.toString(), created.toString()); + materializedView.setInactiveAndReason(message); + throw new SemanticException(message); + } + } + + return createStmt.getQueryStatement(); + } - // log - TableInfo tableInfo = TableInfo.createForRollupRename(db.getId(), table.getId(), indexId, newRollupName); - GlobalStateMgr.getCurrentState().getEditLog().logRollupRename(tableInfo); - LOG.info("rename rollup[{}] to {}", rollupName, newRollupName); + @Override + public void alterMaterializedViewProperties(ModifyTablePropertyOperationLog log) { + GlobalStateMgr.getCurrentState().getEditLog().logAlterMaterializedViewProperties(log); } - public void replayRenameRollup(TableInfo tableInfo) { - long dbId = tableInfo.getDbId(); - long tableId = tableInfo.getTableId(); - long indexId = tableInfo.getIndexId(); - String newRollupName = tableInfo.getNewRollupName(); + public void replayAlterMaterializedViewProperties(short opCode, ModifyTablePropertyOperationLog log) { + long dbId = log.getDbId(); + long tableId = log.getTableId(); + Map properties = log.getProperties(); + + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); + MaterializedView mv = 
(MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(db.getId(), tableId); + if (mv == null) { + LOG.warn("Ignore change materialized view properties log because table: " + tableId + " is null"); + return; + }
- Database db = getDb(dbId); Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); + locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(mv.getId()), LockType.WRITE); try { - OlapTable table = (OlapTable) getTable(db.getId(), tableId); - String rollupName = table.getIndexNameById(indexId); - Map<String, Long> indexNameToIdMap = table.getIndexNameToId(); - indexNameToIdMap.remove(rollupName); - indexNameToIdMap.put(newRollupName, indexId); - - LOG.info("replay rename rollup[{}] to {}", rollupName, newRollupName); + TableProperty tableProperty = mv.getTableProperty(); + if (tableProperty == null) { + tableProperty = new TableProperty(properties); + mv.setTableProperty(tableProperty.buildProperty(opCode)); + } else { + tableProperty.modifyTableProperties(properties); + tableProperty.buildProperty(opCode); + } + } catch (Throwable e) { + mv.setInactiveAndReason("replay failed: " + e.getMessage()); + LOG.warn("replay alter materialized-view properties failed: {}", mv.getName(), e); } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); + locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(mv.getId()), LockType.WRITE); } }
- public void renameColumn(Database db, Table table, ColumnRenameClause renameClause) { - if (!(table instanceof OlapTable)) { - throw ErrorReportException.report(ErrorCode.ERR_COLUMN_RENAME_ONLY_FOR_OLAP_TABLE); - } - if (db.isSystemDatabase() || db.isStatisticsDatabase()) { - throw ErrorReportException.report(ErrorCode.ERR_CANNOT_RENAME_COLUMN_IN_INTERNAL_DB, db.getFullName()); - } - OlapTable olapTable = (OlapTable) table; - if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { - throw ErrorReportException.report(ErrorCode.ERR_CANNOT_RENAME_COLUMN_OF_NOT_NORMAL_TABLE, olapTable.getState()); - } - - String colName = renameClause.getColName(); - String newColName = renameClause.getNewColName(); + @Override + public void changeMaterializedRefreshScheme(ChangeMaterializedViewRefreshSchemeLog log) { + GlobalStateMgr.getCurrentState().getEditLog().logMvChangeRefreshScheme(log); + }
- Column column = olapTable.getColumn(colName); - if (column == null) { - throw ErrorReportException.report(ErrorCode.ERR_BAD_FIELD_ERROR, colName, table.getName()); - } - Column currentColumn = olapTable.getColumn(newColName); - if (currentColumn != null) { - throw ErrorReportException.report(ErrorCode.ERR_DUP_FIELDNAME, newColName); + public void replayChangeMaterializedViewRefreshScheme(ChangeMaterializedViewRefreshSchemeLog log) { + long dbId = log.getDbId(); + long id = log.getId(); + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); + if (db == null) { + return; }
- olapTable.renameColumn(colName, newColName); - ColumnRenameInfo columnRenameInfo = new ColumnRenameInfo(db.getId(), table.getId(), colName, newColName); - GlobalStateMgr.getCurrentState().getEditLog().logColumnRename(columnRenameInfo); - LOG.info("rename column {} to {}", colName, newColName); - }
+ MaterializedView oldMaterializedView = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(db.getId(), id); + if (oldMaterializedView == null) { + LOG.warn("Ignore change materialized view refresh scheme log because table: " + id + " is null"); + return; + }
- public void 
replayRenameColumn(ColumnRenameInfo columnRenameInfo) throws DdlException { - long dbId = columnRenameInfo.getDbId(); - long tableId = columnRenameInfo.getTableId(); - String colName = columnRenameInfo.getColumnName(); - String newColName = columnRenameInfo.getNewColumnName(); - Database db = getDb(dbId); Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); + locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(oldMaterializedView.getId()), LockType.WRITE); try { - OlapTable olapTable = (OlapTable) getTable(db.getId(), tableId); - olapTable.renameColumn(colName, newColName); - LOG.info("replay rename column[{}] to {}", colName, newColName); + final MaterializedView.MvRefreshScheme newMvRefreshScheme = new MaterializedView.MvRefreshScheme(); + final MaterializedView.MvRefreshScheme oldRefreshScheme = oldMaterializedView.getRefreshScheme(); + newMvRefreshScheme.setAsyncRefreshContext(oldRefreshScheme.getAsyncRefreshContext()); + newMvRefreshScheme.setLastRefreshTime(oldRefreshScheme.getLastRefreshTime()); + final MaterializedView.RefreshType refreshType = log.getRefreshType(); + final MaterializedView.AsyncRefreshContext asyncRefreshContext = log.getAsyncRefreshContext(); + newMvRefreshScheme.setType(refreshType); + newMvRefreshScheme.setAsyncRefreshContext(asyncRefreshContext); + + long maxChangedTableRefreshTime = + MvUtils.getMaxTablePartitionInfoRefreshTime( + log.getAsyncRefreshContext().getBaseTableVisibleVersionMap().values()); + newMvRefreshScheme.setLastRefreshTime(maxChangedTableRefreshTime); + + oldMaterializedView.setRefreshScheme(newMvRefreshScheme); + LOG.info( + "Replay materialized view [{}]'s refresh type to {}, start time to {}, " + + "interval step to {}, timeunit to {}, id: {}, maxChangedTableRefreshTime:{}", + oldMaterializedView.getName(), refreshType.name(), asyncRefreshContext.getStartTime(), + asyncRefreshContext.getStep(), + asyncRefreshContext.getTimeUnit(), oldMaterializedView.getId(), maxChangedTableRefreshTime); + } catch (Throwable e) { + oldMaterializedView.setInactiveAndReason("replay failed: " + e.getMessage()); + LOG.warn("replay change materialized-view refresh scheme failed: {}", + oldMaterializedView.getName(), e); } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); + locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(oldMaterializedView.getId()), LockType.WRITE); } } - public void modifyTableDynamicPartition(Database db, OlapTable table, Map properties) + @Override + public void addPartitionLog(Database db, OlapTable olapTable, List partitionDescs, + boolean isTempPartition, PartitionInfo partitionInfo, + List partitionList, Set existPartitionNameSet) throws DdlException { - Map logProperties = new HashMap<>(properties); - TableProperty tableProperty = table.getTableProperty(); - if (tableProperty == null) { - DynamicPartitionUtil.checkAndSetDynamicPartitionProperty(table, properties); + PartitionType partitionType = partitionInfo.getType(); + if (partitionInfo.isRangePartition()) { + addRangePartitionLog(db, olapTable, partitionDescs, isTempPartition, partitionInfo, partitionList, + existPartitionNameSet); + } else if (partitionType == PartitionType.LIST) { + addListPartitionLog(db, olapTable, partitionDescs, isTempPartition, partitionInfo, partitionList, + existPartitionNameSet); } else { - Map analyzedDynamicPartition = DynamicPartitionUtil.analyzeDynamicPartition(properties); - tableProperty.modifyTableProperties(analyzedDynamicPartition); - 
tableProperty.buildDynamicProperty(); + throw new DdlException("Only support adding partition log to range/list partitioned table"); } - - DynamicPartitionUtil.registerOrRemovePartitionScheduleInfo(db.getId(), table); - - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), logProperties); - GlobalStateMgr.getCurrentState().getEditLog().logDynamicPartition(info); } - public void alterTableProperties(Database db, OlapTable table, Map properties) - throws DdlException { - Map propertiesToPersist = new HashMap<>(properties); - Map results = validateToBeModifiedProps(properties, table); - - TableProperty tableProperty = table.getTableProperty(); - for (String key : results.keySet()) { - if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_PARTITION_LIVE_NUMBER)) { - int partitionLiveNumber = (int) results.get(key); - tableProperty.getProperties().put(PropertyAnalyzer.PROPERTIES_PARTITION_LIVE_NUMBER, - String.valueOf(partitionLiveNumber)); - if (partitionLiveNumber == TableProperty.INVALID) { - GlobalStateMgr.getCurrentState().getDynamicPartitionScheduler().removeTtlPartitionTable(db.getId(), - table.getId()); - } else { - GlobalStateMgr.getCurrentState().getDynamicPartitionScheduler().registerTtlPartitionTable(db.getId(), - table.getId()); - } - tableProperty.setPartitionTTLNumber(partitionLiveNumber); - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), - ImmutableMap.of(key, propertiesToPersist.get(key))); - GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); - } - if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM)) { - DataProperty dataProperty = (DataProperty) results.get(key); - TStorageMedium storageMedium = dataProperty.getStorageMedium(); - table.setStorageMedium(storageMedium); - tableProperty.getProperties() - .put(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TIME, - String.valueOf(dataProperty.getCooldownTimeMs())); - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), - ImmutableMap.of(key, propertiesToPersist.get(key))); - GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); - } - if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL)) { - String storageCoolDownTTL = propertiesToPersist.get(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL); - tableProperty.getProperties().put(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL, storageCoolDownTTL); - tableProperty.buildStorageCoolDownTTL(); - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), - ImmutableMap.of(key, propertiesToPersist.get(key))); - GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); - } - if (propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION)) { - String partitionDuration = propertiesToPersist.get(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION); - tableProperty.getProperties().put(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION, partitionDuration); - tableProperty.buildDataCachePartitionDuration(); - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), - ImmutableMap.of(key, propertiesToPersist.get(key))); - GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); - } - if 
(propertiesToPersist.containsKey(PropertyAnalyzer.PROPERTIES_LABELS_LOCATION)) { - String location = propertiesToPersist.get(PropertyAnalyzer.PROPERTIES_LABELS_LOCATION); - table.setLocation(location); - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), - ImmutableMap.of(key, propertiesToPersist.get(key))); - GlobalStateMgr.getCurrentState().getEditLog().logAlterTableProperties(info); + private void addRangePartitionLog(Database db, OlapTable olapTable, List partitionDescs, + boolean isTempPartition, PartitionInfo partitionInfo, + List partitionList, Set existPartitionNameSet) { + int partitionLen = partitionList.size(); + List partitionInfoV2List = Lists.newArrayListWithCapacity(partitionLen); + if (partitionLen == 1) { + Partition partition = partitionList.get(0); + if (existPartitionNameSet.contains(partition.getName())) { + LOG.info("add partition[{}] which already exists", partition.getName()); + return; } - } - } + PartitionPersistInfoV2 info = new RangePartitionPersistInfo(db.getId(), olapTable.getId(), partition, + partitionDescs.get(0).getPartitionDataProperty(), + partitionInfo.getReplicationNum(partition.getId()), + partitionInfo.getIsInMemory(partition.getId()), isTempPartition, + ((RangePartitionInfo) partitionInfo).getRange(partition.getId()), + ((SingleRangePartitionDesc) partitionDescs.get(0)).getDataCacheInfo()); + partitionInfoV2List.add(info); + AddPartitionsInfoV2 infos = new AddPartitionsInfoV2(partitionInfoV2List); + GlobalStateMgr.getCurrentState().getEditLog().logAddPartitions(infos); - private Map validateToBeModifiedProps(Map properties, OlapTable table) throws DdlException { - Map results = Maps.newHashMap(); - if (properties.containsKey(PropertyAnalyzer.PROPERTIES_PARTITION_LIVE_NUMBER)) { - int partitionLiveNumber = PropertyAnalyzer.analyzePartitionLiveNumber(properties, true); - results.put(PropertyAnalyzer.PROPERTIES_PARTITION_LIVE_NUMBER, partitionLiveNumber); - } - if (properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM)) { - try { - DataProperty dataProperty = DataProperty.getInferredDefaultDataProperty(); - dataProperty = PropertyAnalyzer.analyzeDataProperty(properties, dataProperty, false); - results.put(PropertyAnalyzer.PROPERTIES_STORAGE_MEDIUM, dataProperty); - } catch (AnalysisException ex) { - throw new RuntimeException(ex.getMessage()); - } - } - if (properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL)) { - try { - PropertyAnalyzer.analyzeStorageCoolDownTTL(properties, true); - results.put(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TTL, null); - } catch (AnalysisException ex) { - throw new RuntimeException(ex.getMessage()); - } - } - if (properties.containsKey(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION)) { - try { - PropertyAnalyzer.analyzeDataCachePartitionDuration(properties); - results.put(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION, null); - } catch (AnalysisException ex) { - throw new RuntimeException(ex.getMessage()); - } - } - if (properties.containsKey(PropertyAnalyzer.PROPERTIES_LABELS_LOCATION)) { - if (table.getColocateGroup() != null) { - throw new DdlException("Cannot set location for colocate table"); - } - String locations = PropertyAnalyzer.analyzeLocation(properties, true); - results.put(PropertyAnalyzer.PROPERTIES_LABELS_LOCATION, locations); - } - if (!properties.isEmpty()) { - throw new DdlException("Modify failed because unknown properties: " + properties); - } - return results; - } + LOG.info("succeed in 
creating partition[{}], name: {}, temp: {}", partition.getId(), + partition.getName(), isTempPartition); + } else { + for (int i = 0; i < partitionLen; i++) { + Partition partition = partitionList.get(i); + if (!existPartitionNameSet.contains(partition.getName())) { + PartitionPersistInfoV2 info = new RangePartitionPersistInfo(db.getId(), olapTable.getId(), + partition, partitionDescs.get(i).getPartitionDataProperty(), + partitionInfo.getReplicationNum(partition.getId()), + partitionInfo.getIsInMemory(partition.getId()), isTempPartition, + ((RangePartitionInfo) partitionInfo).getRange(partition.getId()), + ((SingleRangePartitionDesc) partitionDescs.get(i)).getDataCacheInfo()); - /** - * Set replication number for unpartitioned table. - * ATTN: only for unpartitioned table now. - * - * @param db - * @param table - * @param properties - * @throws DdlException - */ - // The caller need to hold the db write lock - public void modifyTableReplicationNum(Database db, OlapTable table, Map properties) - throws DdlException { - if (colocateTableIndex.isColocateTable(table.getId())) { - throw new DdlException("table " + table.getName() + " is colocate table, cannot change replicationNum"); - } + partitionInfoV2List.add(info); + } + } - String defaultReplicationNumName = "default." + PropertyAnalyzer.PROPERTIES_REPLICATION_NUM; - PartitionInfo partitionInfo = table.getPartitionInfo(); - if (partitionInfo.isRangePartition()) { - throw new DdlException( - "This is a range partitioned table, you should specify partitions with MODIFY PARTITION clause." + - " If you want to set default replication number, please use '" + defaultReplicationNumName + - "' instead of '" + PropertyAnalyzer.PROPERTIES_REPLICATION_NUM + "' to escape misleading."); - } + AddPartitionsInfoV2 infos = new AddPartitionsInfoV2(partitionInfoV2List); + GlobalStateMgr.getCurrentState().getEditLog().logAddPartitions(infos); - // unpartitioned table - // update partition replication num - String partitionName = table.getName(); - Partition partition = table.getPartition(partitionName); - if (partition == null) { - throw new DdlException("Partition does not exist. name: " + partitionName); + for (PartitionPersistInfoV2 infoV2 : partitionInfoV2List) { + LOG.info("succeed in creating partition[{}], name: {}, temp: {}", infoV2.getPartition().getId(), + infoV2.getPartition().getName(), isTempPartition); + } } - - short replicationNum = Short.parseShort(properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM)); - boolean isInMemory = partitionInfo.getIsInMemory(partition.getId()); - DataProperty newDataProperty = partitionInfo.getDataProperty(partition.getId()); - partitionInfo.setReplicationNum(partition.getId(), replicationNum); - - // update table default replication num - table.setReplicationNum(replicationNum); - - // log - ModifyPartitionInfo info = new ModifyPartitionInfo(db.getId(), table.getId(), partition.getId(), - newDataProperty, replicationNum, isInMemory); - GlobalStateMgr.getCurrentState().getEditLog().logModifyPartition(info); - LOG.info("modify partition[{}-{}-{}] replication num to {}", db.getOriginName(), table.getName(), - partition.getName(), replicationNum); } - /** - * Set default replication number for a specified table. - * You can see the default replication number by Show Create Table stmt. 
- * - * @param db - * @param table - * @param properties - */ - // The caller need to hold the db write lock - public void modifyTableDefaultReplicationNum(Database db, OlapTable table, Map properties) + @VisibleForTesting + public void addListPartitionLog(Database db, OlapTable olapTable, List partitionDescs, + boolean isTempPartition, PartitionInfo partitionInfo, + List partitionList, Set existPartitionNameSet) throws DdlException { - Locker locker = new Locker(); - Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); - if (colocateTableIndex.isColocateTable(table.getId())) { - throw new DdlException("table " + table.getName() + " is colocate table, cannot change replicationNum"); + if (partitionList == null) { + throw new DdlException("partitionList should not null"); + } else if (partitionList.size() == 0) { + return; } - // check unpartitioned table - PartitionInfo partitionInfo = table.getPartitionInfo(); - Partition partition = null; - boolean isUnpartitionedTable = false; - if (partitionInfo.getType() == PartitionType.UNPARTITIONED) { - isUnpartitionedTable = true; - String partitionName = table.getName(); - partition = table.getPartition(partitionName); - if (partition == null) { - throw new DdlException("Partition does not exist. name: " + partitionName); + // TODO: add only 1 log for multi list partition + int i = 0; + for (Partition partition : partitionList) { + if (existPartitionNameSet.contains(partition.getName())) { + LOG.info("add partition[{}] which already exists", partition.getName()); + continue; } + long partitionId = partition.getId(); + PartitionPersistInfoV2 info = new ListPartitionPersistInfo(db.getId(), olapTable.getId(), partition, + partitionDescs.get(i).getPartitionDataProperty(), + partitionInfo.getReplicationNum(partitionId), + partitionInfo.getIsInMemory(partitionId), + isTempPartition, + ((ListPartitionInfo) partitionInfo).getIdToValues().get(partitionId), + ((ListPartitionInfo) partitionInfo).getIdToMultiValues().get(partitionId), + partitionDescs.get(i).getDataCacheInfo()); + GlobalStateMgr.getCurrentState().getEditLog().logAddPartition(info); + LOG.info("succeed in creating list partition[{}], name: {}, temp: {}", partitionId, + partition.getName(), isTempPartition); + i++; } - - TableProperty tableProperty = table.getTableProperty(); - if (tableProperty == null) { - tableProperty = new TableProperty(properties); - table.setTableProperty(tableProperty); - } else { - tableProperty.modifyTableProperties(properties); - } - tableProperty.buildReplicationNum(); - - // update partition replication num if this table is unpartitioned table - if (isUnpartitionedTable) { - Preconditions.checkNotNull(partition); - partitionInfo.setReplicationNum(partition.getId(), tableProperty.getReplicationNum()); - } - - // log - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); - GlobalStateMgr.getCurrentState().getEditLog().logModifyReplicationNum(info); - LOG.info("modify table[{}] replication num to {}", table.getName(), - properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM)); - } - - public void modifyTableEnablePersistentIndexMeta(Database db, OlapTable table, Map properties) { - Locker locker = new Locker(); - Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); - TableProperty tableProperty = table.getTableProperty(); - if (tableProperty == null) { - tableProperty = new TableProperty(properties); - table.setTableProperty(tableProperty); - } else { - 
tableProperty.modifyTableProperties(properties); - } - tableProperty.buildEnablePersistentIndex(); - - if (table.isCloudNativeTable()) { - // now default to LOCAL - tableProperty.buildPersistentIndexType(); - } - - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); - GlobalStateMgr.getCurrentState().getEditLog().logModifyEnablePersistentIndex(info); - } - public void modifyBinlogMeta(Database db, OlapTable table, BinlogConfig binlogConfig) { - Locker locker = new Locker(); - Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); - ModifyTablePropertyOperationLog log = new ModifyTablePropertyOperationLog( - db.getId(), - table.getId(), - binlogConfig.toProperties()); - GlobalStateMgr.getCurrentState().getEditLog().logModifyBinlogConfig(log); - - if (!binlogConfig.getBinlogEnable()) { - table.clearBinlogAvailableVersion(); - table.setBinlogTxnId(BinlogConfig.INVALID); - } - table.setCurBinlogConfig(binlogConfig); + @Override + public void addPartition(AddPartitionsInfoV2 addPartitionsInfo) { + GlobalStateMgr.getCurrentState().getEditLog().logAddPartitions(addPartitionsInfo); } - // The caller need to hold the db write lock - public void modifyTableInMemoryMeta(Database db, OlapTable table, Map properties) { + public void replayAddPartition(PartitionPersistInfoV2 info) throws DdlException { + Database db = this.getDb(info.getDbId()); Locker locker = new Locker(); - Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); - TableProperty tableProperty = table.getTableProperty(); - if (tableProperty == null) { - tableProperty = new TableProperty(properties); - table.setTableProperty(tableProperty); - } else { - tableProperty.modifyTableProperties(properties); - } - tableProperty.buildInMemory(); - - // need to update partition info meta - for (Partition partition : table.getPartitions()) { - table.getPartitionInfo().setIsInMemory(partition.getId(), tableProperty.isInMemory()); - } - - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); - GlobalStateMgr.getCurrentState().getEditLog().logModifyInMemory(info); - } + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + OlapTable olapTable = (OlapTable) getTable(db.getId(), info.getTableId()); + Partition partition = info.getPartition(); - // The caller need to hold the db write lock - public void modifyTableConstraint(Database db, String tableName, Map properties) - throws DdlException { - Locker locker = new Locker(); - Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); - Table table = getTable(db.getFullName(), tableName); - if (table == null) { - throw new DdlException(String.format("table:%s does not exist", tableName)); - } - OlapTable olapTable = (OlapTable) table; - TableProperty tableProperty = olapTable.getTableProperty(); - if (tableProperty == null) { - tableProperty = new TableProperty(properties); - olapTable.setTableProperty(tableProperty); - } else { - tableProperty.modifyTableProperties(properties); - } - tableProperty.buildConstraint(); + PartitionInfo partitionInfo = olapTable.getPartitionInfo(); + if (info.isTempPartition()) { + olapTable.addTempPartition(partition); + } else { + olapTable.addPartition(partition); + } - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), olapTable.getId(), properties); - GlobalStateMgr.getCurrentState().getEditLog().logModifyConstraint(info); - } + PartitionType 
partitionType = partitionInfo.getType(); + if (partitionType == PartitionType.LIST) { + try { + ((ListPartitionInfo) partitionInfo).unprotectHandleNewPartitionDesc( + olapTable.getIdToColumn(), info.asListPartitionPersistInfo()); + } catch (AnalysisException e) { + throw new DdlException(e.getMessage()); + } + } else if (partitionInfo.isRangePartition()) { + ((RangePartitionInfo) partitionInfo).unprotectHandleNewSinglePartitionDesc( + info.asRangePartitionPersistInfo()); + } else if (partitionType == PartitionType.UNPARTITIONED) { + // insert overwrite job will create temp partition and replace the single partition. + partitionInfo.addPartition(partition.getId(), info.getDataProperty(), info.getReplicationNum(), + info.isInMemory(), info.getDataCacheInfo()); + } else { + throw new DdlException("Unsupported partition type: " + partitionType.name()); + } - // The caller need to hold the db write lock - public void modifyTableWriteQuorum(Database db, OlapTable table, Map properties) { - Locker locker = new Locker(); - Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); - TableProperty tableProperty = table.getTableProperty(); - if (tableProperty == null) { - tableProperty = new TableProperty(properties); - table.setTableProperty(tableProperty); - } else { - tableProperty.modifyTableProperties(properties); + if (!isCheckpointThread()) { + // add to inverted index + TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); + for (PhysicalPartition physicalPartition : partition.getSubPartitions()) { + for (MaterializedIndex index : + physicalPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + long indexId = index.getId(); + int schemaHash = olapTable.getSchemaHashByIndexId(indexId); + TabletMeta tabletMeta = new TabletMeta(info.getDbId(), info.getTableId(), partition.getId(), + index.getId(), schemaHash, info.getDataProperty().getStorageMedium()); + for (Tablet tablet : index.getTablets()) { + long tabletId = tablet.getId(); + invertedIndex.addTablet(tabletId, tabletMeta); + // modify some logic + if (tablet instanceof LocalTablet) { + for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { + invertedIndex.addReplica(tabletId, replica); + } + } + } + } + } + } + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); } - tableProperty.buildWriteQuorum(); - - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); - GlobalStateMgr.getCurrentState().getEditLog().logModifyWriteQuorum(info); } - // The caller need to hold the db write lock - public void modifyTableReplicatedStorage(Database db, OlapTable table, Map properties) { - Locker locker = new Locker(); - Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); - TableProperty tableProperty = table.getTableProperty(); - if (tableProperty == null) { - tableProperty = new TableProperty(properties); - table.setTableProperty(tableProperty); - } else { - tableProperty.modifyTableProperties(properties); - } - tableProperty.buildReplicatedStorage(); - - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); - GlobalStateMgr.getCurrentState().getEditLog().logModifyReplicatedStorage(info); + @Override + public void dropPartition(DropPartitionInfo dropPartitionInfo) { + GlobalStateMgr.getCurrentState().getEditLog().logDropPartition(dropPartitionInfo); } - // The caller need to hold the db write lock - public 
void modifyTableAutomaticBucketSize(Database db, OlapTable table, Map properties) { + public void replayDropPartition(DropPartitionInfo info) { + Database db = this.getDb(info.getDbId()); Locker locker = new Locker(); - Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); - TableProperty tableProperty = table.getTableProperty(); - if (tableProperty == null) { - tableProperty = new TableProperty(properties); - table.setTableProperty(tableProperty); - } else { - tableProperty.modifyTableProperties(properties); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + OlapTable olapTable = (OlapTable) getTable(db.getId(), info.getTableId()); + if (info.isTempPartition()) { + olapTable.dropTempPartition(info.getPartitionName(), true); + } else { + olapTable.dropPartition(info.getDbId(), info.getPartitionName(), info.isForceDrop()); + } + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); } - tableProperty.buildBucketSize(); - - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); - GlobalStateMgr.getCurrentState().getEditLog().logModifyBucketSize(info); } - public void modifyTableMutableBucketNum(Database db, OlapTable table, Map properties) { - Locker locker = new Locker(); - Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); - TableProperty tableProperty = table.getTableProperty(); - if (tableProperty == null) { - tableProperty = new TableProperty(properties); - table.setTableProperty(tableProperty); - } else { - tableProperty.modifyTableProperties(properties); - } - tableProperty.buildMutableBucketNum(); - - ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), - properties); - GlobalStateMgr.getCurrentState().getEditLog().logModifyMutableBucketNum(info); + @Override + public void dropPartitions(DropPartitionsInfo dropPartitionInfo) { + GlobalStateMgr.getCurrentState().getEditLog().logDropPartitions(dropPartitionInfo); } - public void modifyTableEnableLoadProfile(Database db, OlapTable table, Map properties) { + public void replayDropPartitions(DropPartitionsInfo info) { + Database db = this.getDb(info.getDbId()); Locker locker = new Locker(); - Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); - TableProperty tableProperty = table.getTableProperty(); - if (tableProperty == null) { - tableProperty = new TableProperty(properties); - table.setTableProperty(tableProperty); - } else { - tableProperty.modifyTableProperties(properties); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + LOG.info("Begin to unprotect drop partitions. 
db = " + info.getDbId() + + " table = " + info.getTableId() + + " partitionNames = " + info.getPartitionNames()); + List partitionNames = info.getPartitionNames(); + OlapTable olapTable = (OlapTable) getTable(db.getId(), info.getTableId()); + boolean isTempPartition = info.isTempPartition(); + long dbId = info.getDbId(); + boolean isForceDrop = info.isForceDrop(); + partitionNames.stream().forEach(partitionName -> { + if (isTempPartition) { + olapTable.dropTempPartition(partitionName, true); + } else { + olapTable.dropPartition(dbId, partitionName, isForceDrop); + } + }); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); } - tableProperty.buildEnableLoadProfile(); - - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties); - GlobalStateMgr.getCurrentState().getEditLog().logModifyEnableLoadProfile(info); } - public void modifyTablePrimaryIndexCacheExpireSec(Database db, OlapTable table, Map properties) { - Locker locker = new Locker(); - Preconditions.checkArgument(locker.isDbWriteLockHeldByCurrentThread(db)); - TableProperty tableProperty = table.getTableProperty(); - if (tableProperty == null) { - tableProperty = new TableProperty(properties); - table.setTableProperty(tableProperty); - } else { - tableProperty.modifyTableProperties(properties); - } - tableProperty.buildPrimaryIndexCacheExpireSec(); - - ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), - properties); - GlobalStateMgr.getCurrentState().getEditLog().logModifyPrimaryIndexCacheExpireSec(info); + @Override + public void renamePartition(TableInfo tableInfo) { + GlobalStateMgr.getCurrentState().getEditLog().logPartitionRename(tableInfo); } - public void modifyTableMeta(Database db, OlapTable table, Map properties, - TTabletMetaType metaType) { - if (metaType == TTabletMetaType.INMEMORY) { - modifyTableInMemoryMeta(db, table, properties); - } else if (metaType == TTabletMetaType.ENABLE_PERSISTENT_INDEX) { - modifyTableEnablePersistentIndexMeta(db, table, properties); - } else if (metaType == TTabletMetaType.WRITE_QUORUM) { - modifyTableWriteQuorum(db, table, properties); - } else if (metaType == TTabletMetaType.REPLICATED_STORAGE) { - modifyTableReplicatedStorage(db, table, properties); - } else if (metaType == TTabletMetaType.BUCKET_SIZE) { - modifyTableAutomaticBucketSize(db, table, properties); - } else if (metaType == TTabletMetaType.MUTABLE_BUCKET_NUM) { - modifyTableMutableBucketNum(db, table, properties); - } else if (metaType == TTabletMetaType.PRIMARY_INDEX_CACHE_EXPIRE_SEC) { - modifyTablePrimaryIndexCacheExpireSec(db, table, properties); - } else if (metaType == TTabletMetaType.ENABLE_LOAD_PROFILE) { - modifyTableEnableLoadProfile(db, table, properties); - } - } + public void replayRenamePartition(TableInfo tableInfo) { + long dbId = tableInfo.getDbId(); + long tableId = tableInfo.getTableId(); + long partitionId = tableInfo.getPartitionId(); + String newPartitionName = tableInfo.getNewPartitionName(); - public void setHasForbiddenGlobalDict(String dbName, String tableName, boolean isForbit) throws DdlException { - Map property = new HashMap<>(); - Database db = getDb(dbName); - if (db == null) { - throw new DdlException("the DB " + dbName + " is not exist"); - } + Database db = getDb(dbId); Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.READ); + locker.lockDatabase(db.getId(), LockType.WRITE); try { - Table table = getTable(db.getFullName(), tableName); - if (table 
== null) { - throw new DdlException("the DB " + dbName + " table: " + tableName + "isn't exist"); - } - - if (table instanceof OlapTable) { - OlapTable olapTable = (OlapTable) table; - olapTable.setHasForbiddenGlobalDict(isForbit); - if (isForbit) { - property.put(PropertyAnalyzer.ENABLE_LOW_CARD_DICT_TYPE, PropertyAnalyzer.DISABLE_LOW_CARD_DICT); - IDictManager.getInstance().disableGlobalDict(olapTable.getId()); - } else { - property.put(PropertyAnalyzer.ENABLE_LOW_CARD_DICT_TYPE, PropertyAnalyzer.ABLE_LOW_CARD_DICT); - IDictManager.getInstance().enableGlobalDict(olapTable.getId()); - } - ModifyTablePropertyOperationLog info = - new ModifyTablePropertyOperationLog(db.getId(), table.getId(), property); - GlobalStateMgr.getCurrentState().getEditLog().logSetHasForbiddenGlobalDict(info); - } + OlapTable table = (OlapTable) getTable(db.getId(), tableId); + Partition partition = table.getPartition(partitionId); + table.renamePartition(partition.getName(), newPartitionName); + LOG.info("replay rename partition[{}] to {}", partition.getName(), newPartitionName); } finally { - locker.unLockDatabase(db.getId(), LockType.READ); + locker.unLockDatabase(db.getId(), LockType.WRITE); } } - public void replayModifyHiveTableColumn(short opCode, ModifyTableColumnOperationLog info) { - if (info.getDbName() == null) { + @Override + public void replaceTempPartition(ReplacePartitionOperationLog info) { + GlobalStateMgr.getCurrentState().getEditLog().logReplaceTempPartition(info); + } + + public void replayReplaceTempPartition(ReplacePartitionOperationLog replaceTempPartitionLog) { + Database db = getDb(replaceTempPartitionLog.getDbId()); + if (db == null) { return; } - String hiveExternalDb = info.getDbName(); - String hiveExternalTable = info.getTableName(); - LOG.info("replayModifyTableColumn hiveDb:{},hiveTable:{}", hiveExternalDb, hiveExternalTable); - List columns = info.getColumns(); - Database db = getDb(hiveExternalDb); - HiveTable table; Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.WRITE); try { - Table tbl = getTable(db.getFullName(), hiveExternalTable); - table = (HiveTable) tbl; - table.setNewFullSchema(columns); + OlapTable olapTable = (OlapTable) getTable(db.getId(), replaceTempPartitionLog.getTblId()); + if (olapTable == null) { + return; + } + if (replaceTempPartitionLog.isUnPartitionedTable()) { + olapTable.replacePartition(replaceTempPartitionLog.getPartitions().get(0), + replaceTempPartitionLog.getTempPartitions().get(0)); + return; + } + olapTable.replaceTempPartitions(replaceTempPartitionLog.getPartitions(), + replaceTempPartitionLog.getTempPartitions(), + replaceTempPartitionLog.isStrictRange(), + replaceTempPartitionLog.useTempPartitionName()); + } catch (DdlException e) { + LOG.warn("should not happen.", e); } finally { locker.unLockDatabase(db.getId(), LockType.WRITE); } } - public void replayModifyTableProperty(short opCode, ModifyTablePropertyOperationLog info) { + public void replayRecoverPartition(RecoverInfo info) { long dbId = info.getDbId(); - long tableId = info.getTableId(); - Map properties = info.getProperties(); - String comment = info.getComment(); - Database db = getDb(dbId); Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.WRITE); try { - OlapTable olapTable = (OlapTable) getTable(db.getId(), tableId); - if (opCode == OperationType.OP_SET_FORBIDDEN_GLOBAL_DICT) { - String enAble = properties.get(PropertyAnalyzer.ENABLE_LOW_CARD_DICT_TYPE); - Preconditions.checkState(enAble != null); - if (olapTable != null) { - if 
(enAble.equals(PropertyAnalyzer.DISABLE_LOW_CARD_DICT)) { - olapTable.setHasForbiddenGlobalDict(true); - IDictManager.getInstance().disableGlobalDict(olapTable.getId()); - } else { - olapTable.setHasForbiddenGlobalDict(false); - IDictManager.getInstance().enableGlobalDict(olapTable.getId()); - } - } - } else { - TableProperty tableProperty = olapTable.getTableProperty(); - if (tableProperty == null) { - tableProperty = new TableProperty(properties); - olapTable.setTableProperty(tableProperty.buildProperty(opCode)); - } else { - tableProperty.modifyTableProperties(properties); - tableProperty.buildProperty(opCode); - } - - if (StringUtils.isNotEmpty(comment)) { - olapTable.setComment(comment); - } - - // need to replay partition info meta - if (opCode == OperationType.OP_MODIFY_IN_MEMORY) { - for (Partition partition : olapTable.getPartitions()) { - olapTable.getPartitionInfo().setIsInMemory(partition.getId(), tableProperty.isInMemory()); - } - } else if (opCode == OperationType.OP_MODIFY_REPLICATION_NUM) { - // update partition replication num if this table is unpartitioned table - PartitionInfo partitionInfo = olapTable.getPartitionInfo(); - if (partitionInfo.getType() == PartitionType.UNPARTITIONED) { - String partitionName = olapTable.getName(); - Partition partition = olapTable.getPartition(partitionName); - if (partition != null) { - partitionInfo.setReplicationNum(partition.getId(), tableProperty.getReplicationNum()); - } - } - } else if (opCode == OperationType.OP_MODIFY_ENABLE_PERSISTENT_INDEX) { - olapTable.setEnablePersistentIndex(tableProperty.enablePersistentIndex()); - if (olapTable.isCloudNativeTable()) { - olapTable.setPersistentIndexType(tableProperty.getPersistentIndexType()); - } - } else if (opCode == OperationType.OP_MODIFY_PRIMARY_INDEX_CACHE_EXPIRE_SEC) { - olapTable.setPrimaryIndexCacheExpireSec(tableProperty.primaryIndexCacheExpireSec()); - } else if (opCode == OperationType.OP_MODIFY_BINLOG_CONFIG) { - if (!olapTable.isBinlogEnabled()) { - olapTable.clearBinlogAvailableVersion(); - } - } - } - } catch (Exception ex) { - LOG.warn("The replay log failed and this log was ignored.", ex); + Table table = getTable(db.getId(), info.getTableId()); + GlobalStateMgr.getCurrentState().getRecycleBin().replayRecoverPartition((OlapTable) table, info.getPartitionId()); } finally { locker.unLockDatabase(db.getId(), LockType.WRITE); } } @Override - public void createView(CreateViewStmt stmt) throws DdlException { - String dbName = stmt.getDbName(); - String tableName = stmt.getTable(); + public void modifyPartition(ModifyPartitionInfo info) { + GlobalStateMgr.getCurrentState().getEditLog().logModifyPartition(info); + } + + public void replayModifyPartition(ModifyPartitionInfo info) { + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(info.getDbId()); + OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(db.getId(), info.getTableId()); - // check if db exists - Database db = this.getDb(stmt.getDbName()); - if (db == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName); - } - // check if table exists in db - boolean existed = false; Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.READ); + locker.lockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.WRITE); try { - if (getTable(db.getFullName(), tableName) != null) { - existed = true; - if (stmt.isSetIfNotExists()) { - LOG.info("create view[{}] which already exists", tableName); - 
return; - } else if (stmt.isReplace()) { - LOG.info("view {} already exists, need to replace it", tableName); - } else { - ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName); + PartitionInfo partitionInfo = olapTable.getPartitionInfo(); + if (info.getDataProperty() != null) { + partitionInfo.setDataProperty(info.getPartitionId(), info.getDataProperty()); + } + if (info.getReplicationNum() != (short) -1) { + short replicationNum = info.getReplicationNum(); + partitionInfo.setReplicationNum(info.getPartitionId(), replicationNum); + // update default replication num if this table is unpartitioned table + if (partitionInfo.getType() == PartitionType.UNPARTITIONED) { + olapTable.setReplicationNum(replicationNum); } } + partitionInfo.setIsInMemory(info.getPartitionId(), info.isInMemory()); } finally { - locker.unLockDatabase(db.getId(), LockType.READ); - } - - if (existed) { - // already existed, need to alter the view - AlterViewStmt alterViewStmt = AlterViewStmt.fromReplaceStmt(stmt); - new AlterJobExecutor().process(alterViewStmt, ConnectContext.get()); - LOG.info("replace view {} successfully", tableName); - } else { - List columns = stmt.getColumns(); - long tableId = getNextId(); - View view = new View(tableId, tableName, columns); - view.setComment(stmt.getComment()); - view.setInlineViewDefWithSqlMode(stmt.getInlineViewDef(), - ConnectContext.get().getSessionVariable().getSqlMode()); - // init here in case the stmt string from view.toSql() has some syntax error. - try { - view.init(); - } catch (UserException e) { - throw new DdlException("failed to init view stmt", e); - } - - onCreate(db, view, "", stmt.isSetIfNotExists()); - LOG.info("successfully create view[" + tableName + "-" + view.getId() + "]"); + locker.unLockTablesWithIntensiveDbLock(db.getId(), Lists.newArrayList(olapTable.getId()), LockType.WRITE); } } - public void replayUpdateClusterAndBackends(BackendIdsUpdateInfo info) { - for (long id : info.getBackendList()) { - final Backend backend = stateMgr.getNodeMgr().getClusterInfo().getBackend(id); - backend.setDecommissioned(false); - backend.setBackendState(Backend.BackendState.free); - } + @Override + public void setPartitionVersion(PartitionVersionRecoveryInfo info) { + GlobalStateMgr.getCurrentState().getEditLog().logRecoverPartitionVersion(info); } - /* - * Truncate specified table or partitions. - * The main idea is: - * - * 1. using the same schema to create new table(partitions) - * 2. use the new created table(partitions) to replace the old ones. - * - * if no partition specified, it will truncate all partitions of this table, including all temp partitions, - * otherwise, it will only truncate those specified partitions. 
- * - */ @Override - public void truncateTable(TruncateTableStmt truncateTableStmt, ConnectContext context) throws DdlException { - TableRef tblRef = truncateTableStmt.getTblRef(); - TableName dbTbl = tblRef.getName(); - // check, and save some info which need to be checked again later - Map origPartitions = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER); - OlapTable copiedTbl; - Database db = getDb(dbTbl.getDb()); - if (db == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbTbl.getDb()); - } + public void addSubPartitionLog(AddSubPartitionsInfoV2 addSubPartitionsInfo) { + GlobalStateMgr.getCurrentState().getEditLog().logAddSubPartitions(addSubPartitionsInfo); + } - boolean truncateEntireTable = tblRef.getPartitionNames() == null; + public void replayAddSubPartition(PhysicalPartitionPersistInfoV2 info) throws DdlException { + Database db = this.getDb(info.getDbId()); Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.READ); + locker.lockDatabase(db.getId(), LockType.WRITE); try { - Table table = MetaUtils.getSessionAwareTable(context, db, dbTbl); - if (table == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, dbTbl.getTbl()); - } - - if (!table.isOlapOrCloudNativeTable()) { - throw new DdlException("Only support truncate OLAP table or LAKE table"); - } - - OlapTable olapTable = (OlapTable) table; - if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { - throw InvalidOlapTableStateException.of(olapTable.getState(), olapTable.getName()); - } + OlapTable olapTable = (OlapTable) getTable(db.getId(), info.getTableId()); + Partition partition = olapTable.getPartition(info.getPartitionId()); + PhysicalPartition physicalPartition = info.getPhysicalPartition(); + partition.addSubPartition(physicalPartition); + olapTable.addPhysicalPartition(physicalPartition); - if (!truncateEntireTable) { - for (String partName : tblRef.getPartitionNames().getPartitionNames()) { - Partition partition = olapTable.getPartition(partName); - if (partition == null) { - throw new DdlException("Partition " + partName + " does not exist"); + if (!isCheckpointThread()) { + // add to inverted index + TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); + for (MaterializedIndex index : physicalPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + long indexId = index.getId(); + int schemaHash = olapTable.getSchemaHashByIndexId(indexId); + TabletMeta tabletMeta = new TabletMeta(info.getDbId(), info.getTableId(), info.getPartitionId(), + physicalPartition.getId(), index.getId(), schemaHash, olapTable.getPartitionInfo().getDataProperty( + info.getPartitionId()).getStorageMedium(), false); + for (Tablet tablet : index.getTablets()) { + long tabletId = tablet.getId(); + invertedIndex.addTablet(tabletId, tabletMeta); + // modify some logic + if (tablet instanceof LocalTablet) { + for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { + invertedIndex.addReplica(tabletId, replica); + } + } } - - origPartitions.put(partName, partition); - GlobalStateMgr.getCurrentState().getAnalyzeMgr().recordDropPartition(partition.getId()); - } - } else { - for (Partition partition : olapTable.getPartitions()) { - origPartitions.put(partition.getName(), partition); - GlobalStateMgr.getCurrentState().getAnalyzeMgr().recordDropPartition(partition.getId()); } } - - copiedTbl = getShadowCopyTable(olapTable); } finally { - locker.unLockDatabase(db.getId(), LockType.READ); + 
locker.unLockDatabase(db.getId(), LockType.WRITE); } + } - // 2. use the copied table to create partitions - List newPartitions = Lists.newArrayListWithCapacity(origPartitions.size()); - // tabletIdSet to save all newly created tablet ids. - Set tabletIdSet = Sets.newHashSet(); - try { - for (Map.Entry entry : origPartitions.entrySet()) { - long oldPartitionId = entry.getValue().getId(); - long newPartitionId = getNextId(); - String newPartitionName = entry.getKey(); - - PartitionInfo partitionInfo = copiedTbl.getPartitionInfo(); - partitionInfo.setTabletType(newPartitionId, partitionInfo.getTabletType(oldPartitionId)); - partitionInfo.setIsInMemory(newPartitionId, partitionInfo.getIsInMemory(oldPartitionId)); - partitionInfo.setReplicationNum(newPartitionId, partitionInfo.getReplicationNum(oldPartitionId)); - partitionInfo.setDataProperty(newPartitionId, partitionInfo.getDataProperty(oldPartitionId)); - - if (copiedTbl.isCloudNativeTable()) { - partitionInfo.setDataCacheInfo(newPartitionId, - partitionInfo.getDataCacheInfo(oldPartitionId)); - } + @Override + public List getAllPhysicalPartition(Partition partition) { + return new ArrayList<>(partition.getSubPartitions()); + } - copiedTbl.setDefaultDistributionInfo(entry.getValue().getDistributionInfo()); + @Override + public PhysicalPartition getPhysicalPartition(Partition partition, Long physicalPartitionId) { + return partition.getSubPartition(physicalPartitionId); + } - Partition newPartition = - createPartition(db, copiedTbl, newPartitionId, newPartitionName, null, tabletIdSet, - ConnectContext.get().getCurrentWarehouseId()); - newPartitions.add(newPartition); - } - buildPartitions(db, copiedTbl, newPartitions.stream().map(Partition::getSubPartitions) - .flatMap(p -> p.stream()).collect(Collectors.toList()), ConnectContext.get().getCurrentWarehouseId()); - } catch (DdlException e) { - deleteUselessTablets(tabletIdSet); - throw e; - } - Preconditions.checkState(origPartitions.size() == newPartitions.size()); + @Override + public void addPhysicalPartition(Partition partition, PhysicalPartition physicalPartition) { + partition.addSubPartition(physicalPartition); + } - // all partitions are created successfully, try to replace the old partitions. - // before replacing, we need to check again. - // Things may be changed outside the database lock. 
- locker.lockDatabase(db.getId(), LockType.WRITE); - try { - OlapTable olapTable = (OlapTable) getTable(db.getId(), copiedTbl.getId()); - if (olapTable == null) { - throw new DdlException("Table[" + copiedTbl.getName() + "] is dropped"); - } + @Override + public void dropPhysicalPartition(Partition partition, Long physicalPartitionId) { + partition.removeSubPartition(physicalPartitionId); + } - if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { - throw InvalidOlapTableStateException.of(olapTable.getState(), olapTable.getName()); - } + @Override + public List getMaterializedIndices(PhysicalPartition physicalPartition, + MaterializedIndex.IndexExtState indexExtState) { + return physicalPartition.getMaterializedIndices(indexExtState); + } - // check partitions - for (Map.Entry entry : origPartitions.entrySet()) { - Partition partition = olapTable.getPartition(entry.getValue().getId()); - if (partition == null || !partition.getName().equalsIgnoreCase(entry.getKey())) { - throw new DdlException("Partition [" + entry.getKey() + "] is changed during truncating table, " + - "please retry"); - } - } + @Override + public MaterializedIndex getMaterializedIndex(PhysicalPartition physicalPartition, Long mIndexId) { + return physicalPartition.getIndex(mIndexId); + } - // check if meta changed - // rollup index may be added or dropped, and schema may be changed during creating partition operation. - boolean metaChanged = false; - if (olapTable.getIndexNameToId().size() != copiedTbl.getIndexNameToId().size()) { - metaChanged = true; - } else { - // compare schemaHash - Map copiedIndexIdToSchemaHash = copiedTbl.getIndexIdToSchemaHash(); - for (Map.Entry entry : olapTable.getIndexIdToSchemaHash().entrySet()) { - long indexId = entry.getKey(); - if (!copiedIndexIdToSchemaHash.containsKey(indexId)) { - metaChanged = true; - break; - } - if (!copiedIndexIdToSchemaHash.get(indexId).equals(entry.getValue())) { - metaChanged = true; - break; - } - } - } + @Override + public void addMaterializedIndex(PhysicalPartition physicalPartition, MaterializedIndex materializedIndex) { + physicalPartition.createRollupIndex(materializedIndex); + } - if (olapTable.getDefaultDistributionInfo().getType() != copiedTbl.getDefaultDistributionInfo().getType()) { - metaChanged = true; - } + @Override + public void dropMaterializedIndex(PhysicalPartition physicalPartition, Long mIndexId) { + physicalPartition.deleteRollupIndex(mIndexId); + } - if (metaChanged) { - throw new DdlException("Table[" + copiedTbl.getName() + "]'s meta has been changed. 
try again."); - } + @Override + public void dropRollup(DropInfo dropInfo) { + GlobalStateMgr.getCurrentState().getEditLog().logDropRollup(dropInfo); + } - // replace - truncateTableInternal(olapTable, newPartitions, truncateEntireTable, false); + public void replayDropRollup(DropInfo dropInfo, GlobalStateMgr globalStateMgr) { + Database db = globalStateMgr.getLocalMetastore().getDb(dropInfo.getDbId()); + long tableId = dropInfo.getTableId(); + long rollupIndexId = dropInfo.getIndexId(); + OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getId(), tableId); - try { - colocateTableIndex.updateLakeTableColocationInfo(olapTable, true /* isJoin */, - null /* expectGroupId */); - } catch (DdlException e) { - LOG.info("table {} update colocation info failed when truncate table, {}", olapTable.getId(), e.getMessage()); - } + try (AutoCloseableLock ignore = + new AutoCloseableLock(new Locker(), db.getId(), Lists.newArrayList(tableId), LockType.WRITE)) { + TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); - // write edit log - TruncateTableInfo info = new TruncateTableInfo(db.getId(), olapTable.getId(), newPartitions, - truncateEntireTable); - GlobalStateMgr.getCurrentState().getEditLog().logTruncateTable(info); - - // refresh mv - Set relatedMvs = olapTable.getRelatedMaterializedViews(); - for (MvId mvId : relatedMvs) { - MaterializedView materializedView = (MaterializedView) getTable(db.getId(), mvId.getId()); - if (materializedView == null) { - LOG.warn("Table related materialized view {} can not be found", mvId.getId()); - continue; - } - if (materializedView.isLoadTriggeredRefresh()) { - refreshMaterializedView(db.getFullName(), getTable(db.getId(), mvId.getId()).getName(), false, null, - Constants.TaskRunPriority.NORMAL.value(), true, false); + for (PhysicalPartition partition : olapTable.getPhysicalPartitions()) { + MaterializedIndex rollupIndex = partition.deleteRollupIndex(rollupIndexId); + + if (!GlobalStateMgr.isCheckpointThread()) { + // remove from inverted index + for (Tablet tablet : rollupIndex.getTablets()) { + invertedIndex.deleteTablet(tablet.getId()); + } } } - } catch (DdlException e) { - deleteUselessTablets(tabletIdSet); - throw e; - } catch (MetaNotFoundException e) { - LOG.warn("Table related materialized view can not be found", e); - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); - } - - LOG.info("finished to truncate table {}, partitions: {}", - tblRef.getName().toSql(), tblRef.getPartitionNames()); - } - private void deleteUselessTablets(Set tabletIdSet) { - // create partition failed, remove all newly created tablets. - // For lakeTable, shards cleanup is taken care in ShardDeleter. - for (Long tabletId : tabletIdSet) { - GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteTablet(tabletId); + String rollupIndexName = olapTable.getIndexNameById(rollupIndexId); + olapTable.deleteIndexInfo(rollupIndexName); } + LOG.info("replay drop rollup {}", dropInfo.getIndexId()); } - private void truncateTableInternal(OlapTable olapTable, List newPartitions, - boolean isEntireTable, boolean isReplay) { - // use new partitions to replace the old ones. 
- Set oldTablets = Sets.newHashSet(); - for (Partition newPartition : newPartitions) { - Partition oldPartition = olapTable.replacePartition(newPartition); - for (PhysicalPartition physicalPartition : oldPartition.getSubPartitions()) { - // save old tablets to be removed - for (MaterializedIndex index : physicalPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { - // let HashSet do the deduplicate work - oldTablets.addAll(index.getTablets()); - } - } - } - - if (isEntireTable) { - // drop all temp partitions - olapTable.dropAllTempPartitions(); - } + @Override + public void batchDropRollup(BatchDropInfo batchDropInfo) { + GlobalStateMgr.getCurrentState().getEditLog().logBatchDropRollup(batchDropInfo); + } - // remove the tablets in old partitions - for (Tablet tablet : oldTablets) { - TabletInvertedIndex index = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); - index.deleteTablet(tablet.getId()); - // Ensure that only the leader records truncate information. - // TODO(yangzaorang): the information will be lost when failover occurs. The probability of this case - // happening is small, and the trash data will be deleted by BE anyway, but we need to find a better - // solution. - if (!isReplay) { - index.markTabletForceDelete(tablet); - } - } + @Override + public void renameRollup(TableInfo tableInfo) { + replayRenameRollup(tableInfo); + GlobalStateMgr.getCurrentState().getEditLog().logRollupRename(tableInfo); } - public void replayTruncateTable(TruncateTableInfo info) { - Database db = getDb(info.getDbId()); + public void replayRenameRollup(TableInfo tableInfo) { + long dbId = tableInfo.getDbId(); + long tableId = tableInfo.getTableId(); + long indexId = tableInfo.getIndexId(); + String newRollupName = tableInfo.getNewRollupName(); + + Database db = getDb(dbId); Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.WRITE); try { - OlapTable olapTable = (OlapTable) getTable(db.getId(), info.getTblId()); - truncateTableInternal(olapTable, info.getPartitions(), info.isEntireTable(), true); + OlapTable table = (OlapTable) getTable(db.getId(), tableId); + String rollupName = table.getIndexNameById(indexId); + Map indexNameToIdMap = table.getIndexNameToId(); + indexNameToIdMap.remove(rollupName); + indexNameToIdMap.put(newRollupName, indexId); - if (!GlobalStateMgr.isCheckpointThread()) { - // add tablet to inverted index - TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); - for (Partition partition : info.getPartitions()) { - long partitionId = partition.getId(); - TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty( - partitionId).getStorageMedium(); - for (PhysicalPartition physicalPartition : partition.getSubPartitions()) { - for (MaterializedIndex mIndex : physicalPartition.getMaterializedIndices( - MaterializedIndex.IndexExtState.ALL)) { - long indexId = mIndex.getId(); - int schemaHash = olapTable.getSchemaHashByIndexId(indexId); - TabletMeta tabletMeta = new TabletMeta(db.getId(), olapTable.getId(), - physicalPartition.getId(), indexId, schemaHash, medium, - olapTable.isCloudNativeTableOrMaterializedView()); - for (Tablet tablet : mIndex.getTablets()) { - long tabletId = tablet.getId(); - invertedIndex.addTablet(tabletId, tabletMeta); - if (olapTable.isOlapTable()) { - for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { - invertedIndex.addReplica(tabletId, replica); - } - } - } - } - } - } - } + LOG.info("replay rename rollup[{}] to {}", rollupName, 
newRollupName); } finally { locker.unLockDatabase(db.getId(), LockType.WRITE); } } - public void replayBackendTabletsInfo(BackendTabletsInfo backendTabletsInfo) { - List> tabletsWithSchemaHash = backendTabletsInfo.getTabletSchemaHash(); - if (!tabletsWithSchemaHash.isEmpty()) { - // In previous version, we save replica info in `tabletsWithSchemaHash`, - // but it is wrong because we can not get replica from `tabletInvertedIndex` when doing checkpoint, - // because when doing checkpoint, the tabletInvertedIndex is not initialized at all. - // - // So we can only discard this information, in this case, it is equivalent to losing the record of these operations. - // But it doesn't matter, these records are currently only used to record whether a replica is in a bad state. - // This state has little effect on the system, and it can be restored after the system has processed the bad state replica. - for (Pair tabletInfo : tabletsWithSchemaHash) { - LOG.warn("find an old backendTabletsInfo for tablet {}, ignore it", tabletInfo.first); - } + @Override + public List getAllTablets(MaterializedIndex materializedIndex) { + return materializedIndex.getTablets(); + } + + @Override + public List getAllTabletIDs(MaterializedIndex materializedIndex) { + return materializedIndex.getTabletIdsInOrder(); + } + + @Override + public Tablet getTablet(MaterializedIndex materializedIndex, Long tabletId) { + return materializedIndex.getTablet(tabletId); + } + + @Override + public List getAllReplicas(Tablet tablet) { + return tablet.getAllReplicas(); + } + + @Override + public Replica getReplica(LocalTablet tablet, Long replicaId) { + return tablet.getReplicaById(replicaId); + } + + @Override + public void addReplica(ReplicaPersistInfo info) { + replayAddReplica(info); + GlobalStateMgr.getCurrentState().getEditLog().logAddReplica(info); + } + + public void replayAddReplica(ReplicaPersistInfo info) { + StarRocksMetadata starRocksMetadata = GlobalStateMgr.getServingState().getStarRocksMetadata(); + + Database db = starRocksMetadata.getDbIncludeRecycleBin(info.getDbId()); + if (db == null) { + LOG.warn("replay add replica failed, db is null, info: {}", info); return; } - - // in new version, replica info is saved here. - // but we need to get replica from db->tbl->partition->... 
- List replicaPersistInfos = backendTabletsInfo.getReplicaPersistInfos(); - for (ReplicaPersistInfo info : replicaPersistInfos) { - long dbId = info.getDbId(); - Database db = getDb(dbId); - if (db == null) { - continue; + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + OlapTable olapTable = (OlapTable) starRocksMetadata.getTableIncludeRecycleBin(db, info.getTableId()); + if (olapTable == null) { + LOG.warn("replay add replica failed, table is null, info: {}", info); + return; } - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - OlapTable tbl = (OlapTable) getTable(db.getId(), info.getTableId()); - if (tbl == null) { - continue; - } - Partition partition = tbl.getPartition(info.getPartitionId()); - if (partition == null) { - continue; - } - MaterializedIndex mindex = partition.getIndex(info.getIndexId()); - if (mindex == null) { - continue; - } - LocalTablet tablet = (LocalTablet) mindex.getTablet(info.getTabletId()); - if (tablet == null) { - continue; - } - Replica replica = tablet.getReplicaById(info.getReplicaId()); - if (replica != null) { - replica.setBad(true); - LOG.debug("get replica {} of tablet {} on backend {} to bad when replaying", - info.getReplicaId(), info.getTabletId(), info.getBackendId()); - } - } finally { - locker.unLockDatabase(db.getId(), LockType.WRITE); + PhysicalPartition partition = starRocksMetadata + .getPhysicalPartitionIncludeRecycleBin(olapTable, info.getPartitionId()); + if (partition == null) { + LOG.warn("replay add replica failed, partition is null, info: {}", info); + return; + } + MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId()); + if (materializedIndex == null) { + LOG.warn("replay add replica failed, materializedIndex is null, info: {}", info); + return; + } + LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(info.getTabletId()); + if (tablet == null) { + LOG.warn("replay add replica failed, tablet is null, info: {}", info); + return; } - } - } - // Convert table's distribution type from random to hash. - // random distribution is no longer supported. - public void convertDistributionType(Database db, OlapTable tbl) throws DdlException { - TableInfo tableInfo = TableInfo.createForModifyDistribution(db.getId(), tbl.getId()); - GlobalStateMgr.getCurrentState().getEditLog().logModifyDistributionType(tableInfo); - LOG.info("finished to modify distribution type of table: " + tbl.getName()); - } + // for compatibility + int schemaHash = info.getSchemaHash(); + if (schemaHash == -1) { + schemaHash = olapTable.getSchemaHashByIndexId(info.getIndexId()); + } - public void replayConvertDistributionType(TableInfo tableInfo) { - Database db = getDb(tableInfo.getDbId()); - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.WRITE); - try { - OlapTable tbl = (OlapTable) getTable(db.getId(), tableInfo.getTableId()); - LOG.info("replay modify distribution type of table: " + tbl.getName()); + Replica replica = new Replica(info.getReplicaId(), info.getBackendId(), info.getVersion(), + schemaHash, info.getDataSize(), info.getRowCount(), + Replica.ReplicaState.NORMAL, + info.getLastFailedVersion(), + info.getLastSuccessVersion(), + info.getMinReadableVersion()); + tablet.addReplica(replica); } finally { locker.unLockDatabase(db.getId(), LockType.WRITE); } } - /* - * The entry of replacing partitions with temp partitions. 
- */ - public void replaceTempPartition(Database db, String tableName, ReplacePartitionClause clause) throws DdlException { - List partitionNames = clause.getPartitionNames(); - // duplicate temp partition will cause Incomplete transaction - List tempPartitionNames = - clause.getTempPartitionNames().stream().distinct().collect(Collectors.toList()); - - boolean isStrictRange = clause.isStrictRange(); - boolean useTempPartitionName = clause.useTempPartitionName(); + @Override + public void deleteReplica(ReplicaPersistInfo replicaPersistInfo) { + replayDeleteReplica(replicaPersistInfo); + GlobalStateMgr.getCurrentState().getEditLog().logDeleteReplica(replicaPersistInfo); + } + + public void replayDeleteReplica(ReplicaPersistInfo info) { + StarRocksMetadata starRocksMetadata = GlobalStateMgr.getServingState().getStarRocksMetadata(); + + Database db = starRocksMetadata.getDbIncludeRecycleBin(info.getDbId()); + if (db == null) { + LOG.warn("replay delete replica failed, db is null, info: {}", info); + return; + } Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.WRITE); try { - Table table = getTable(db.getFullName(), tableName); - if (table == null) { - ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName); + OlapTable olapTable = (OlapTable) starRocksMetadata.getTableIncludeRecycleBin(db, info.getTableId()); + if (olapTable == null) { + LOG.warn("replay delete replica failed, table is null, info: {}", info); + return; } - - if (!table.isOlapOrCloudNativeTable()) { - throw new DdlException("Table[" + tableName + "] is not OLAP table or LAKE table"); + PhysicalPartition partition = starRocksMetadata + .getPhysicalPartitionIncludeRecycleBin(olapTable, info.getPartitionId()); + if (partition == null) { + LOG.warn("replay delete replica failed, partition is null, info: {}", info); + return; } - - OlapTable olapTable = (OlapTable) table; - // check partition exist - for (String partName : partitionNames) { - if (!olapTable.checkPartitionNameExist(partName, false)) { - throw new DdlException("Partition[" + partName + "] does not exist"); - } + MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId()); + if (materializedIndex == null) { + LOG.warn("replay delete replica failed, materializedIndex is null, info: {}", info); + return; } - for (String partName : tempPartitionNames) { - if (!olapTable.checkPartitionNameExist(partName, true)) { - throw new DdlException("Temp partition[" + partName + "] does not exist"); - } + LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(info.getTabletId()); + if (tablet == null) { + LOG.warn("replay delete replica failed, tablet is null, info: {}", info); + return; } - - partitionNames.stream().forEach(e -> - GlobalStateMgr.getCurrentState().getAnalyzeMgr().recordDropPartition(olapTable.getPartition(e).getId())); - olapTable.replaceTempPartitions(partitionNames, tempPartitionNames, isStrictRange, useTempPartitionName); - - // write log - ReplacePartitionOperationLog info = new ReplacePartitionOperationLog(db.getId(), olapTable.getId(), - partitionNames, tempPartitionNames, isStrictRange, useTempPartitionName); - GlobalStateMgr.getCurrentState().getEditLog().logReplaceTempPartition(info); - LOG.info("finished to replace partitions {} with temp partitions {} from table: {}", - clause.getPartitionNames(), clause.getTempPartitionNames(), tableName); + tablet.deleteReplicaByBackendId(info.getBackendId()); } finally { locker.unLockDatabase(db.getId(), LockType.WRITE); } } - public void 
replayReplaceTempPartition(ReplacePartitionOperationLog replaceTempPartitionLog) { - Database db = getDb(replaceTempPartitionLog.getDbId()); + @Override + public void batchDeleteReplicaInfo(BatchDeleteReplicaInfo replicaPersistInfo) { + GlobalStateMgr.getCurrentState().getEditLog().logBatchDeleteReplica(replicaPersistInfo); + } + + public void replayBatchDeleteReplica(BatchDeleteReplicaInfo info) { + if (info.getReplicaInfoList() != null) { + for (ReplicaPersistInfo persistInfo : info.getReplicaInfoList()) { + replayDeleteReplica(persistInfo); + } + } else { + LOG.warn("invalid BatchDeleteReplicaInfo, replicaInfoList is null"); + } + } + + @Override + public void updateReplica(ReplicaPersistInfo replicaPersistInfo) { + replayUpdateReplica(replicaPersistInfo); + GlobalStateMgr.getCurrentState().getEditLog().logUpdateReplica(replicaPersistInfo); + } + + public void replayUpdateReplica(ReplicaPersistInfo info) { + StarRocksMetadata starRocksMetadata = GlobalStateMgr.getServingState().getStarRocksMetadata(); + + Database db = starRocksMetadata.getDbIncludeRecycleBin(info.getDbId()); if (db == null) { + LOG.warn("replay update replica failed, db is null, info: {}", info); return; } Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.WRITE); try { - OlapTable olapTable = (OlapTable) getTable(db.getId(), replaceTempPartitionLog.getTblId()); + OlapTable olapTable = (OlapTable) starRocksMetadata.getTableIncludeRecycleBin(db, info.getTableId()); if (olapTable == null) { + LOG.warn("replay update replica failed, table is null, info: {}", info); return; } - if (replaceTempPartitionLog.isUnPartitionedTable()) { - olapTable.replacePartition(replaceTempPartitionLog.getPartitions().get(0), - replaceTempPartitionLog.getTempPartitions().get(0)); + PhysicalPartition partition = starRocksMetadata + .getPhysicalPartitionIncludeRecycleBin(olapTable, info.getPartitionId()); + if (partition == null) { + LOG.warn("replay update replica failed, partition is null, info: {}", info); return; } - olapTable.replaceTempPartitions(replaceTempPartitionLog.getPartitions(), - replaceTempPartitionLog.getTempPartitions(), - replaceTempPartitionLog.isStrictRange(), - replaceTempPartitionLog.useTempPartitionName()); - } catch (DdlException e) { - LOG.warn("should not happen.", e); + MaterializedIndex materializedIndex = partition.getIndex(info.getIndexId()); + if (materializedIndex == null) { + LOG.warn("replay update replica failed, materializedIndex is null, info: {}", info); + return; + } + LocalTablet tablet = (LocalTablet) materializedIndex.getTablet(info.getTabletId()); + if (tablet == null) { + LOG.warn("replay update replica failed, tablet is null, info: {}", info); + return; + } + Replica replica = tablet.getReplicaByBackendId(info.getBackendId()); + if (replica == null) { + LOG.warn("replay update replica failed, replica is null, info: {}", info); + return; + } + replica.updateRowCount(info.getVersion(), info.getMinReadableVersion(), info.getDataSize(), info.getRowCount()); + replica.setBad(false); } finally { locker.unLockDatabase(db.getId(), LockType.WRITE); } } - // entry of checking tablets operation - public void checkTablets(AdminCheckTabletsStmt stmt) { - AdminCheckTabletsStmt.CheckType type = stmt.getType(); - if (type == AdminCheckTabletsStmt.CheckType.CONSISTENCY) { - stateMgr.getConsistencyChecker().addTabletsToCheck(stmt.getTabletIds()); - } - } - - // Set specified replica's status. If replica does not exist, just ignore it. 
- public void setReplicaStatus(AdminSetReplicaStatusStmt stmt) { - long tabletId = stmt.getTabletId(); - long backendId = stmt.getBackendId(); - Replica.ReplicaStatus status = stmt.getStatus(); - setReplicaStatusInternal(tabletId, backendId, status, false); + @Override + public void setReplicaStatus(SetReplicaStatusOperationLog log) { + GlobalStateMgr.getCurrentState().getEditLog().logSetReplicaStatus(log); } public void replaySetReplicaStatus(SetReplicaStatusOperationLog log) { - setReplicaStatusInternal(log.getTabletId(), log.getBackendId(), log.getReplicaStatus(), true); - } + long tabletId = log.getTabletId(); + long backendId = log.getBackendId(); + Replica.ReplicaStatus status = log.getReplicaStatus(); - private void setReplicaStatusInternal(long tabletId, long backendId, Replica.ReplicaStatus status, - boolean isReplay) { - TabletMeta meta = stateMgr.getTabletInvertedIndex().getTabletMeta(tabletId); + TabletInvertedIndex tabletInvertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); + TabletMeta meta = tabletInvertedIndex.getTabletMeta(tabletId); if (meta == null) { LOG.info("tablet {} does not exist", tabletId); return; } long dbId = meta.getDbId(); - Database db = getDb(dbId); + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); if (db == null) { LOG.info("database {} of tablet {} does not exist", dbId, tabletId); return; @@ -4590,248 +2012,151 @@ private void setReplicaStatusInternal(long tabletId, long backendId, Replica.Rep Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.WRITE); try { - Replica replica = stateMgr.getTabletInvertedIndex().getReplica(tabletId, backendId); + Replica replica = tabletInvertedIndex.getReplica(tabletId, backendId); if (replica == null) { LOG.info("replica of tablet {} does not exist", tabletId); return; } if (status == Replica.ReplicaStatus.BAD || status == Replica.ReplicaStatus.OK) { - if (replica.setBadForce(status == Replica.ReplicaStatus.BAD)) { - if (!isReplay) { - // Put this tablet into urgent table so that it can be repaired ASAP. - stateMgr.getTabletChecker().setTabletForUrgentRepair(dbId, meta.getTableId(), - meta.getPartitionId()); - SetReplicaStatusOperationLog log = - new SetReplicaStatusOperationLog(backendId, tabletId, status); - GlobalStateMgr.getCurrentState().getEditLog().logSetReplicaStatus(log); - } - LOG.info("set replica {} of tablet {} on backend {} as {}. 
is replay: {}", - replica.getId(), tabletId, backendId, status, isReplay); - } + replica.setBadForce(status == Replica.ReplicaStatus.BAD); } } finally { locker.unLockDatabase(db.getId(), LockType.WRITE); } } - public void setPartitionVersion(AdminSetPartitionVersionStmt stmt) { - Database database = getDb(stmt.getTableName().getDb()); - if (database == null) { - throw ErrorReportException.report(ErrorCode.ERR_BAD_DB_ERROR, stmt.getTableName().getDb()); - } - Locker locker = new Locker(); - locker.lockDatabase(database.getId(), LockType.WRITE); - try { - Table table = getTable(database.getFullName(), stmt.getTableName().getTbl()); - if (table == null) { - throw ErrorReportException.report(ErrorCode.ERR_BAD_TABLE_ERROR, stmt.getTableName().getTbl()); - } - if (!table.isOlapTableOrMaterializedView()) { - throw ErrorReportException.report(ErrorCode.ERR_NOT_OLAP_TABLE, stmt.getTableName().getTbl()); + @Override + public void backendTabletsInfo(BackendTabletsInfo backendTabletsInfo) { + replayBackendTabletsInfo(backendTabletsInfo); + GlobalStateMgr.getCurrentState().getEditLog().logBackendTabletsInfo(backendTabletsInfo); + } + + public void replayBackendTabletsInfo(BackendTabletsInfo backendTabletsInfo) { + List> tabletsWithSchemaHash = backendTabletsInfo.getTabletSchemaHash(); + if (!tabletsWithSchemaHash.isEmpty()) { + // In previous version, we save replica info in `tabletsWithSchemaHash`, + // but it is wrong because we can not get replica from `tabletInvertedIndex` when doing checkpoint, + // because when doing checkpoint, the tabletInvertedIndex is not initialized at all. + // + // So we can only discard this information, in this case, it is equivalent to losing the record of these operations. + // But it doesn't matter, these records are currently only used to record whether a replica is in a bad state. + // This state has little effect on the system, and it can be restored after the system has processed the bad state replica. + for (Pair tabletInfo : tabletsWithSchemaHash) { + LOG.warn("find an old backendTabletsInfo for tablet {}, ignore it", tabletInfo.first); } + return; + } - PhysicalPartition physicalPartition; - OlapTable olapTable = (OlapTable) table; - if (stmt.getPartitionId() != -1) { - physicalPartition = olapTable.getPhysicalPartition(stmt.getPartitionId()); - if (physicalPartition == null) { - throw ErrorReportException.report(ErrorCode.ERR_NO_SUCH_PARTITION, stmt.getPartitionName()); + // in new version, replica info is saved here. + // but we need to get replica from db->tbl->partition->... 
+ List replicaPersistInfos = backendTabletsInfo.getReplicaPersistInfos(); + for (ReplicaPersistInfo info : replicaPersistInfos) { + long dbId = info.getDbId(); + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbId); + if (db == null) { + continue; + } + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); + try { + OlapTable tbl = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(db.getId(), info.getTableId()); + if (tbl == null) { + continue; } - } else { - Partition partition = olapTable.getPartition(stmt.getPartitionName()); + Partition partition = tbl.getPartition(info.getPartitionId()); if (partition == null) { - throw ErrorReportException.report(ErrorCode.ERR_NO_SUCH_PARTITION, stmt.getPartitionName()); + continue; + } + MaterializedIndex mindex = partition.getDefaultPhysicalPartition().getIndex(info.getIndexId()); + if (mindex == null) { + continue; } - if (partition.getSubPartitions().size() >= 2) { - throw ErrorReportException.report(ErrorCode.ERR_MULTI_SUB_PARTITION, stmt.getPartitionName()); + LocalTablet tablet = (LocalTablet) mindex.getTablet(info.getTabletId()); + if (tablet == null) { + continue; } - physicalPartition = partition; - } - - long visibleVersionTime = System.currentTimeMillis(); - physicalPartition.setVisibleVersion(stmt.getVersion(), visibleVersionTime); - physicalPartition.setNextVersion(stmt.getVersion() + 1); - - PartitionVersion partitionVersion = new PartitionVersion(database.getId(), table.getId(), - physicalPartition.getId(), stmt.getVersion()); - for (MaterializedIndex index : physicalPartition.getMaterializedIndices(IndexExtState.VISIBLE)) { - for (Tablet tablet : index.getTablets()) { - if (!(tablet instanceof LocalTablet)) { - continue; - } - - LocalTablet localTablet = (LocalTablet) tablet; - for (Replica replica : localTablet.getAllReplicas()) { - if (replica.getVersion() > stmt.getVersion() && localTablet.getAllReplicas().size() > 1) { - replica.setBad(true); - LOG.warn("set tablet: {} on backend: {} to bad, " + - "because its version: {} is higher than partition visible version: {}", - tablet.getId(), replica.getBackendId(), replica.getVersion(), stmt.getVersion()); - } - } + Replica replica = tablet.getReplicaById(info.getReplicaId()); + if (replica != null) { + replica.setBad(true); + LOG.debug("get replica {} of tablet {} on backend {} to bad when replaying", + info.getReplicaId(), info.getTabletId(), info.getBackendId()); } + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); } - GlobalStateMgr.getCurrentState().getEditLog().logRecoverPartitionVersion( - new PartitionVersionRecoveryInfo(Lists.newArrayList(partitionVersion), visibleVersionTime)); - LOG.info("Successfully set partition: {} version to {}, table: {}, db: {}", - stmt.getPartitionName(), stmt.getVersion(), table.getName(), database.getFullName()); - } finally { - locker.unLockDatabase(database.getId(), LockType.WRITE); } } - public void onEraseDatabase(long dbId) { - // remove database transaction manager - stateMgr.getGlobalTransactionMgr().removeDatabaseTransactionMgr(dbId); - // unbind db to storage volume - stateMgr.getStorageVolumeMgr().unbindDbToStorageVolume(dbId); - } - - public void onErasePartition(Partition partition) { - // remove tablet in inverted index - TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { - for (Tablet tablet : 
index.getTablets()) { - long tabletId = tablet.getId(); - invertedIndex.deleteTablet(tabletId); - } - } + @Override + public void finishConsistencyCheck(ConsistencyCheckInfo info) { + replayFinishConsistencyCheck(info); + GlobalStateMgr.getCurrentState().getEditLog().logFinishConsistencyCheckNoWait(info); } - // for test only - @VisibleForTesting - public void clear() { - if (idToDb != null) { - idToDb.clear(); + public void replayFinishConsistencyCheck(ConsistencyCheckInfo info) { + Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(info.getDbId()); + if (db == null) { + LOG.warn("replay finish consistency check failed, db is null, info: {}", info); + return; } - if (fullNameToDb != null) { - fullNameToDb.clear(); + OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(db.getId(), info.getTableId()); + if (table == null) { + LOG.warn("replay finish consistency check failed, table is null, info: {}", info); + return; } - System.gc(); - } - @VisibleForTesting - public OlapTable getCopiedTable(Database db, OlapTable olapTable, List sourcePartitionIds, - Map origPartitions, boolean isOptimize) { - OlapTable copiedTbl; - Locker locker = new Locker(); - locker.lockDatabase(db.getId(), LockType.READ); - try { - if (olapTable.getState() != OlapTable.OlapTableState.NORMAL) { - if (!isOptimize || olapTable.getState() != OlapTable.OlapTableState.SCHEMA_CHANGE) { - throw new RuntimeException("Table' state is not NORMAL: " + olapTable.getState() - + ", tableId:" + olapTable.getId() + ", tabletName:" + olapTable.getName()); - } - } - for (Long id : sourcePartitionIds) { - origPartitions.put(id, olapTable.getPartition(id).getName()); + try (AutoCloseableLock ignore + = new AutoCloseableLock(new Locker(), db.getId(), Lists.newArrayList(table.getId()), LockType.WRITE)) { + Partition partition = table.getPartition(info.getPartitionId()); + if (partition == null) { + LOG.warn("replay finish consistency check failed, partition is null, info: {}", info); + return; } - copiedTbl = getShadowCopyTable(olapTable); - } finally { - locker.unLockDatabase(db.getId(), LockType.READ); - } - return copiedTbl; - } - - @VisibleForTesting - public OlapTable getCopiedTable(Database db, OlapTable olapTable, List sourcePartitionIds, - Map origPartitions) { - return getCopiedTable(db, olapTable, sourcePartitionIds, origPartitions, false); - } - - private OlapTable getShadowCopyTable(OlapTable olapTable) { - OlapTable copiedTable; - if (olapTable instanceof LakeMaterializedView) { - copiedTable = new LakeMaterializedView(); - } else if (olapTable instanceof MaterializedView) { - copiedTable = new MaterializedView(); - } else if (olapTable instanceof LakeTable) { - copiedTable = new LakeTable(); - } else { - copiedTable = new OlapTable(); - } - - olapTable.copyOnlyForQuery(copiedTable); - return copiedTable; - } - - @VisibleForTesting - public List getNewPartitionsFromPartitions(Database db, OlapTable olapTable, - List sourcePartitionIds, - Map origPartitions, OlapTable copiedTbl, - String namePostfix, Set tabletIdSet, - List tmpPartitionIds, DistributionDesc distributionDesc, - long warehouseId) - throws DdlException { - List newPartitions = Lists.newArrayListWithCapacity(sourcePartitionIds.size()); - for (int i = 0; i < sourcePartitionIds.size(); ++i) { - long newPartitionId = tmpPartitionIds.get(i); - long sourcePartitionId = sourcePartitionIds.get(i); - String newPartitionName = origPartitions.get(sourcePartitionId) + namePostfix; - if 
(olapTable.checkPartitionNameExist(newPartitionName, true)) { - // to prevent creating the same partitions when failover - // this will happen when OverwriteJob crashed after created temp partitions, - // but before changing to PREPARED state - LOG.warn("partition:{} already exists in table:{}", newPartitionName, olapTable.getName()); - continue; + MaterializedIndex index = partition.getDefaultPhysicalPartition().getIndex(info.getIndexId()); + if (index == null) { + LOG.warn("replay finish consistency check failed, index is null, info: {}", info); + return; } - PartitionInfo partitionInfo = copiedTbl.getPartitionInfo(); - partitionInfo.setTabletType(newPartitionId, partitionInfo.getTabletType(sourcePartitionId)); - partitionInfo.setIsInMemory(newPartitionId, partitionInfo.getIsInMemory(sourcePartitionId)); - partitionInfo.setReplicationNum(newPartitionId, partitionInfo.getReplicationNum(sourcePartitionId)); - partitionInfo.setDataProperty(newPartitionId, partitionInfo.getDataProperty(sourcePartitionId)); - if (copiedTbl.isCloudNativeTableOrMaterializedView()) { - partitionInfo.setDataCacheInfo(newPartitionId, partitionInfo.getDataCacheInfo(sourcePartitionId)); + LocalTablet tablet = (LocalTablet) GlobalStateMgr.getCurrentState() + .getLocalMetastore().getTablet(index, info.getTabletId()); + if (tablet == null) { + LOG.warn("replay finish consistency check failed, tablet is null, info: {}", info); + return; } - Partition newPartition = null; - if (distributionDesc != null) { - DistributionInfo distributionInfo = distributionDesc.toDistributionInfo(olapTable.getColumns()); - if (distributionInfo.getBucketNum() == 0) { - Partition sourcePartition = olapTable.getPartition(sourcePartitionId); - olapTable.optimizeDistribution(distributionInfo, sourcePartition); - } - newPartition = createPartition( - db, copiedTbl, newPartitionId, newPartitionName, null, tabletIdSet, distributionInfo, warehouseId); - } else { - newPartition = createPartition(db, copiedTbl, newPartitionId, newPartitionName, null, tabletIdSet, warehouseId); - } + long lastCheckTime = info.getLastCheckTime(); + db.setLastCheckTime(lastCheckTime); + table.setLastCheckTime(lastCheckTime); + partition.setLastCheckTime(lastCheckTime); + index.setLastCheckTime(lastCheckTime); + tablet.setLastCheckTime(lastCheckTime); + tablet.setCheckedVersion(info.getCheckedVersion()); - newPartitions.add(newPartition); + tablet.setIsConsistent(info.isConsistent()); } - return newPartitions; } - // create new partitions from source partitions. - // new partitions have the same indexes as source partitions. - public List createTempPartitionsFromPartitions(Database db, Table table, - String namePostfix, List sourcePartitionIds, - List tmpPartitionIds, DistributionDesc distributionDesc, - long warehouseId) { - Preconditions.checkState(table instanceof OlapTable); - OlapTable olapTable = (OlapTable) table; - Map origPartitions = Maps.newHashMap(); - OlapTable copiedTbl = getCopiedTable(db, olapTable, sourcePartitionIds, origPartitions, distributionDesc != null); - copiedTbl.setDefaultDistributionInfo(olapTable.getDefaultDistributionInfo()); - - // 2. use the copied table to create partitions - List newPartitions = null; - // tabletIdSet to save all newly created tablet ids. 
- Set tabletIdSet = Sets.newHashSet(); + public void replayModifyHiveTableColumn(short opCode, ModifyTableColumnOperationLog info) { + if (info.getDbName() == null) { + return; + } + String hiveExternalDb = info.getDbName(); + String hiveExternalTable = info.getTableName(); + LOG.info("replayModifyTableColumn hiveDb:{},hiveTable:{}", hiveExternalDb, hiveExternalTable); + List columns = info.getColumns(); + Database db = getDb(hiveExternalDb); + HiveTable table; + Locker locker = new Locker(); + locker.lockDatabase(db.getId(), LockType.WRITE); try { - newPartitions = getNewPartitionsFromPartitions(db, olapTable, sourcePartitionIds, origPartitions, - copiedTbl, namePostfix, tabletIdSet, tmpPartitionIds, distributionDesc, warehouseId); - buildPartitions(db, copiedTbl, newPartitions.stream().map(Partition::getSubPartitions) - .flatMap(p -> p.stream()).collect(Collectors.toList()), warehouseId); - } catch (Exception e) { - // create partition failed, remove all newly created tablets - for (Long tabletId : tabletIdSet) { - GlobalStateMgr.getCurrentState().getTabletInvertedIndex().deleteTablet(tabletId); - } - LOG.warn("create partitions from partitions failed.", e); - throw new RuntimeException("create partitions failed: " + e.getMessage(), e); + Table tbl = getTable(db.getFullName(), hiveExternalTable); + table = (HiveTable) tbl; + table.setNewFullSchema(columns); + } finally { + locker.unLockDatabase(db.getId(), LockType.WRITE); } - return newPartitions; } public void replayDeleteAutoIncrementId(AutoIncrementInfo info) throws IOException { @@ -4967,7 +2292,7 @@ public void load(SRMetaBlockReader reader) throws IOException, SRMetaBlockExcept tableIdToIncrementId.put(tableId, id); } - recreateTabletInvertIndex(); + GlobalStateMgr.getCurrentState().getTabletInvertedIndex().recreateTabletInvertIndex(); GlobalStateMgr.getCurrentState().getEsRepository().loadTableFromCatalog(); } @@ -4976,6 +2301,18 @@ public void handleMVRepair(Database db, Table table, List, Long>> getSamples() { long totalCount = idToDb.values() diff --git a/fe/fe-core/src/main/java/com/starrocks/server/LocalMetastoreReplayer.java b/fe/fe-core/src/main/java/com/starrocks/server/LocalMetastoreReplayer.java new file mode 100644 index 00000000000000..8c030f2f5d0fd0 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/server/LocalMetastoreReplayer.java @@ -0,0 +1,23 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package com.starrocks.server; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +public class LocalMetastoreReplayer { + private static final Logger LOG = LogManager.getLogger(LocalMetastoreReplayer.class); + + +} diff --git a/fe/fe-core/src/main/java/com/starrocks/server/MetadataMgr.java b/fe/fe-core/src/main/java/com/starrocks/server/MetadataMgr.java index e10ada5c431d3d..f7a32bd4bcd458 100644 --- a/fe/fe-core/src/main/java/com/starrocks/server/MetadataMgr.java +++ b/fe/fe-core/src/main/java/com/starrocks/server/MetadataMgr.java @@ -62,6 +62,7 @@ import com.starrocks.connector.metadata.MetadataTable; import com.starrocks.connector.metadata.MetadataTableType; import com.starrocks.connector.statistics.ConnectorTableColumnStats; +import com.starrocks.meta.StarRocksMetadata; import com.starrocks.qe.ConnectContext; import com.starrocks.sql.ast.AlterTableStmt; import com.starrocks.sql.ast.CleanTemporaryTableStmt; @@ -118,7 +119,7 @@ public synchronized ConnectorMetadata getConnectorMetadata(String catalogName, S } } - private final LocalMetastore localMetastore; + private final StarRocksMetadata starRocksMetadata; private final TemporaryTableMgr temporaryTableMgr; private final ConnectorMgr connectorMgr; private final ConnectorTblMetaInfoMgr connectorTblMetaInfoMgr; @@ -148,11 +149,11 @@ public QueryMetadatas load(String key) throws Exception { } }); - public MetadataMgr(LocalMetastore localMetastore, TemporaryTableMgr temporaryTableMgr, ConnectorMgr connectorMgr, + public MetadataMgr(StarRocksMetadata starrocksMetadata, TemporaryTableMgr temporaryTableMgr, ConnectorMgr connectorMgr, ConnectorTblMetaInfoMgr connectorTblMetaInfoMgr) { - Preconditions.checkNotNull(localMetastore, "localMetastore is null"); + Preconditions.checkNotNull(starrocksMetadata, "localMetastore is null"); Preconditions.checkNotNull(temporaryTableMgr, "temporaryTableMgr is null"); - this.localMetastore = localMetastore; + this.starRocksMetadata = starrocksMetadata; this.temporaryTableMgr = temporaryTableMgr; this.connectorMgr = connectorMgr; this.connectorTblMetaInfoMgr = connectorTblMetaInfoMgr; @@ -180,7 +181,7 @@ public Optional getOptionalMetadata(String catalogName) { */ public Optional getOptionalMetadata(Optional queryId, String catalogName) { if (Strings.isNullOrEmpty(catalogName) || CatalogMgr.isInternalCatalog(catalogName)) { - return Optional.of(localMetastore); + return Optional.of(starRocksMetadata); } CatalogConnector connector = connectorMgr.getConnector(catalogName); @@ -253,7 +254,7 @@ public Database getDb(String catalogName, String dbName) { } public Database getDb(Long databaseId) { - return localMetastore.getDb(databaseId); + return starRocksMetadata.getDb(databaseId); } public List listTableNames(String catalogName, String dbName) { @@ -475,7 +476,7 @@ public void cleanTemporaryTables(UUID sessionId) { com.google.common.collect.Table allTables = temporaryTableMgr.getTemporaryTables(sessionId); for (Long databaseId : allTables.rowKeySet()) { - Database database = localMetastore.getDb(databaseId); + Database database = starRocksMetadata.getDb(databaseId); if (database == null) { // database maybe dropped by force, we should clean temporary tables on it. 
temporaryTableMgr.dropTemporaryTables(sessionId, databaseId); @@ -484,7 +485,8 @@ public void cleanTemporaryTables(UUID sessionId) { Map tables = allTables.row(databaseId); tables.forEach((tableName, tableId) -> { try { - database.dropTemporaryTable(tableId, tableName, true, true); + starRocksMetadata.dropTemporaryTable(database.getFullName(), + tableId, tableName, true, true); temporaryTableMgr.dropTemporaryTable(sessionId, database.getId(), tableName); } catch (DdlException e) { LOG.error("Failed to drop temporary table {}.{} in session {}", @@ -539,7 +541,7 @@ public Optional getDatabase(BaseTableInfo baseTableInfo) { public Optional
getTable(BaseTableInfo baseTableInfo) { if (baseTableInfo.isInternalCatalog()) { - return Optional.ofNullable(localMetastore.getTable(baseTableInfo.getDbId(), baseTableInfo.getTableId())); + return Optional.ofNullable(starRocksMetadata.getTable(baseTableInfo.getDbId(), baseTableInfo.getTableId())); } else { return Optional.ofNullable( getTable(baseTableInfo.getCatalogName(), baseTableInfo.getDbName(), baseTableInfo.getTableName())); @@ -576,11 +578,11 @@ public Table getTemporaryTable(UUID sessionId, String catalogName, Long database if (tableId == null) { return null; } - Database database = localMetastore.getDb(databaseId); + Database database = starRocksMetadata.getDb(databaseId); if (database == null) { return null; } - return localMetastore.getTable(database.getId(), tableId); + return starRocksMetadata.getTable(database.getId(), tableId); } public boolean tableExists(String catalogName, String dbName, String tblName) { diff --git a/fe/fe-core/src/main/java/com/starrocks/server/OlapTableFactory.java b/fe/fe-core/src/main/java/com/starrocks/server/OlapTableFactory.java index 65376890e29cba..e0a4c1f0bab543 100644 --- a/fe/fe-core/src/main/java/com/starrocks/server/OlapTableFactory.java +++ b/fe/fe-core/src/main/java/com/starrocks/server/OlapTableFactory.java @@ -94,8 +94,7 @@ private OlapTableFactory() { @Override @NotNull public Table createTable(LocalMetastore metastore, Database db, CreateTableStmt stmt) throws DdlException { - GlobalStateMgr stateMgr = metastore.getStateMgr(); - ColocateTableIndex colocateTableIndex = metastore.getColocateTableIndex(); + ColocateTableIndex colocateTableIndex = GlobalStateMgr.getCurrentState().getColocateTableIndex(); String tableName = stmt.getTableName(); if (stmt instanceof CreateTemporaryTableStmt) { @@ -107,7 +106,7 @@ public Table createTable(LocalMetastore metastore, Database db, CreateTableStmt // create columns List baseSchema = stmt.getColumns(); - metastore.validateColumns(baseSchema); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().validateColumns(baseSchema); // create partition info PartitionDesc partitionDesc = stmt.getPartitionDesc(); @@ -118,18 +117,18 @@ public Table createTable(LocalMetastore metastore, Database db, CreateTableStmt if (partitionDesc instanceof RangePartitionDesc) { RangePartitionDesc rangePartitionDesc = (RangePartitionDesc) partitionDesc; for (SingleRangePartitionDesc desc : rangePartitionDesc.getSingleRangePartitionDescs()) { - long partitionId = metastore.getNextId(); + long partitionId = GlobalStateMgr.getCurrentState().getNextId(); partitionNameToId.put(desc.getPartitionName(), partitionId); } } else if (partitionDesc instanceof ListPartitionDesc) { ListPartitionDesc listPartitionDesc = (ListPartitionDesc) partitionDesc; - listPartitionDesc.findAllPartitionNames() - .forEach(partitionName -> partitionNameToId.put(partitionName, metastore.getNextId())); + listPartitionDesc.findAllPartitionNames().forEach( + partitionName -> partitionNameToId.put(partitionName, GlobalStateMgr.getCurrentState().getNextId())); } else if (partitionDesc instanceof ExpressionPartitionDesc) { ExpressionPartitionDesc expressionPartitionDesc = (ExpressionPartitionDesc) partitionDesc; for (SingleRangePartitionDesc desc : expressionPartitionDesc.getRangePartitionDesc() .getSingleRangePartitionDescs()) { - long partitionId = metastore.getNextId(); + long partitionId = GlobalStateMgr.getCurrentState().getNextId(); partitionNameToId.put(desc.getPartitionName(), partitionId); } @@ -143,7 +142,7 @@ public Table 
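In the OlapTableFactory hunks above, every metastore-scoped helper becomes a global one: ids come from GlobalStateMgr.getCurrentState().getNextId() and column/storage helpers move to getStarRocksMetadata(). The following is a small hedged sketch of the id-allocation pattern only; the helper name and the plain HashMap are illustrative assumptions.

    // Illustrative only: per-partition id allocation after the refactor (was metastore.getNextId()).
    Map<String, Long> allocatePartitionIds(List<String> partitionNames) {
        Map<String, Long> partitionNameToId = new HashMap<>();
        for (String name : partitionNames) {
            partitionNameToId.put(name, GlobalStateMgr.getCurrentState().getNextId());
        }
        return partitionNameToId;
    }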
createTable(LocalMetastore metastore, Database db, CreateTableStmt // Automatic partitioning needs to ensure that at least one tablet is opened. if (partitionInfo.isAutomaticPartition()) { - long partitionId = metastore.getNextId(); + long partitionId = GlobalStateMgr.getCurrentState().getNextId(); String replicateNum = String.valueOf(RunMode.defaultReplicationNum()); if (stmt.getProperties() != null) { replicateNum = stmt.getProperties().getOrDefault("replication_num", @@ -157,7 +156,7 @@ public Table createTable(LocalMetastore metastore, Database db, CreateTableStmt if (DynamicPartitionUtil.checkDynamicPartitionPropertiesExist(stmt.getProperties())) { throw new DdlException("Only support dynamic partition properties on range partition table"); } - long partitionId = metastore.getNextId(); + long partitionId = GlobalStateMgr.getCurrentState().getNextId(); // use table name as single partition name partitionNameToId.put(tableName, partitionId); partitionInfo = new SinglePartitionInfo(); @@ -232,7 +231,8 @@ public Table createTable(LocalMetastore metastore, Database db, CreateTableStmt throw new DdlException(String.format("Storage volume %s not exists", volume)); } String storageVolumeId = svm.getStorageVolumeIdOfTable(tableId); - metastore.setLakeStorageInfo(db, table, storageVolumeId, properties); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .setLakeStorageInfo(db, table, storageVolumeId, properties); } else { table = new OlapTable(tableId, tableName, baseSchema, keysType, partitionInfo, distributionInfo, indexes); } @@ -251,7 +251,7 @@ public Table createTable(LocalMetastore metastore, Database db, CreateTableStmt table.setComment(stmt.getComment()); // set base index id - long baseIndexId = metastore.getNextId(); + long baseIndexId = GlobalStateMgr.getCurrentState().getNextId(); table.setBaseIndexId(baseIndexId); // get use light schema change @@ -581,12 +581,12 @@ public Table createTable(LocalMetastore metastore, Database db, CreateTableStmt } Preconditions.checkNotNull(rollupIndexStorageType); // set rollup index meta to olap table - List rollupColumns = stateMgr.getRollupHandler().checkAndPrepareMaterializedView(addRollupClause, - table, baseRollupIndex); + List rollupColumns = GlobalStateMgr.getCurrentState() + .getRollupHandler().checkAndPrepareMaterializedView(addRollupClause, table, baseRollupIndex); short rollupShortKeyColumnCount = GlobalStateMgr.calcShortKeyColumnCount(rollupColumns, addRollupClause.getProperties()); int rollupSchemaHash = Util.schemaHash(schemaVersion, rollupColumns, bfColumns, bfFpp); - long rollupIndexId = metastore.getNextId(); + long rollupIndexId = GlobalStateMgr.getCurrentState().getNextId(); table.setIndexMeta(rollupIndexId, addRollupClause.getRollupName(), rollupColumns, schemaVersion, rollupSchemaHash, rollupShortKeyColumnCount, rollupIndexStorageType, keysType); } @@ -655,9 +655,12 @@ public Table createTable(LocalMetastore metastore, Database db, CreateTableStmt // this is a 1-level partitioned table, use table name as partition name long partitionId = partitionNameToId.get(tableName); - Partition partition = metastore.createPartition(db, table, partitionId, tableName, version, tabletIdSet, - warehouseId); - metastore.buildPartitions(db, table, partition.getSubPartitions().stream().collect(Collectors.toList()), + Partition partition = GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .createPartition(db, table, partitionId, tableName, version, tabletIdSet, warehouseId); + 
GlobalStateMgr.getCurrentState().getStarRocksMetadata().buildPartitions( + db, + table, + partition.getSubPartitions().stream().collect(Collectors.toList()), warehouseId); table.addPartition(partition); } else if (partitionInfo.isRangePartition() || partitionInfo.getType() == PartitionType.LIST) { @@ -693,12 +696,14 @@ public Table createTable(LocalMetastore metastore, Database db, CreateTableStmt // this is a 2-level partitioned tables List partitions = new ArrayList<>(partitionNameToId.size()); for (Map.Entry entry : partitionNameToId.entrySet()) { - Partition partition = metastore.createPartition(db, table, entry.getValue(), entry.getKey(), version, + Partition partition = GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .createPartition(db, table, entry.getValue(), entry.getKey(), version, tabletIdSet, warehouseId); partitions.add(partition); } // It's ok if partitions is empty. - metastore.buildPartitions(db, table, partitions.stream().map(Partition::getSubPartitions) + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .buildPartitions(db, table, partitions.stream().map(Partition::getSubPartitions) .flatMap(p -> p.stream()).collect(Collectors.toList()), warehouseId); for (Partition partition : partitions) { table.addPartition(partition); diff --git a/fe/fe-core/src/main/java/com/starrocks/server/SharedDataStorageVolumeMgr.java b/fe/fe-core/src/main/java/com/starrocks/server/SharedDataStorageVolumeMgr.java index e42a83cd6299cb..49881a48ca30ef 100644 --- a/fe/fe-core/src/main/java/com/starrocks/server/SharedDataStorageVolumeMgr.java +++ b/fe/fe-core/src/main/java/com/starrocks/server/SharedDataStorageVolumeMgr.java @@ -325,10 +325,10 @@ protected List> getBindingsOfBuiltinStorageVolume() { List> bindings = new ArrayList<>(); List tableBindings = new ArrayList<>(); List dbBindings = new ArrayList<>(); - List dbIds = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIdsIncludeRecycleBin().stream() + List dbIds = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIdsIncludeRecycleBin().stream() .filter(dbid -> dbid > NEXT_ID_INIT_VALUE).collect(Collectors.toList()); for (Long dbId : dbIds) { - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(dbId); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(dbId); Locker locker = new Locker(); locker.lockDatabase(db.getId(), LockType.READ); if (dbToStorageVolume.containsKey(dbId)) { @@ -336,7 +336,7 @@ protected List> getBindingsOfBuiltinStorageVolume() { } dbBindings.add(dbId); try { - List
tables = GlobalStateMgr.getCurrentState().getLocalMetastore().getTablesIncludeRecycleBin(db); + List<Table>
tables = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTablesIncludeRecycleBin(db); for (Table table : tables) { Long tableId = table.getId(); if (!tableToStorageVolume.containsKey(tableId) && table.isCloudNativeTableOrMaterializedView()) { diff --git a/fe/fe-core/src/main/java/com/starrocks/service/FrontendServiceImpl.java b/fe/fe-core/src/main/java/com/starrocks/service/FrontendServiceImpl.java index 3f2395790c1c69..85f9d1d35bb1e2 100644 --- a/fe/fe-core/src/main/java/com/starrocks/service/FrontendServiceImpl.java +++ b/fe/fe-core/src/main/java/com/starrocks/service/FrontendServiceImpl.java @@ -600,7 +600,7 @@ public TListPipesResult listPipes(TListPipesParams params) throws TException { Map pipes = pm.getPipesUnlock(); TListPipesResult result = new TListPipesResult(); for (Pipe pipe : pipes.values()) { - String databaseName = GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(pipe.getPipeId().getDbId()) + String databaseName = GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetDb(pipe.getPipeId().getDbId()) .map(Database::getOriginName) .orElse(null); @@ -648,7 +648,7 @@ public TListPipeFilesResult listPipeFiles(TListPipeFilesParams params) throws TE file.setPipe_id(record.pipeId); file.setDatabase_name( mayPipe.flatMap(p -> - GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(p.getDbAndName().first) + GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetDb(p.getDbAndName().first) .map(Database::getOriginName)) .orElse("")); file.setPipe_name(mayPipe.map(Pipe::getName).orElse("")); @@ -1965,7 +1965,7 @@ public synchronized TImmutablePartitionResult updateImmutablePartitionInternal(T locker.unLockDatabase(db.getId(), LockType.READ); } if (mutablePartitions.size() <= 0) { - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .addSubPartitions(db, olapTable, partition, 1, warehouseId); } } @@ -2236,7 +2236,7 @@ private static TCreatePartitionResult createPartitionProcess(TCreatePartitionReq try { if (olapTable.getState() == OlapTable.OlapTableState.ROLLUP) { LOG.info("cancel rollup for automatic create partition txn_id={}", request.getTxn_id()); - state.getLocalMetastore().cancelAlter( + state.getAlterJobMgr().cancelAlter( new CancelAlterTableStmt( ShowAlterStmt.AlterType.ROLLUP, new TableName(db.getFullName(), olapTable.getName())), @@ -2245,7 +2245,7 @@ private static TCreatePartitionResult createPartitionProcess(TCreatePartitionReq if (olapTable.getState() == OlapTable.OlapTableState.SCHEMA_CHANGE) { LOG.info("cancel schema change for automatic create partition txn_id={}", request.getTxn_id()); - state.getLocalMetastore().cancelAlter( + state.getAlterJobMgr().cancelAlter( new CancelAlterTableStmt( ShowAlterStmt.AlterType.COLUMN, new TableName(db.getFullName(), olapTable.getName())), @@ -2265,7 +2265,7 @@ private static TCreatePartitionResult createPartitionProcess(TCreatePartitionReq AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(olapTable); analyzer.analyze(ctx, addPartitionClause); } - state.getLocalMetastore().addPartitions(ctx, db, olapTable.getName(), addPartitionClause); + state.getStarRocksMetadata().addPartitions(ctx, db, olapTable.getName(), addPartitionClause); } catch (Exception e) { LOG.warn("failed to cancel alter operation", e); errorStatus.setError_msgs(Lists.newArrayList( @@ -2331,7 +2331,8 @@ private static TCreatePartitionResult buildCreatePartitionResponse(OlapTable ola buildPartitionInfo(olapTable, partitions, partition, tPartition, 
txnState); // tablet int quorum = olapTable.getPartitionInfo().getQuorumNum(partition.getId(), olapTable.writeQuorum()); - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { if (olapTable.isCloudNativeTable()) { for (Tablet tablet : index.getTablets()) { LakeTablet cloudNativeTablet = (LakeTablet) tablet; @@ -2444,7 +2445,8 @@ private static void buildPartitionInfo(OlapTable olapTable, List createTab sb.append(entry.getValue().lowerEndpoint().toSql()); sb.append(", ").append(entry.getValue().upperEndpoint().toSql()).append(")"); sb.append("(\"version_info\" = \""); - sb.append(partition.getVisibleVersion()).append("\""); + sb.append(partition.getDefaultPhysicalPartition().getVisibleVersion()).append("\""); sb.append(");"); addPartitionStmt.add(sb.toString()); } diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/analyzer/ExpressionAnalyzer.java b/fe/fe-core/src/main/java/com/starrocks/sql/analyzer/ExpressionAnalyzer.java index 814b9a06a2e584..508a92d31bb825 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/analyzer/ExpressionAnalyzer.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/analyzer/ExpressionAnalyzer.java @@ -1566,7 +1566,7 @@ public Void visitDictQueryExpr(DictQueryExpr node, Scope context) { nullIfNotFoundIdx = params.size() - 1; } else { throw new SemanticException(String.format("dict_mapping function param size should be %d - %d", - keyColumns.size() + 1, keyColumns.size() + 3)); + keyColumns.size() + 1, keyColumns.size() + 3)); } String valueField; @@ -1630,7 +1630,7 @@ public Void visitDictQueryExpr(DictQueryExpr node, Scope context) { List actualTypeNames = actualTypes.stream().map(Type::canonicalName).collect(Collectors.toList()); throw new SemanticException( String.format("dict_mapping function params not match expected,\nExpect: %s\nActual: %s", - String.join(", ", expectTypeNames), String.join(", ", actualTypeNames))); + String.join(", ", expectTypeNames), String.join(", ", actualTypeNames))); } Expr castExpr = new CastExpr(expectedType, actual); @@ -1645,7 +1645,8 @@ public Void visitDictQueryExpr(DictQueryExpr node, Scope context) { dictQueryExpr.setTbl_name(tableName.getTbl()); Map partitionVersion = new HashMap<>(); - dictTable.getPartitions().forEach(p -> partitionVersion.put(p.getId(), p.getVisibleVersion())); + dictTable.getPartitions().forEach(p -> + partitionVersion.put(p.getId(), p.getDefaultPhysicalPartition().getVisibleVersion())); dictQueryExpr.setPartition_version(partitionVersion); List keyFields = keyColumns.stream().map(Column::getName).collect(Collectors.toList()); @@ -1705,19 +1706,19 @@ public Void visitDictionaryGetExpr(DictionaryGetExpr node, Scope context) { int paramDictionaryKeysSize = params.size() - 1; if (!(paramDictionaryKeysSize == dictionaryKeysSize || paramDictionaryKeysSize == dictionaryKeysSize + 1)) { throw new SemanticException("dictionary: " + dictionaryName + " has expected keys size: " + - Integer.toString(dictionaryKeysSize) + " keys: " + - "[" + String.join(", ", dictionaryKeys) + "]" + - " plus null_if_not_exist flag(optional)" + - " but param given: " + Integer.toString(paramDictionaryKeysSize)); + Integer.toString(dictionaryKeysSize) + " keys: " + + "[" + String.join(", ", dictionaryKeys) + "]" + + " plus null_if_not_exist flag(optional)" + + " but param given: " + Integer.toString(paramDictionaryKeysSize)); } if 
(paramDictionaryKeysSize == dictionaryKeysSize + 1 && !(params.get(params.size() - 1) instanceof BoolLiteral)) { throw new SemanticException("dictionary: " + dictionaryName + " has invalid parameter for `null_if_not_exist` " - + "invalid parameter: " + params.get(params.size() - 1).toString()); + + "invalid parameter: " + params.get(params.size() - 1).toString()); } Table table = GlobalStateMgr.getCurrentState().getMetadataMgr().getTable( - dictionary.getCatalogName(), dictionary.getDbName(), dictionary.getQueryableObject()); + dictionary.getCatalogName(), dictionary.getDbName(), dictionary.getQueryableObject()); if (table == null) { throw new SemanticException("dict table %s is not found", table.getName()); } @@ -1761,7 +1762,7 @@ public Void visitDictionaryGetExpr(DictionaryGetExpr node, Scope context) { } boolean nullIfNotExist = (paramDictionaryKeysSize == dictionaryKeysSize + 1) ? - ((BoolLiteral) params.get(params.size() - 1)).getValue() : false; + ((BoolLiteral) params.get(params.size() - 1)).getValue() : false; node.setNullIfNotExist(nullIfNotExist); node.setDictionaryId(dictionary.getDictionaryId()); node.setDictionaryTxnId(GlobalStateMgr.getCurrentState().getDictionaryMgr(). diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/ast/pipe/ShowPipeStmt.java b/fe/fe-core/src/main/java/com/starrocks/sql/ast/pipe/ShowPipeStmt.java index 388185a5110125..a7359cdcc96975 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/ast/pipe/ShowPipeStmt.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/ast/pipe/ShowPipeStmt.java @@ -69,7 +69,7 @@ public ShowPipeStmt(String dbName, String like, Expr where, List * NOTE: Must be consistent with the META_DATA */ public static void handleShow(List row, Pipe pipe) { - Optional db = GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(pipe.getPipeId().getDbId()); + Optional db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetDb(pipe.getPipeId().getDbId()); row.add(db.map(Database::getFullName).orElse("")); row.add(String.valueOf(pipe.getPipeId().getId())); row.add(pipe.getName()); diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/function/MetaFunctions.java b/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/function/MetaFunctions.java index 4834bfa05e10f8..b4d6e432055e0f 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/function/MetaFunctions.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/function/MetaFunctions.java @@ -101,9 +101,9 @@ public static Table inspectExternalTable(TableName tableName) { } public static Pair inspectTable(TableName tableName) { - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(tableName.getDb()) + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetDb(tableName.getDb()) .orElseThrow(() -> ErrorReport.buildSemanticException(ErrorCode.ERR_BAD_DB_ERROR, tableName.getDb())); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetTable(tableName.getDb(), tableName.getTbl()) + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetTable(tableName.getDb(), tableName.getTbl()) .orElseThrow(() -> ErrorReport.buildSemanticException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName)); ConnectContext connectContext = ConnectContext.get(); try { @@ -167,7 +167,7 @@ public static ConstantOperator inspectRelatedMv(ConstantOperator name) { Optional mayDb; Table table = inspectExternalTable(tableName); if (table.isNativeTableOrMaterializedView()) { - mayDb = 
GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(tableName.getDb()); + mayDb = GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetDb(tableName.getDb()); } else { mayDb = Optional.empty(); } @@ -179,7 +179,7 @@ public static ConstantOperator inspectRelatedMv(ConstantOperator name) { Set relatedMvs = table.getRelatedMaterializedViews(); JsonArray array = new JsonArray(); for (MvId mv : SetUtils.emptyIfNull(relatedMvs)) { - String mvName = GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetTable(mv.getDbId(), mv.getId()) + String mvName = GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetTable(mv.getDbId(), mv.getId()) .map(Table::getName) .orElse(null); JsonObject obj = new JsonObject(); @@ -246,7 +246,7 @@ public static ConstantOperator inspectAllPipes() { ConnectContext connectContext = ConnectContext.get(); authOperatorPrivilege(); String currentDb = connectContext.getDatabase(); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().mayGetDb(connectContext.getDatabase()) + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().mayGetDb(connectContext.getDatabase()) .orElseThrow(() -> ErrorReport.buildSemanticException(ErrorCode.ERR_BAD_DB_ERROR, currentDb)); String json = GlobalStateMgr.getCurrentState().getPipeManager().getPipesOfDb(db.getId()); return ConstantOperator.createVarchar(json); diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/rule/mv/MaterializedViewRule.java b/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/rule/mv/MaterializedViewRule.java index f82b0cf3bedb99..8cfeea2d30f070 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/rule/mv/MaterializedViewRule.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/rule/mv/MaterializedViewRule.java @@ -504,7 +504,7 @@ private long selectBestRowCountIndex(Set indexesMatchingBestPrefixIndex, O for (Long indexId : indexesMatchingBestPrefixIndex) { long rowCount = 0; for (Partition partition : olapTable.getPartitions()) { - rowCount += partition.getIndex(indexId).getRowCount(); + rowCount += partition.getDefaultPhysicalPartition().getIndex(indexId).getRowCount(); } if (rowCount < minRowCount) { minRowCount = rowCount; @@ -783,7 +783,7 @@ private boolean aggFunctionsMatchAggColumns(Map columnToIds, return queryExprList.stream() .allMatch(x -> canRewriteQueryAggFunc(x, mvColumnExprList, indexId, - keyColumns, aggregateColumns, usedBaseColumnIds)); + keyColumns, aggregateColumns, usedBaseColumnIds)); } private boolean canRewriteQueryAggFunc(CallOperator queryExpr, diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/rule/tree/AddDecodeNodeForDictStringRule.java b/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/rule/tree/AddDecodeNodeForDictStringRule.java index 3748e1c97c4cea..519f7cc995ffa9 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/rule/tree/AddDecodeNodeForDictStringRule.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/rule/tree/AddDecodeNodeForDictStringRule.java @@ -26,7 +26,6 @@ import com.starrocks.catalog.FunctionSet; import com.starrocks.catalog.KeysType; import com.starrocks.catalog.OlapTable; -import com.starrocks.catalog.Partition; import com.starrocks.catalog.Type; import com.starrocks.common.FeConstants; import com.starrocks.common.Pair; @@ -931,8 +930,8 @@ public OptExpression rewrite(OptExpression root, TaskContext taskContext) { for (PhysicalOlapScanOperator scanOperator : scanOperators) { OlapTable table = (OlapTable) scanOperator.getTable(); - 
long version = table.getPartitions().stream().map(Partition::getVisibleVersionTime).max(Long::compareTo) - .orElse(0L); + long version = table.getPartitions().stream().map(p -> p.getDefaultPhysicalPartition().getVisibleVersionTime()) + .max(Long::compareTo).orElse(0L); if ((table.getKeysType().equals(KeysType.PRIMARY_KEYS))) { continue; diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/rule/tree/lowcardinality/DecodeCollector.java b/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/rule/tree/lowcardinality/DecodeCollector.java index e98409a33246fb..a7aa8681e216ad 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/rule/tree/lowcardinality/DecodeCollector.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/optimizer/rule/tree/lowcardinality/DecodeCollector.java @@ -25,7 +25,6 @@ import com.starrocks.catalog.FunctionSet; import com.starrocks.catalog.KeysType; import com.starrocks.catalog.OlapTable; -import com.starrocks.catalog.Partition; import com.starrocks.catalog.Type; import com.starrocks.common.FeConstants; import com.starrocks.qe.SessionVariable; @@ -521,8 +520,8 @@ public DecodeInfo visitPhysicalDistribution(OptExpression optExpression, DecodeI public DecodeInfo visitPhysicalOlapScan(OptExpression optExpression, DecodeInfo context) { PhysicalOlapScanOperator scan = optExpression.getOp().cast(); OlapTable table = (OlapTable) scan.getTable(); - long version = table.getPartitions().stream().map(Partition::getVisibleVersionTime).max(Long::compareTo) - .orElse(0L); + long version = table.getPartitions().stream().map(p -> p.getDefaultPhysicalPartition().getVisibleVersionTime()) + .max(Long::compareTo).orElse(0L); if ((table.getKeysType().equals(KeysType.PRIMARY_KEYS))) { return DecodeInfo.EMPTY; diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/plan/PlanFragmentBuilder.java b/fe/fe-core/src/main/java/com/starrocks/sql/plan/PlanFragmentBuilder.java index 20a6fff10d70e3..0bc30eead1979b 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/plan/PlanFragmentBuilder.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/plan/PlanFragmentBuilder.java @@ -117,7 +117,6 @@ import com.starrocks.qe.ConnectContext; import com.starrocks.qe.SessionVariable; import com.starrocks.server.GlobalStateMgr; -import com.starrocks.server.LocalMetastore; import com.starrocks.server.RunMode; import com.starrocks.service.FrontendOptions; import com.starrocks.sql.analyzer.DecimalV3FunctionAnalyzer; @@ -220,6 +219,7 @@ import java.util.stream.Collectors; import static com.starrocks.catalog.Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF; +import static com.starrocks.meta.StarRocksMetadata.buildPartitionInfo; import static com.starrocks.sql.common.ErrorType.INTERNAL_ERROR; import static com.starrocks.sql.common.UnsupportedException.unsupportedException; import static com.starrocks.sql.optimizer.operator.scalar.ScalarOperator.isColumnEqualConstant; @@ -265,7 +265,7 @@ public static ExecPlan createPhysicalPlanForMV(ConnectContext connectContext, Collections.reverse(execPlan.getFragments()); // Create a fake table sink here, replaced it after created the MV - PartitionInfo partitionInfo = LocalMetastore.buildPartitionInfo(createStmt); + PartitionInfo partitionInfo = buildPartitionInfo(createStmt); long mvId = GlobalStateMgr.getCurrentState().getNextId(); long dbId = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(createStmt.getTableName().getDb()).getId(); MaterializedView view = GlobalStateMgr.getCurrentState().getMaterializedViewMgr() @@ -870,8 +870,13 @@ public 
PlanFragment visitPhysicalOlapScan(OptExpression optExpr, ExecPlan contex tabletId2BucketSeq.put(allTabletIds.get(i), i); } scanNode.setTabletId2BucketSeq(tabletId2BucketSeq); - List tablets = - selectTabletIds.stream().map(selectedTable::getTablet).collect(Collectors.toList()); + + List tablets = new ArrayList<>(); + for (Long selectTabletId : selectTabletIds) { + Tablet tablet = GlobalStateMgr.getCurrentState().getLocalMetastore().getTablet( + selectedTable, selectTabletId); + tablets.add(tablet); + } scanNode.addScanRangeLocations(partition, physicalPartition, selectedTable, tablets, localBeId); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/statistic/StatisticExecutor.java b/fe/fe-core/src/main/java/com/starrocks/statistic/StatisticExecutor.java index 69a4f2329ed1ee..cefdbc23ed93fc 100644 --- a/fe/fe-core/src/main/java/com/starrocks/statistic/StatisticExecutor.java +++ b/fe/fe-core/src/main/java/com/starrocks/statistic/StatisticExecutor.java @@ -20,7 +20,6 @@ import com.starrocks.catalog.Database; import com.starrocks.catalog.InternalCatalog; import com.starrocks.catalog.OlapTable; -import com.starrocks.catalog.Partition; import com.starrocks.catalog.Table; import com.starrocks.catalog.Type; import com.starrocks.common.AnalysisException; @@ -203,7 +202,7 @@ public static Pair, Status> queryDictSync(Long dbId, Long t } OlapTable olapTable = (OlapTable) table; - long version = olapTable.getPartitions().stream().map(Partition::getVisibleVersionTime) + long version = olapTable.getPartitions().stream().map(p -> p.getDefaultPhysicalPartition().getVisibleVersionTime()) .max(Long::compareTo).orElse(0L); String columnName = MetaUtils.getColumnNameByColumnId(dbId, tableId, columnId); String catalogName = InternalCatalog.DEFAULT_INTERNAL_CATALOG_NAME; diff --git a/fe/fe-core/src/main/java/com/starrocks/statistic/StatisticUtils.java b/fe/fe-core/src/main/java/com/starrocks/statistic/StatisticUtils.java index 430c31ba04c9e0..cbb6d35daaac44 100644 --- a/fe/fe-core/src/main/java/com/starrocks/statistic/StatisticUtils.java +++ b/fe/fe-core/src/main/java/com/starrocks/statistic/StatisticUtils.java @@ -173,7 +173,7 @@ public static void triggerCollectionOnFirstLoad( PhysicalPartition physicalPartition = table.getPhysicalPartition(physicalPartitionId); if (physicalPartition != null) { Partition partition = table.getPartition(physicalPartition.getParentId()); - if (partition != null && partition.isFirstLoad()) { + if (partition != null && partition.getDefaultPhysicalPartition().isFirstLoad()) { collectPartitionIds.add(partition.getId()); } } @@ -283,7 +283,7 @@ public static boolean checkStatisticTableStateNormal() { // check replicate miss for (Partition partition : table.getPartitions()) { - if (partition.getBaseIndex().getTablets().stream() + if (partition.getDefaultPhysicalPartition().getBaseIndex().getTablets().stream() .anyMatch(t -> ((LocalTablet) t).getNormalReplicaBackendIds().isEmpty())) { return false; } @@ -295,7 +295,7 @@ public static boolean checkStatisticTableStateNormal() { public static LocalDateTime getTableLastUpdateTime(Table table) { if (table.isNativeTableOrMaterializedView()) { - long maxTime = table.getPartitions().stream().map(Partition::getVisibleVersionTime) + long maxTime = table.getPartitions().stream().map(p -> p.getDefaultPhysicalPartition().getVisibleVersionTime()) .max(Long::compareTo).orElse(0L); return LocalDateTime.ofInstant(Instant.ofEpochMilli(maxTime), Clock.systemDefaultZone().getZone()); } else if (table.isHiveTable()) { @@ -319,14 +319,14 @@ public 
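Several hunks above (AddDecodeNodeForDictStringRule, DecodeCollector, StatisticExecutor, StatisticUtils) repeat one mechanical migration: version state is read from a partition's default physical partition instead of from the logical Partition. The sketch below shows the resulting shape, lifted from those hunks; the wrapper method name is an illustrative assumption.

    // Illustrative only: the recurring Partition -> PhysicalPartition hop introduced by this patch.
    long latestVisibleVersionTime(OlapTable table) {
        // before: table.getPartitions().stream().map(Partition::getVisibleVersionTime).max(...)
        return table.getPartitions().stream()
                .map(p -> p.getDefaultPhysicalPartition().getVisibleVersionTime())
                .max(Long::compareTo)
                .orElse(0L);
    }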
static LocalDateTime getTableLastUpdateTime(Table table) { IcebergTable icebergTable = (IcebergTable) table; Optional snapshot = Optional.ofNullable(icebergTable.getNativeTable().currentSnapshot()); return snapshot.map(value -> LocalDateTime.ofInstant(Instant.ofEpochMilli(value.timestampMillis()). - plusSeconds(60), Clock.systemDefaultZone().getZone())).orElse(null); + plusSeconds(60), Clock.systemDefaultZone().getZone())).orElse(null); } else { return null; } } public static LocalDateTime getPartitionLastUpdateTime(Partition partition) { - long time = partition.getVisibleVersionTime(); + long time = partition.getDefaultPhysicalPartition().getVisibleVersionTime(); return LocalDateTime.ofInstant(Instant.ofEpochMilli(time), Clock.systemDefaultZone().getZone()); } @@ -593,7 +593,7 @@ public static Type getQueryStatisticsColumnType(Table table, String column) { Preconditions.checkState(parts.length >= 1); Column base = table.getColumn(parts[0]); if (base == null) { - ErrorReport.reportSemanticException(ErrorCode.ERR_BAD_FIELD_ERROR, column, table.getName()); + ErrorReport.reportSemanticException(ErrorCode.ERR_BAD_FIELD_ERROR, column); } Type baseColumnType = base.getType(); diff --git a/fe/fe-core/src/main/java/com/starrocks/statistic/StatisticsMetaManager.java b/fe/fe-core/src/main/java/com/starrocks/statistic/StatisticsMetaManager.java index e62b42d68f429a..88e537e5b7c2fd 100644 --- a/fe/fe-core/src/main/java/com/starrocks/statistic/StatisticsMetaManager.java +++ b/fe/fe-core/src/main/java/com/starrocks/statistic/StatisticsMetaManager.java @@ -23,6 +23,7 @@ import com.starrocks.catalog.LocalTablet; import com.starrocks.catalog.OlapTable; import com.starrocks.catalog.Partition; +import com.starrocks.catalog.PhysicalPartition; import com.starrocks.common.Config; import com.starrocks.common.DdlException; import com.starrocks.common.Pair; @@ -68,7 +69,7 @@ private boolean createDatabase() { LOG.info("create statistics db start"); CreateDbStmt dbStmt = new CreateDbStmt(false, StatsConstants.STATISTICS_DB_NAME); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(dbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(dbStmt.getFullDbName()); } catch (UserException e) { LOG.warn("Failed to create database ", e); return false; @@ -103,10 +104,12 @@ private boolean checkReplicateNormal(String tableName) { boolean check = true; for (Partition partition : table.getPartitions()) { // check replicate miss - if (partition.getBaseIndex().getTablets().stream() - .anyMatch(t -> ((LocalTablet) t).getNormalReplicaBackendIds().isEmpty())) { - check = false; - break; + for (PhysicalPartition physicalPartition : partition.getSubPartitions()) { + if (physicalPartition.getBaseIndex().getTablets().stream() + .anyMatch(t -> ((LocalTablet) t).getNormalReplicaBackendIds().isEmpty())) { + check = false; + break; + } } } @@ -160,7 +163,7 @@ private boolean createSampleStatisticsTable(ConnectContext context) { ""); Analyzer.analyze(stmt, context); - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(stmt); } catch (UserException e) { LOG.warn("Failed to create sample statistics, ", e); return false; @@ -192,7 +195,7 @@ private boolean createFullStatisticsTable(ConnectContext context) { ""); Analyzer.analyze(stmt, context); - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(stmt); } catch 
(UserException e) { LOG.warn("Failed to create full statistics table", e); return false; @@ -223,7 +226,7 @@ private boolean createHistogramStatisticsTable(ConnectContext context) { ""); Analyzer.analyze(stmt, context); - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(stmt); } catch (UserException e) { LOG.warn("Failed to create histogram statistics table", e); return false; @@ -261,7 +264,7 @@ private boolean createExternalFullStatisticsTable(ConnectContext context) { ""); Analyzer.analyze(stmt, context); - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(stmt); } catch (UserException e) { LOG.warn("Failed to create full statistics table", e); return false; @@ -291,7 +294,7 @@ private boolean createExternalHistogramStatisticsTable(ConnectContext context) { ""); Analyzer.analyze(stmt, context); - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(stmt); } catch (UserException e) { LOG.warn("Failed to create external histogram statistics table", e); return false; @@ -330,7 +333,7 @@ private boolean dropTable(String tableName) { new TableName(StatsConstants.STATISTICS_DB_NAME, tableName), true); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(stmt); } catch (DdlException e) { LOG.warn("Failed to drop table", e); return false; diff --git a/fe/fe-core/src/main/java/com/starrocks/statistic/sample/TabletSampleManager.java b/fe/fe-core/src/main/java/com/starrocks/statistic/sample/TabletSampleManager.java index 8b76a132674b27..48051dc4721b49 100644 --- a/fe/fe-core/src/main/java/com/starrocks/statistic/sample/TabletSampleManager.java +++ b/fe/fe-core/src/main/java/com/starrocks/statistic/sample/TabletSampleManager.java @@ -14,7 +14,9 @@ package com.starrocks.statistic.sample; +import com.starrocks.catalog.OlapTable; import com.starrocks.catalog.Partition; +import com.starrocks.catalog.PhysicalPartition; import com.starrocks.catalog.Table; import com.starrocks.catalog.Tablet; import com.starrocks.common.Config; @@ -83,23 +85,27 @@ private TabletSampleManager(double highSampleRatio, double mediumHighRatio, doub } private void classifyTablet(Table table) { - for (Partition p : table.getPartitions()) { - if (!p.hasData()) { - continue; - } - long partitionId = p.getId(); - for (Tablet tablet : p.getBaseIndex().getTablets()) { - long tabletId = tablet.getId(); - long rowCount = tablet.getFuzzyRowCount(); - TabletStats tabletStats = new TabletStats(tabletId, partitionId, rowCount); - if (rowCount >= HIGH_WEIGHT_ROWS_THRESHOLD) { - highWeight.addTabletStats(tabletStats); - } else if (rowCount >= MEDIUM_HIGH_WEIGHT_ROWS_THRESHOLD) { - mediumHighWeight.addTabletStats(tabletStats); - } else if (rowCount >= MEDIUM_LOW_WEIGHT_ROWS_THRESHOLD) { - mediumLowWeight.addTabletStats(tabletStats); - } else { - lowWeight.addTabletStats(tabletStats); + if (table instanceof OlapTable) { + OlapTable olapTable = (OlapTable) table; + for (Partition logicalPartition : olapTable.getPartitions()) { + if (!logicalPartition.hasData()) { + continue; + } + for (PhysicalPartition physicalPartition : logicalPartition.getSubPartitions()) { + for (Tablet tablet : physicalPartition.getBaseIndex().getTablets()) { + long tabletId = tablet.getId(); + long rowCount = 
tablet.getFuzzyRowCount(); + TabletStats tabletStats = new TabletStats(tabletId, physicalPartition.getId(), rowCount); + if (rowCount >= HIGH_WEIGHT_ROWS_THRESHOLD) { + highWeight.addTabletStats(tabletStats); + } else if (rowCount >= MEDIUM_HIGH_WEIGHT_ROWS_THRESHOLD) { + mediumHighWeight.addTabletStats(tabletStats); + } else if (rowCount >= MEDIUM_LOW_WEIGHT_ROWS_THRESHOLD) { + mediumLowWeight.addTabletStats(tabletStats); + } else { + lowWeight.addTabletStats(tabletStats); + } + } } } } diff --git a/fe/fe-core/src/main/java/com/starrocks/task/AlterReplicaTask.java b/fe/fe-core/src/main/java/com/starrocks/task/AlterReplicaTask.java index f76cc401e7053d..2c205a478cc654 100644 --- a/fe/fe-core/src/main/java/com/starrocks/task/AlterReplicaTask.java +++ b/fe/fe-core/src/main/java/com/starrocks/task/AlterReplicaTask.java @@ -322,7 +322,7 @@ public void handleFinishAlterTask() throws Exception { if (index == null) { throw new MetaNotFoundException("index " + getIndexId() + " does not exist"); } - Tablet tablet = index.getTablet(getTabletId()); + Tablet tablet = GlobalStateMgr.getCurrentState().getLocalMetastore().getTablet(index, getTabletId()); Preconditions.checkNotNull(tablet, getTabletId()); if (!tbl.isCloudNativeTableOrMaterializedView()) { Replica replica = ((LocalTablet) tablet).getReplicaById(getNewReplicaId()); @@ -347,7 +347,7 @@ public void handleFinishAlterTask() throws Exception { replica.getDataSize(), replica.getRowCount(), replica.getLastFailedVersion(), replica.getLastSuccessVersion(), 0); - GlobalStateMgr.getCurrentState().getEditLog().logUpdateReplica(info); + GlobalStateMgr.getCurrentState().getLocalMetastore().updateReplica(info); } LOG.info("after handle alter task tablet: {}, replica: {}", getSignature(), replica); diff --git a/fe/fe-core/src/main/java/com/starrocks/task/DropReplicaTask.java b/fe/fe-core/src/main/java/com/starrocks/task/DropReplicaTask.java index c641c3512af984..ba987f8821fc62 100644 --- a/fe/fe-core/src/main/java/com/starrocks/task/DropReplicaTask.java +++ b/fe/fe-core/src/main/java/com/starrocks/task/DropReplicaTask.java @@ -38,8 +38,8 @@ import com.starrocks.thrift.TTaskType; public class DropReplicaTask extends AgentTask { - private int schemaHash; // set -1L as unknown - private boolean force; + private final int schemaHash; // set -1L as unknown + private final boolean force; public DropReplicaTask(long backendId, long tabletId, int schemaHash, boolean force) { super(null, backendId, TTaskType.DROP, -1L, -1L, -1L, -1L, tabletId); diff --git a/fe/fe-core/src/main/java/com/starrocks/task/TabletTaskExecutor.java b/fe/fe-core/src/main/java/com/starrocks/task/TabletTaskExecutor.java index e2994e93aeb8c9..babde30ffabd84 100644 --- a/fe/fe-core/src/main/java/com/starrocks/task/TabletTaskExecutor.java +++ b/fe/fe-core/src/main/java/com/starrocks/task/TabletTaskExecutor.java @@ -167,7 +167,7 @@ public static void buildPartitionsConcurrently(long dbId, OlapTable table, List< } private static List buildCreateReplicaTasks(long dbId, OlapTable table, List partitions, - long warehouseId, boolean enableTabletCreationOptimization) + long warehouseId, boolean enableTabletCreationOptimization) throws DdlException { List tasks = new ArrayList<>(); for (PhysicalPartition partition : partitions) { @@ -178,7 +178,7 @@ private static List buildCreateReplicaTasks(long dbId, OlapTa } private static List buildCreateReplicaTasks(long dbId, OlapTable table, PhysicalPartition partition, - long warehouseId, boolean enableTabletCreationOptimization) + long warehouseId, boolean 
enableTabletCreationOptimization) throws DdlException { ArrayList tasks = new ArrayList<>((int) partition.storageReplicaCount()); for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { @@ -188,8 +188,8 @@ private static List buildCreateReplicaTasks(long dbId, OlapTa } private static List buildCreateReplicaTasks(long dbId, OlapTable table, PhysicalPartition partition, - MaterializedIndex index, long warehouseId, - boolean enableTabletCreationOptimization) throws DdlException { + MaterializedIndex index, long warehouseId, + boolean enableTabletCreationOptimization) throws DdlException { LOG.info("build create replica tasks for index {} db {} table {} partition {}", index, dbId, table.getId(), partition); boolean isCloudNativeTable = table.isCloudNativeTableOrMaterializedView(); @@ -268,7 +268,7 @@ private static void sendCreateReplicaTasksAndWaitForFinished(List tasks, - MarkedCountDownLatch countDownLatch) { + MarkedCountDownLatch countDownLatch) { HashMap> backendToBatchTask = new HashMap<>(); for (CreateReplicaTask task : tasks) { @@ -297,6 +297,26 @@ private static void sendCreateReplicaTasks(List tasks, } } + public static void sendTask(AgentTask agentTask) { + AgentBatchTask batchTask = new AgentBatchTask(); + batchTask.addTask(agentTask); + AgentTaskExecutor.submit(batchTask); + LOG.info("Send agent task : {" + agentTask.toString() + "}"); + } + + public static void sendTask(List agentTaskList) { + HashMap> backendToBatchTask = new HashMap<>(); + + for (AgentTask task : agentTaskList) { + List batchTask = backendToBatchTask.computeIfAbsent(task.getBackendId(), k -> new ArrayList<>()); + batchTask.add(task); + } + + for (Map.Entry> entry : backendToBatchTask.entrySet()) { + sendTask(entry.getKey(), entry.getValue()); + } + } + private static CompletableFuture sendTask(Long backendId, List agentBatchTask) { return CompletableFuture.supplyAsync(() -> { try { @@ -378,7 +398,6 @@ public static int countMaxTasksPerBackend(List tasks) { public static void deleteAllReplicas(OlapTable olapTable) { HashMap batchTaskMap = new HashMap<>(); - // drop all replicas for (Partition partition : olapTable.getAllPartitions()) { for (PhysicalPartition physicalPartition : partition.getSubPartitions()) { @@ -387,10 +406,12 @@ public static void deleteAllReplicas(OlapTable olapTable) { for (MaterializedIndex materializedIndex : allIndices) { long indexId = materializedIndex.getId(); int schemaHash = olapTable.getSchemaHashByIndexId(indexId); - for (Tablet tablet : materializedIndex.getTablets()) { + List tabletList = GlobalStateMgr.getCurrentState().getLocalMetastore() + .getAllTablets(materializedIndex); + for (Tablet tablet : tabletList) { long tabletId = tablet.getId(); - List replicas = ((LocalTablet) tablet).getImmutableReplicas(); - for (Replica replica : replicas) { + List replicaList = GlobalStateMgr.getCurrentState().getLocalMetastore().getAllReplicas(tablet); + for (Replica replica : replicaList) { long backendId = replica.getBackendId(); DropReplicaTask dropTask = new DropReplicaTask(backendId, tabletId, schemaHash, true); AgentBatchTask batchTask = batchTaskMap.get(backendId); @@ -402,7 +423,7 @@ public static void deleteAllReplicas(OlapTable olapTable) { LOG.info("delete tablet[{}] from backend[{}] because table {}-{} is dropped", tabletId, backendId, olapTable.getId(), olapTable.getName()); } // end for replicas - } // end for tablets + } } } // end for indices } // end for partitions @@ -431,4 +452,6 @@ public static void 
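The task and transaction hunks around this point introduce the other recurring indirection: tablets and replicas are enumerated through LocalMetastore (getAllTablets, getAllReplicas) rather than read off MaterializedIndex and LocalTablet directly. Below is a hedged helper-style sketch that uses only those calls; the helper itself is not in the patch.

    // Illustrative helper, not patch code: walk every replica of an index through the metastore facade.
    void forEachReplica(MaterializedIndex index, java.util.function.BiConsumer<Tablet, Replica> action) {
        LocalMetastore metastore = GlobalStateMgr.getCurrentState().getLocalMetastore();
        for (Tablet tablet : metastore.getAllTablets(index)) {
            for (Replica replica : metastore.getAllReplicas(tablet)) {
                action.accept(tablet, replica);
            }
        }
    }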
deleteAllReplicas(OlapTable olapTable) { } } } + + } diff --git a/fe/fe-core/src/main/java/com/starrocks/transaction/LakeTableTxnStateListener.java b/fe/fe-core/src/main/java/com/starrocks/transaction/LakeTableTxnStateListener.java index fba3791c29520e..63f5ca9e864c47 100644 --- a/fe/fe-core/src/main/java/com/starrocks/transaction/LakeTableTxnStateListener.java +++ b/fe/fe-core/src/main/java/com/starrocks/transaction/LakeTableTxnStateListener.java @@ -145,9 +145,9 @@ public void preCommit(TransactionState txnState, List finished PhysicalPartition partition = table.getPhysicalPartition(partitionId); List allIndices = txnState.getPartitionLoadedTblIndexes(table.getId(), partition); for (MaterializedIndex index : allIndices) { - Optional unfinishedTablet = - index.getTablets().stream().filter(t -> !finishedTabletsOfThisTable.contains(t.getId())) - .findAny(); + List tabletList = GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(index); + Optional unfinishedTablet = tabletList.stream().filter( + t -> !finishedTabletsOfThisTable.contains(t.getId())).findAny(); if (!unfinishedTablet.isPresent()) { continue; } diff --git a/fe/fe-core/src/main/java/com/starrocks/transaction/OlapTableTxnStateListener.java b/fe/fe-core/src/main/java/com/starrocks/transaction/OlapTableTxnStateListener.java index 49d502ddbb76e3..3b1a0e3c9710ab 100644 --- a/fe/fe-core/src/main/java/com/starrocks/transaction/OlapTableTxnStateListener.java +++ b/fe/fe-core/src/main/java/com/starrocks/transaction/OlapTableTxnStateListener.java @@ -180,7 +180,9 @@ public void preCommit(TransactionState txnState, List tabletCo List allIndices = txnState.getPartitionLoadedTblIndexes(table.getId(), partition); int quorumReplicaNum = table.getPartitionInfo().getQuorumNum(partition.getParentId(), table.writeQuorum()); for (MaterializedIndex index : allIndices) { - for (Tablet tablet : index.getTablets()) { + List tabletList = GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(index); + + for (Tablet tablet : tabletList) { long tabletId = tablet.getId(); Set commitBackends = tabletToBackends.get(tabletId); diff --git a/fe/fe-core/src/main/java/com/starrocks/transaction/PublishVersionDaemon.java b/fe/fe-core/src/main/java/com/starrocks/transaction/PublishVersionDaemon.java index ffd8543b10af0e..49f8131d61c841 100644 --- a/fe/fe-core/src/main/java/com/starrocks/transaction/PublishVersionDaemon.java +++ b/fe/fe-core/src/main/java/com/starrocks/transaction/PublishVersionDaemon.java @@ -57,6 +57,7 @@ import com.starrocks.rpc.BrpcProxy; import com.starrocks.rpc.LakeService; import com.starrocks.server.GlobalStateMgr; +import com.starrocks.server.LocalMetastore; import com.starrocks.server.RunMode; import com.starrocks.server.WarehouseManager; import com.starrocks.system.ComputeNode; @@ -509,16 +510,20 @@ public boolean publishPartitionBatch(Database db, long tableId, PartitionPublish txnState.getTransactionId()); continue; } + + LocalMetastore localMetastore = GlobalStateMgr.getCurrentState().getLocalMetastore(); + List tablets = localMetastore.getAllTablets(index); + if (index.getState() == MaterializedIndex.IndexState.SHADOW) { if (shadowTabletsMap.containsKey(versions.get(i))) { - shadowTabletsMap.get(versions.get(i)).addAll(index.getTablets()); + shadowTabletsMap.get(versions.get(i)).addAll(tablets); } else { - Set tabletsNew = new HashSet<>(index.getTablets()); + Set tabletsNew = new HashSet<>(tablets); shadowTabletsMap.put(versions.get(i), tabletsNew); } } else { normalTablets = (normalTablets == null) ? 
Sets.newHashSet() : normalTablets; - normalTablets.addAll(index.getTablets()); + normalTablets.addAll(tablets); } } } @@ -791,12 +796,15 @@ private boolean publishPartition(@NotNull Database db, @NotNull TableCommitInfo LOG.info("Ignored index {} for transaction {}", table.getIndexNameById(index.getId()), txnId); continue; } + + List tabletList = GlobalStateMgr.getCurrentState().getLocalMetastore().getAllTablets(index); + if (index.getState() == MaterializedIndex.IndexState.SHADOW) { shadowTablets = (shadowTablets == null) ? Lists.newArrayList() : shadowTablets; - shadowTablets.addAll(index.getTablets()); + shadowTablets.addAll(tabletList); } else { normalTablets = (normalTablets == null) ? Lists.newArrayList() : normalTablets; - normalTablets.addAll(index.getTablets()); + normalTablets.addAll(tabletList); } } } finally { diff --git a/fe/fe-core/src/main/java/com/starrocks/transaction/TransactionChecker.java b/fe/fe-core/src/main/java/com/starrocks/transaction/TransactionChecker.java index 3bb98a819381dd..bf05ac004556be 100644 --- a/fe/fe-core/src/main/java/com/starrocks/transaction/TransactionChecker.java +++ b/fe/fe-core/src/main/java/com/starrocks/transaction/TransactionChecker.java @@ -24,6 +24,7 @@ import com.starrocks.common.util.concurrent.lock.LockType; import com.starrocks.common.util.concurrent.lock.Locker; import com.starrocks.server.GlobalStateMgr; +import com.starrocks.server.LocalMetastore; import java.util.ArrayList; import java.util.List; @@ -99,7 +100,7 @@ public static TransactionChecker create(TransactionState txn, Database db) { List partitions = new ArrayList<>(); for (TableCommitInfo tableCommitInfo : txn.getIdToTableCommitInfos().values()) { OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getId(), tableCommitInfo.getTableId()); + .getTable(db.getId(), tableCommitInfo.getTableId()); if (table == null || table.isCloudNativeTableOrMaterializedView()) { continue; } @@ -119,7 +120,9 @@ public static TransactionChecker create(TransactionState txn, Database db) { List allIndices = txn.getPartitionLoadedTblIndexes(tableCommitInfo.getTableId(), partition); for (MaterializedIndex index : allIndices) { - for (Tablet tablet : index.getTablets()) { + LocalMetastore localMetastore = GlobalStateMgr.getCurrentState().getLocalMetastore(); + List tablets = localMetastore.getAllTablets(index); + for (Tablet tablet : tablets) { partitionChecker.tablets.add((LocalTablet) tablet); } } diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/AlterJobV2Test.java b/fe/fe-core/src/test/java/com/starrocks/alter/AlterJobV2Test.java index 3f896604efd7de..7439310a979930 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/AlterJobV2Test.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/AlterJobV2Test.java @@ -187,7 +187,7 @@ public void testModifyRelatedColumnWithMv() { String sql = "CREATE MATERIALIZED VIEW test.mv2 DISTRIBUTED BY HASH(k1) " + " BUCKETS 10 REFRESH ASYNC properties('replication_num' = '1') AS SELECT k1, k2 FROM modify_column_test"; StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .createMaterializedView((CreateMaterializedViewStatement) statementBase); // modify column which define in mv @@ -197,7 +197,7 @@ public void testModifyRelatedColumnWithMv() { waitForSchemaChangeAlterJobFinish(); MaterializedView mv2 = - (MaterializedView) 
GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test").getTable("mv2"); + (MaterializedView) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test").getTable("mv2"); Assert.assertFalse(mv2.isActive()); } catch (Exception e) { e.printStackTrace(); @@ -215,7 +215,7 @@ public void testModifyWithSelectStarMV1() throws Exception { " BUCKETS 10 REFRESH ASYNC properties('replication_num' = '1') " + "AS SELECT * FROM modify_column_test3"; StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .createMaterializedView((CreateMaterializedViewStatement) statementBase); String alterStmtStr = "alter table test.modify_column_test3 modify column k2 varchar(20)"; @@ -223,7 +223,7 @@ public void testModifyWithSelectStarMV1() throws Exception { DDLStmtExecutor.execute(alterTableStmt, connectContext); waitForSchemaChangeAlterJobFinish(); - MaterializedView mv = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() + MaterializedView mv = (MaterializedView) GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getDb("test").getTable("mv3"); Assert.assertTrue(!mv.isActive()); } finally { @@ -241,7 +241,7 @@ public void testModifyWithSelectStarMV2() throws Exception { " BUCKETS 10 REFRESH ASYNC properties('replication_num' = '1') " + "AS SELECT * FROM testModifyWithSelectStarMV2"; StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .createMaterializedView((CreateMaterializedViewStatement) statementBase); String alterStmtStr = "alter table test.testModifyWithSelectStarMV2 add column k4 bigint"; @@ -250,7 +250,7 @@ public void testModifyWithSelectStarMV2() throws Exception { waitForSchemaChangeAlterJobFinish(); MaterializedView mv = (MaterializedView) GlobalStateMgr.getCurrentState() - .getLocalMetastore().getDb("test").getTable("mv6"); + .getStarRocksMetadata().getDb("test").getTable("mv6"); Assert.assertTrue(mv.isActive()); } catch (Exception e) { e.printStackTrace(); @@ -270,7 +270,7 @@ public void testModifyWithSelectStarMV3() throws Exception { " BUCKETS 10 REFRESH ASYNC properties('replication_num' = '1') " + "AS SELECT * FROM modify_column_test5"; StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .createMaterializedView((CreateMaterializedViewStatement) statementBase); String alterStmtStr = "alter table test.modify_column_test5 drop column k2"; @@ -279,7 +279,7 @@ public void testModifyWithSelectStarMV3() throws Exception { waitForSchemaChangeAlterJobFinish(); MaterializedView mv = (MaterializedView) GlobalStateMgr.getCurrentState() - .getLocalMetastore().getDb("test").getTable("mv5"); + .getStarRocksMetadata().getDb("test").getTable("mv5"); Assert.assertTrue(!mv.isActive()); } catch (Exception e) { Assert.fail(); @@ -297,7 +297,7 @@ public void testModifyWithExpr() throws Exception { " BUCKETS 10 REFRESH ASYNC properties('replication_num' = '1')" + " AS SELECT k1, k2 + 1 FROM modify_column_test4"; StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() 
.createMaterializedView((CreateMaterializedViewStatement) statementBase); { @@ -309,7 +309,7 @@ public void testModifyWithExpr() throws Exception { waitForSchemaChangeAlterJobFinish(); MaterializedView mv = (MaterializedView) GlobalStateMgr - .getCurrentState().getLocalMetastore().getDb("test").getTable("mv4"); + .getCurrentState().getStarRocksMetadata().getDb("test").getTable("mv4"); Assert.assertTrue(mv.isActive()); } @@ -322,7 +322,7 @@ public void testModifyWithExpr() throws Exception { waitForSchemaChangeAlterJobFinish(); MaterializedView mv = (MaterializedView) GlobalStateMgr - .getCurrentState().getLocalMetastore().getDb("test").getTable("mv4"); + .getCurrentState().getStarRocksMetadata().getDb("test").getTable("mv4"); Assert.assertFalse(mv.isActive()); System.out.println(mv.getInactiveReason()); Assert.assertTrue(mv.getInactiveReason().contains("base table schema changed for columns: k2")); @@ -341,7 +341,7 @@ public void testModifyUnRelatedColumnWithMv() { String sql = "CREATE MATERIALIZED VIEW test.mv1 DISTRIBUTED BY HASH(k1) " + " BUCKETS 10 REFRESH ASYNC properties('replication_num' = '1') AS SELECT k1, k2 FROM modify_column_test"; StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .createMaterializedView((CreateMaterializedViewStatement) statementBase); // modify column which not define in mv @@ -351,7 +351,7 @@ public void testModifyUnRelatedColumnWithMv() { waitForSchemaChangeAlterJobFinish(); MaterializedView mv = - (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test").getTable("mv1"); + (MaterializedView) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test").getTable("mv1"); Assert.assertTrue(mv.isActive()); } catch (Exception e) { e.printStackTrace(); diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/AlterTableTest.java b/fe/fe-core/src/test/java/com/starrocks/alter/AlterTableTest.java index b481dc1bd495c8..28214b2378f346 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/AlterTableTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/AlterTableTest.java @@ -93,7 +93,7 @@ public void testAlterTableBucketSize() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String sql = "ALTER TABLE test_alter_bucket_size SET (\"bucket_size\" = \"-1\");"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt); } @Test @@ -121,9 +121,9 @@ public void testAlterTableStorageCoolDownTTL() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String sql = "ALTER TABLE test_alter_cool_down_ttl SET (\"storage_cooldown_ttl\" = \"3 day\");"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test").getTable("test_alter_cool_down_ttl"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test").getTable("test_alter_cool_down_ttl"); OlapTable olapTable = (OlapTable) table; String storageCooldownTtl = 
olapTable.getTableProperty().getProperties().get("storage_cooldown_ttl"); Assert.assertEquals("3 day", storageCooldownTtl); @@ -156,7 +156,7 @@ public void testAlterTableStorageCoolDownTTLExcept() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String sql = "ALTER TABLE test_alter_cool_down_ttl_2 SET (\"storage_cooldown_ttl\" = \"abc\");"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt); } @Test @@ -182,7 +182,7 @@ public void testAlterTableStorageCoolDownTTLPartition() throws Exception { " \"storage_cooldown_ttl\" = \"1 day\"\n" + ");"); ConnectContext ctx = starRocksAssert.getCtx(); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") .getTable("test_alter_cool_down_ttl_partition"); OlapTable olapTable = (OlapTable) table; RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) olapTable.getPartitionInfo(); @@ -237,16 +237,16 @@ public void testAlterTablePartitionTTLInvalid() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String sql = "ALTER TABLE test_partition_live_number SET(\"partition_live_number\" = \"-1\");"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt); Set<Pair<Long, Long>> ttlPartitionInfo = GlobalStateMgr.getCurrentState() .getDynamicPartitionScheduler().getTtlPartitionInfo(); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); - Table table = - GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "test_partition_live_number"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .getTable(db.getFullName(), "test_partition_live_number"); Assert.assertFalse(ttlPartitionInfo.contains(new Pair<>(db.getId(), table.getId()))); sql = "ALTER TABLE test_partition_live_number SET(\"partition_live_number\" = \"1\");"; alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt); Assert.assertTrue(ttlPartitionInfo.contains(new Pair<>(db.getId(), table.getId()))); } @@ -273,9 +273,9 @@ public void testAlterTablePartitionStorageMedium() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String sql = "ALTER TABLE test_partition_storage_medium SET(\"default.storage_medium\" = \"SSD\");"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); - OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); + OlapTable olapTable 
= (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getTable(db.getFullName(), "test_partition_storage_medium"); Assert.assertTrue(olapTable.getStorageMedium().equals("SSD")); } @@ -306,14 +306,14 @@ public void testAlterTableStorageType() throws Exception { Assert.assertThrows(AnalysisException.class, () -> UtFrameUtils.parseStmtWithNewParser(sql2, ctx)); Assert.assertTrue(e2.getMessage().contains("Can't change storage type")); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); - OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); + OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getTable(db.getFullName(), "test_storage_type"); Assert.assertTrue(olapTable.getStorageType().equals(TStorageType.COLUMN_WITH_ROW)); } public void testAlterTableLocationProp() throws Exception { - Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database testDb = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); // add label to backend SystemInfoService systemInfoService = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(); @@ -348,10 +348,10 @@ public void testAlterTableLocationProp() throws Exception { PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "' = 'rack:*');"; DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(sql, connectContext), connectContext); - Assert.assertEquals("rack", ((OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + Assert.assertEquals("rack", ((OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getTable(testDb.getFullName(), "test_location_alter")) .getLocation().keySet().stream().findFirst().get()); - Assert.assertEquals("*", ((OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + Assert.assertEquals("*", ((OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getTable(testDb.getFullName(), "test_location_alter")) .getLocation().get("rack").stream().findFirst().get()); @@ -360,12 +360,12 @@ public void testAlterTableLocationProp() throws Exception { PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "' = '');"; DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(sql, connectContext), connectContext); - Assert.assertNull(((OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + Assert.assertNull(((OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getTable(testDb.getFullName(), "test_location_alter")) .getLocation()); // ** test replay from edit log: alter to rack:* - LocalMetastore localMetastoreFollower = new LocalMetastore(GlobalStateMgr.getCurrentState(), null, null); + LocalMetastore localMetastoreFollower = new LocalMetastore(GlobalStateMgr.getCurrentState()); localMetastoreFollower.load(initialImage.getMetaBlockReader()); ModifyTablePropertyOperationLog info = (ModifyTablePropertyOperationLog) UtFrameUtils.PseudoJournalReplayer.replayNextJournal(OperationType.OP_ALTER_TABLE_PROPERTIES); @@ -388,7 +388,7 @@ public void testAlterTableLocationProp() throws Exception { @Test public void testAlterColocateTableLocationProp() throws Exception { - Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database testDb = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); // add label to backend SystemInfoService systemInfoService = 
GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(); @@ -414,7 +414,7 @@ public void testAlterColocateTableLocationProp() throws Exception { ");"; starRocksAssert.withTable(sql); - OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getTable(testDb.getFullName(), "test_location_colocate_alter1"); Assert.assertNull(olapTable.getLocation()); @@ -429,7 +429,7 @@ public void testAlterColocateTableLocationProp() throws Exception { @Test public void testAlterLocationPropTableToColocate() throws Exception { - Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database testDb = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); // add label to backend SystemInfoService systemInfoService = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(); @@ -454,7 +454,7 @@ public void testAlterLocationPropTableToColocate() throws Exception { ");"; starRocksAssert.withTable(sql); - OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getTable(testDb.getFullName(), "test_location_colocate_alter2"); Assert.assertTrue(olapTable.getLocation().containsKey("*")); diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/AlterTest.java b/fe/fe-core/src/test/java/com/starrocks/alter/AlterTest.java index f9934a38334b98..94382baf84aa00 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/AlterTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/AlterTest.java @@ -150,107 +150,107 @@ public static void beforeClass() throws Exception { starRocksAssert = new StarRocksAssert(connectContext); starRocksAssert.withDatabase("test").useDatabase("test") - .withTable("CREATE TABLE test.tbl1\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');") - - .withTable("CREATE TABLE test.tbl2\n" + - "(\n" + - " k1 date,\n" + - " v1 int sum\n" + - ")\n" + - "DISTRIBUTED BY HASH (k1) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');") - - .withTable("CREATE TABLE test.tbl3\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');") - - .withTable("CREATE TABLE test.tbl4\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01'),\n" + - " PARTITION p3 values less than('2020-04-01'),\n" + - " PARTITION p4 values less than('2020-05-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES" + - "(" + - " 'replication_num' = '1',\n" + - " 'in_memory' = 'false',\n" + - " 'storage_medium' = 'SSD',\n" + - " 'storage_cooldown_time' = '9999-12-31 00:00:00'\n" + - ");") - .withTable("CREATE TABLE t_recharge_detail(\n" + - " id bigint ,\n" + - " user_id bigint ,\n" + - " recharge_money decimal(32,2) , \n" + - " 
province varchar(20) not null,\n" + - " dt varchar(20) not null\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(id)\n" + - "PARTITION BY LIST (dt,province) (\n" + - " PARTITION p1 VALUES IN ((\"2022-04-01\", \"beijing\")),\n" + - " PARTITION p2 VALUES IN ((\"2022-04-01\", \"shanghai\"))\n" + - ")\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10 \n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\"\n" + - ");") - .withTable("CREATE TABLE test.site_access_date_trunc (\n" + - " event_day DATETIME NOT NULL,\n" + - " site_id INT DEFAULT '10',\n" + - " city_code VARCHAR(100),\n" + - " user_name VARCHAR(32) DEFAULT '',\n" + - " pv BIGINT DEFAULT '0'\n" + - ")\n" + - "DUPLICATE KEY(event_day, site_id, city_code, user_name)\n" + - "PARTITION BY date_trunc('day', event_day)\n" + - "DISTRIBUTED BY HASH(event_day, site_id)\n" + - "PROPERTIES(\n" + - " \"replication_num\" = \"1\"\n" + - ");") - .withTable("CREATE TABLE site_access_time_slice (\n" + - " event_day datetime,\n" + - " site_id INT DEFAULT '10',\n" + - " city_code VARCHAR(100),\n" + - " user_name VARCHAR(32) DEFAULT '',\n" + - " pv BIGINT DEFAULT '0'\n" + - ")\n" + - "DUPLICATE KEY(event_day, site_id, city_code, user_name)\n" + - "PARTITION BY time_slice(event_day, interval 1 day)\n" + - "DISTRIBUTED BY HASH(event_day, site_id) BUCKETS 32\n" + - "PROPERTIES(\n" + - " \"partition_live_number\" = \"3\",\n" + - " \"replication_num\" = \"1\"\n" + - ");"); + .withTable("CREATE TABLE test.tbl1\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');") + + .withTable("CREATE TABLE test.tbl2\n" + + "(\n" + + " k1 date,\n" + + " v1 int sum\n" + + ")\n" + + "DISTRIBUTED BY HASH (k1) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');") + + .withTable("CREATE TABLE test.tbl3\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');") + + .withTable("CREATE TABLE test.tbl4\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01'),\n" + + " PARTITION p3 values less than('2020-04-01'),\n" + + " PARTITION p4 values less than('2020-05-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES" + + "(" + + " 'replication_num' = '1',\n" + + " 'in_memory' = 'false',\n" + + " 'storage_medium' = 'SSD',\n" + + " 'storage_cooldown_time' = '9999-12-31 00:00:00'\n" + + ");") + .withTable("CREATE TABLE t_recharge_detail(\n" + + " id bigint ,\n" + + " user_id bigint ,\n" + + " recharge_money decimal(32,2) , \n" + + " province varchar(20) not null,\n" + + " dt varchar(20) not null\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(id)\n" + + "PARTITION BY LIST (dt,province) (\n" + + " PARTITION p1 VALUES IN ((\"2022-04-01\", \"beijing\")),\n" + + " PARTITION p2 VALUES IN ((\"2022-04-01\", \"shanghai\"))\n" + + ")\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10 \n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = 
\"false\"\n" + + ");") + .withTable("CREATE TABLE test.site_access_date_trunc (\n" + + " event_day DATETIME NOT NULL,\n" + + " site_id INT DEFAULT '10',\n" + + " city_code VARCHAR(100),\n" + + " user_name VARCHAR(32) DEFAULT '',\n" + + " pv BIGINT DEFAULT '0'\n" + + ")\n" + + "DUPLICATE KEY(event_day, site_id, city_code, user_name)\n" + + "PARTITION BY date_trunc('day', event_day)\n" + + "DISTRIBUTED BY HASH(event_day, site_id)\n" + + "PROPERTIES(\n" + + " \"replication_num\" = \"1\"\n" + + ");") + .withTable("CREATE TABLE site_access_time_slice (\n" + + " event_day datetime,\n" + + " site_id INT DEFAULT '10',\n" + + " city_code VARCHAR(100),\n" + + " user_name VARCHAR(32) DEFAULT '',\n" + + " pv BIGINT DEFAULT '0'\n" + + ")\n" + + "DUPLICATE KEY(event_day, site_id, city_code, user_name)\n" + + "PARTITION BY time_slice(event_day, interval 1 day)\n" + + "DISTRIBUTED BY HASH(event_day, site_id) BUCKETS 32\n" + + "PROPERTIES(\n" + + " \"partition_live_number\" = \"3\",\n" + + " \"replication_num\" = \"1\"\n" + + ");"); } @AfterClass @@ -259,7 +259,7 @@ public static void tearDown() throws Exception { String dropSQL = "drop table test_partition_exception"; try { DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } catch (Exception ex) { } @@ -282,21 +282,21 @@ private static void createTable(String sql) throws Exception { private static void createMaterializedView(String sql) throws Exception { CreateMaterializedViewStatement createMaterializedViewStatement = - (CreateMaterializedViewStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createMaterializedViewStatement); + (CreateMaterializedViewStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createMaterializedViewStatement); } private static void dropMaterializedView(String sql) throws Exception { DropMaterializedViewStmt dropMaterializedViewStmt = - (DropMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropMaterializedView(dropMaterializedViewStmt); + (DropMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropMaterializedView(dropMaterializedViewStmt); } private static void alterMaterializedView(String sql, boolean expectedException) throws Exception { AlterMaterializedViewStmt alterMaterializedViewStmt = - (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); + (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().alterMaterializedView(alterMaterializedViewStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterMaterializedView(alterMaterializedViewStmt); if (expectedException) { Assert.fail(); } @@ -310,12 +310,12 @@ private static void alterMaterializedView(String sql, boolean expectedException) private static void refreshMaterializedView(String sql) throws Exception { RefreshMaterializedViewStatement refreshMaterializedViewStatement = - (RefreshMaterializedViewStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); + 
(RefreshMaterializedViewStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); try { - GlobalStateMgr.getCurrentState().getLocalMetastore() - .refreshMaterializedView(refreshMaterializedViewStatement.getMvName().getDb(), - refreshMaterializedViewStatement.getMvName().getTbl(), false, null, - Constants.TaskRunPriority.LOWEST.value(), false, true); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .refreshMaterializedView(refreshMaterializedViewStatement.getMvName().getDb(), + refreshMaterializedViewStatement.getMvName().getTbl(), false, null, + Constants.TaskRunPriority.LOWEST.value(), false, true); } catch (Exception e) { e.printStackTrace(); Assert.fail(); @@ -324,9 +324,9 @@ private static void refreshMaterializedView(String sql) throws Exception { private static void cancelRefreshMaterializedView(String sql, boolean expectedException) throws Exception { CancelRefreshMaterializedViewStmt cancelRefresh = - (CancelRefreshMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); + (CancelRefreshMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().cancelRefreshMaterializedView(cancelRefresh); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().cancelRefreshMaterializedView(cancelRefresh); if (expectedException) { Assert.fail(); } @@ -356,7 +356,7 @@ private static void alterTableWithNewParser(String sql, boolean expectedExceptio private static void alterTableWithNewParserAndExceptionMsg(String sql, String msg) throws Exception { AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(connectContext, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(connectContext, alterTableStmt); } catch (Exception e) { Assert.assertEquals(msg, e.getMessage()); } @@ -365,36 +365,36 @@ private static void alterTableWithNewParserAndExceptionMsg(String sql, String ms @Test public void testRenameMaterializedView() throws Exception { starRocksAssert.useDatabase("test") - .withTable("CREATE TABLE test.testTable1\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); + .withTable("CREATE TABLE test.testTable1\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); String sql = "create materialized view mv1 " + - "partition by k1 " + - "distributed by hash(k2) " + - "refresh async START('2122-12-31') EVERY(INTERVAL 1 HOUR) " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ") " + - "as select k1, k2 from test.testTable1;"; + "partition by k1 " + + "distributed by hash(k2) " + + "refresh async START('2122-12-31') EVERY(INTERVAL 1 HOUR) " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ") " + + "as select k1, k2 from test.testTable1;"; createMaterializedView(sql); String alterStmt = "alter materialized view test.mv1 rename mv2"; alterMaterializedView(alterStmt, false); - 
MaterializedView materializedView = (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore(). - getDb("test").getTable("mv2"); + MaterializedView materializedView = (MaterializedView) GlobalStateMgr.getCurrentState().getStarRocksMetadata(). + getDb("test").getTable("mv2"); TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager(); Task task = taskManager.getTask(TaskBuilder.getMvTaskName(materializedView.getId())); Assert.assertEquals("insert overwrite `mv2` SELECT `test`.`testTable1`.`k1`, `test`.`testTable1`.`k2`\n" + - "FROM `test`.`testTable1`", task.getDefinition()); + "FROM `test`.`testTable1`", task.getDefinition()); ConnectContext.get().setCurrentUserIdentity(UserIdentity.ROOT); ConnectContext.get().setCurrentRoleIds(UserIdentity.ROOT); dropMaterializedView("drop materialized view test.mv2"); @@ -403,45 +403,46 @@ public void testRenameMaterializedView() throws Exception { @Test public void testCouldNotFindMaterializedView() throws Exception { starRocksAssert.useDatabase("test") - .withTable("CREATE TABLE test.testTable1\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');") - .withTable("CREATE TABLE test.testTable2\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); + .withTable("CREATE TABLE test.testTable1\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');") + .withTable("CREATE TABLE test.testTable2\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); String sql = "create materialized view mv1 " + - "partition by k1 " + - "distributed by hash(k2) " + - "refresh async START('2122-12-31') EVERY(INTERVAL 1 HOUR) " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ") " + - "as select k1, k2 from test.testTable1;"; + "partition by k1 " + + "distributed by hash(k2) " + + "refresh async START('2122-12-31') EVERY(INTERVAL 1 HOUR) " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ") " + + "as select k1, k2 from test.testTable1;"; createMaterializedView(sql); starRocksAssert.getCtx().setCurrentRoleIds(GlobalStateMgr.getCurrentState().getAuthorizationMgr().getRoleIdsByUser( - starRocksAssert.getCtx().getCurrentUserIdentity())); + starRocksAssert.getCtx().getCurrentUserIdentity())); dropMaterializedView("drop materialized view test.mv1"); - OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test").getTable("testTable1"); + OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() + 
.getDb("test").getTable("testTable1"); // this for mock olapTable.getIndexNameById(mvIdx.getId()) == Null table.deleteIndexInfo("testTable1"); try { @@ -455,19 +456,19 @@ public void testCouldNotFindMaterializedView() throws Exception { @Test public void testRenameTable() throws Exception { starRocksAssert.useDatabase("test") - .withTable("CREATE TABLE test.testRenameTable1\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); + .withTable("CREATE TABLE test.testRenameTable1\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); String alterStmt = "alter table test.testRenameTable1 rename testRenameTable2"; alterTableWithNewParser(alterStmt, false); } @@ -475,27 +476,27 @@ public void testRenameTable() throws Exception { @Test public void testChangeMaterializedViewRefreshScheme() throws Exception { starRocksAssert.useDatabase("test") - .withTable("CREATE TABLE test.testTable2\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); + .withTable("CREATE TABLE test.testTable2\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); String sql = "create materialized view mv1 " + - "partition by k1 " + - "distributed by hash(k2) " + - "refresh async START('2122-12-31') EVERY(INTERVAL 1 HOUR) " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ") " + - "as select k1, k2 from test.testTable2;"; + "partition by k1 " + + "distributed by hash(k2) " + + "refresh async START('2122-12-31') EVERY(INTERVAL 1 HOUR) " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ") " + + "as select k1, k2 from test.testTable2;"; createMaterializedView(sql); String alterStmt = "alter materialized view mv1 refresh async EVERY(INTERVAL 1 minute)"; alterMaterializedView(alterStmt, false); @@ -509,61 +510,61 @@ public void testChangeMaterializedViewRefreshScheme() throws Exception { @Test public void testRefreshMaterializedView() throws Exception { starRocksAssert.useDatabase("test") - .withTable("CREATE TABLE test.testTable3\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); + .withTable("CREATE TABLE test.testTable3\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less 
than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); String sql = "create materialized view mv1 " + - "partition by k1 " + - "distributed by hash(k2) " + - "refresh async START('2122-12-31') EVERY(INTERVAL 1 HOUR) " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ") " + - "as select k1, k2 from test.testTable3;"; + "partition by k1 " + + "distributed by hash(k2) " + + "refresh async START('2122-12-31') EVERY(INTERVAL 1 HOUR) " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ") " + + "as select k1, k2 from test.testTable3;"; createMaterializedView(sql); String alterStmt = "refresh materialized view test.mv1"; refreshMaterializedView(alterStmt); starRocksAssert.getCtx().setCurrentRoleIds(GlobalStateMgr.getCurrentState().getAuthorizationMgr().getRoleIdsByUser( - starRocksAssert.getCtx().getCurrentUserIdentity())); + starRocksAssert.getCtx().getCurrentUserIdentity())); dropMaterializedView("drop materialized view test.mv1"); } @Test public void testCancelRefreshMaterializedView() throws Exception { starRocksAssert.useDatabase("test") - .withTable("CREATE TABLE test.testTable4\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - " PARTITION p1 values less than('2020-02-01'),\n" + - " PARTITION p2 values less than('2020-03-01')\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); + .withTable("CREATE TABLE test.testTable4\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + " PARTITION p1 values less than('2020-02-01'),\n" + + " PARTITION p2 values less than('2020-03-01')\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); String sql = "create materialized view mv1 " + - "partition by k1 " + - "distributed by hash(k2) " + - "refresh async START('2122-12-31') EVERY(INTERVAL 1 HOUR) " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ") " + - "as select k1, k2 from test.testTable4;"; + "partition by k1 " + + "distributed by hash(k2) " + + "refresh async START('2122-12-31') EVERY(INTERVAL 1 HOUR) " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ") " + + "as select k1, k2 from test.testTable4;"; starRocksAssert.getCtx().setCurrentRoleIds(GlobalStateMgr.getCurrentState().getAuthorizationMgr().getRoleIdsByUser( - starRocksAssert.getCtx().getCurrentUserIdentity())); + starRocksAssert.getCtx().getCurrentUserIdentity())); createMaterializedView(sql); String alterStmt = "refresh materialized view test.mv1"; refreshMaterializedView(alterStmt); @@ -573,12 +574,12 @@ public void testCancelRefreshMaterializedView() throws Exception { @Test public void testConflictAlterOperations() throws Exception { - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); - OlapTable tbl = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "tbl1"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); + OlapTable tbl = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "tbl1"); String stmt = - "alter table test.tbl1 add partition p3 values less than('2020-04-01'), " + - "add partition p4 values less than('2020-05-01')"; + "alter table test.tbl1 add partition p3 values less 
than('2020-04-01'), " + + "add partition p4 values less than('2020-05-01')"; alterTableWithNewParser(stmt, true); stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01'), drop partition p4"; @@ -606,12 +607,12 @@ public void testConflictAlterOperations() throws Exception { // enable dynamic partition // not adding the `start` property so that it won't drop the origin partition p1, p2 and p3 stmt = "alter table test.tbl1 set (\n" + - "'dynamic_partition.enable' = 'true',\n" + - "'dynamic_partition.time_unit' = 'DAY',\n" + - "'dynamic_partition.end' = '3',\n" + - "'dynamic_partition.prefix' = 'p',\n" + - "'dynamic_partition.buckets' = '3'\n" + - " );"; + "'dynamic_partition.enable' = 'true',\n" + + "'dynamic_partition.time_unit' = 'DAY',\n" + + "'dynamic_partition.end' = '3',\n" + + "'dynamic_partition.prefix' = 'p',\n" + + "'dynamic_partition.buckets' = '3'\n" + + " );"; alterTableWithNewParser(stmt, false); Assert.assertTrue(tbl.getTableProperty().getDynamicPartitionProperty().isEnabled()); @@ -619,12 +620,12 @@ public void testConflictAlterOperations() throws Exception { // add partition when dynamic partition is enable stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01') " + - "distributed by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')"; + "distributed by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')"; alterTableWithNewParser(stmt, true); // add temp partition when dynamic partition is enable stmt = "alter table test.tbl1 add temporary partition tp3 values less than('2020-04-01') " + - "distributed by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')"; + "distributed by hash(k2) buckets 4 PROPERTIES ('replication_num' = '1')"; alterTableWithNewParser(stmt, false); Assert.assertEquals(1, tbl.getTempPartitions().size()); @@ -635,7 +636,7 @@ public void testConflictAlterOperations() throws Exception { // add partition when dynamic partition is disable stmt = "alter table test.tbl1 add partition p3 values less than('2020-04-01') " + - "distributed by hash(k2) buckets 4"; + "distributed by hash(k2) buckets 4"; alterTableWithNewParser(stmt, false); // set table's default replication num @@ -652,25 +653,25 @@ public void testConflictAlterOperations() throws Exception { Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl.getPartitionInfo().getReplicationNum(p1.getId()))); // set un-partitioned table's real replication num - OlapTable tbl2 = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "tbl2"); + OlapTable tbl2 = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "tbl2"); Partition partition = tbl2.getPartition(tbl2.getName()); Assert.assertEquals(Short.valueOf("1"), - Short.valueOf(tbl2.getPartitionInfo().getReplicationNum(partition.getId()))); + Short.valueOf(tbl2.getPartitionInfo().getReplicationNum(partition.getId()))); // partition replication num and table default replication num are updated at the same time in unpartitioned table stmt = "alter table test.tbl2 set ('replication_num' = '3');"; alterTableWithNewParser(stmt, false); Assert.assertEquals(Short.valueOf("3"), - Short.valueOf(tbl2.getPartitionInfo().getReplicationNum(partition.getId()))); + Short.valueOf(tbl2.getPartitionInfo().getReplicationNum(partition.getId()))); Assert.assertEquals(Short.valueOf("3"), tbl2.getDefaultReplicationNum()); stmt = "alter table test.tbl2 set ('default.replication_num' = '2');"; alterTableWithNewParser(stmt, false); 
Assert.assertEquals(Short.valueOf("2"), - Short.valueOf(tbl2.getPartitionInfo().getReplicationNum(partition.getId()))); + Short.valueOf(tbl2.getPartitionInfo().getReplicationNum(partition.getId()))); Assert.assertEquals(Short.valueOf("2"), tbl2.getDefaultReplicationNum()); stmt = "alter table test.tbl2 modify partition tbl2 set ('replication_num' = '1');"; alterTableWithNewParser(stmt, false); Assert.assertEquals(Short.valueOf("1"), - Short.valueOf(tbl2.getPartitionInfo().getReplicationNum(partition.getId()))); + Short.valueOf(tbl2.getPartitionInfo().getReplicationNum(partition.getId()))); Assert.assertEquals(Short.valueOf("1"), tbl2.getDefaultReplicationNum()); Thread.sleep(5000); // sleep to wait dynamic partition scheduler run @@ -683,8 +684,8 @@ public void testConflictAlterOperations() throws Exception { alterTableWithNewParser(stmt, false); stmt = "alter table test.tbl1 " + - "add TEMPORARY partition p5 values [('2020-04-10'), ('2020-05-10')) ('replication_num' = '1') " + - "DISTRIBUTED BY HASH(k2) BUCKETS 3 PROPERTIES('replication_num' = '1');"; + "add TEMPORARY partition p5 values [('2020-04-10'), ('2020-05-10')) ('replication_num' = '1') " + + "DISTRIBUTED BY HASH(k2) BUCKETS 3 PROPERTIES('replication_num' = '1');"; alterTableWithNewParser(stmt, false); //rename table stmt = "alter table test.tbl1 rename newTableName"; @@ -694,8 +695,8 @@ public void testConflictAlterOperations() throws Exception { // test batch update range partitions' properties @Test public void testBatchUpdatePartitionProperties() throws Exception { - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); - OlapTable tbl4 = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "tbl4"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); + OlapTable tbl4 = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "tbl4"); Partition p1 = tbl4.getPartition("p1"); Partition p2 = tbl4.getPartition("p2"); Partition p3 = tbl4.getPartition("p3"); @@ -706,12 +707,12 @@ public void testBatchUpdatePartitionProperties() throws Exception { List<Partition> partitionList = Lists.newArrayList(p1, p2, p4); for (Partition partition : partitionList) { Assert.assertEquals(Short.valueOf("1"), - Short.valueOf(tbl4.getPartitionInfo().getReplicationNum(partition.getId()))); + Short.valueOf(tbl4.getPartitionInfo().getReplicationNum(partition.getId()))); } alterTableWithNewParser(stmt, false); for (Partition partition : partitionList) { Assert.assertEquals(Short.valueOf("3"), - Short.valueOf(tbl4.getPartitionInfo().getReplicationNum(partition.getId()))); + Short.valueOf(tbl4.getPartitionInfo().getReplicationNum(partition.getId()))); } Assert.assertEquals(Short.valueOf("1"), Short.valueOf(tbl4.getPartitionInfo().getReplicationNum(p3.getId()))); @@ -749,7 +750,7 @@ public void testBatchUpdatePartitionProperties() throws Exception { alterTableWithNewParser(stmt, false); for (Partition partition : partitionList) { Assert.assertEquals(Short.valueOf("1"), - Short.valueOf(tbl4.getPartitionInfo().getReplicationNum(partition.getId()))); + Short.valueOf(tbl4.getPartitionInfo().getReplicationNum(partition.getId()))); } } @@ -769,8 +770,8 @@ public void testDynamicPartitionDropAndAdd() throws Exception { alterTable(stmt, false); Thread.sleep(5000); // sleep to wait dynamic partition scheduler run - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); - OlapTable tbl = (OlapTable) 
GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "tbl3"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksConnector().getDb("test"); + OlapTable tbl = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksConnector().getTable(db.getFullName(), "tbl3"); Assert.assertEquals(4, tbl.getPartitionNames().size()); Assert.assertNull(tbl.getPartition("p1")); Assert.assertNull(tbl.getPartition("p2")); @@ -784,11 +785,11 @@ private void waitSchemaChangeJobDone(boolean rollupJob, OlapTable tb) throws Int for (AlterJobV2 alterJobV2 : alterJobs.values()) { while (!alterJobV2.getJobState().isFinalState()) { System.out.println( - "alter job " + alterJobV2.getJobId() + " is running. state: " + alterJobV2.getJobState()); + "alter job " + alterJobV2.getJobId() + " is running. state: " + alterJobV2.getJobState()); Thread.sleep(1000); } System.out.println(alterJobV2.getType() + " alter job " + alterJobV2.getJobId() + " is done. state: " + - alterJobV2.getJobState()); + alterJobV2.getJobState()); Assert.assertEquals(AlterJobV2.JobState.FINISHED, alterJobV2.getJobState()); } checkTableStateToNormal(tb); @@ -798,36 +799,36 @@ private void waitSchemaChangeJobDone(boolean rollupJob, OlapTable tb) throws Int public void testSetDynamicPropertiesInNormalTable() throws Exception { String tableName = "no_dynamic_table"; String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ");"; + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); String alterStmt = "alter table test." + tableName + " set (\"dynamic_partition.enable\" = \"true\");"; alterTableWithNewParserAndExceptionMsg(alterStmt, "Table test.no_dynamic_table is not a dynamic partition table."); // test set dynamic properties in a no dynamic partition table String stmt = "alter table test." 
+ tableName + " set (\n" + - "'dynamic_partition.enable' = 'true',\n" + - "'dynamic_partition.time_unit' = 'DAY',\n" + - "'dynamic_partition.start' = '-3',\n" + - "'dynamic_partition.end' = '3',\n" + - "'dynamic_partition.prefix' = 'p',\n" + - "'dynamic_partition.buckets' = '3'\n" + - " );"; + "'dynamic_partition.enable' = 'true',\n" + + "'dynamic_partition.time_unit' = 'DAY',\n" + + "'dynamic_partition.start' = '-3',\n" + + "'dynamic_partition.end' = '3',\n" + + "'dynamic_partition.prefix' = 'p',\n" + + "'dynamic_partition.buckets' = '3'\n" + + " );"; alterTableWithNewParser(stmt, false); } @@ -835,30 +836,30 @@ public void testSetDynamicPropertiesInNormalTable() throws Exception { public void testSetDynamicPropertiesInDynamicPartitionTable() throws Exception { String tableName = "dynamic_table"; String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); String alterStmt1 = "alter table test." 
+ tableName + " set (\"dynamic_partition.enable\" = \"false\");"; @@ -879,32 +880,33 @@ public void testSetDynamicPropertiesInDynamicPartitionTable() throws Exception { public void testDynamicPartitionTableMetaFailed() throws Exception { String tableName = "dynamic_table_test"; String createOlapTblStmt = "CREATE TABLE test.`" + tableName + "` (\n" + - " `k1` date NULL COMMENT \"\",\n" + - " `k2` int NULL COMMENT \"\",\n" + - " `k3` smallint NULL COMMENT \"\",\n" + - " `v1` varchar(2048) NULL COMMENT \"\",\n" + - " `v2` datetime NULL COMMENT \"\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + - "COMMENT \"OLAP\"\n" + - "PARTITION BY RANGE (k1)\n" + - "(\n" + - "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + - "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + - "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"dynamic_partition.enable\" = \"true\",\n" + - "\"dynamic_partition.start\" = \"-3\",\n" + - "\"dynamic_partition.end\" = \"3\",\n" + - "\"dynamic_partition.time_unit\" = \"day\",\n" + - "\"dynamic_partition.prefix\" = \"p\",\n" + - "\"dynamic_partition.buckets\" = \"1\"\n" + - ");"; + " `k1` date NULL COMMENT \"\",\n" + + " `k2` int NULL COMMENT \"\",\n" + + " `k3` smallint NULL COMMENT \"\",\n" + + " `v1` varchar(2048) NULL COMMENT \"\",\n" + + " `v2` datetime NULL COMMENT \"\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n" + + "COMMENT \"OLAP\"\n" + + "PARTITION BY RANGE (k1)\n" + + "(\n" + + "PARTITION p1 VALUES LESS THAN (\"2014-01-01\"),\n" + + "PARTITION p2 VALUES LESS THAN (\"2014-06-01\"),\n" + + "PARTITION p3 VALUES LESS THAN (\"2014-12-01\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"dynamic_partition.enable\" = \"true\",\n" + + "\"dynamic_partition.start\" = \"-3\",\n" + + "\"dynamic_partition.end\" = \"3\",\n" + + "\"dynamic_partition.time_unit\" = \"day\",\n" + + "\"dynamic_partition.prefix\" = \"p\",\n" + + "\"dynamic_partition.buckets\" = \"1\"\n" + + ");"; createTable(createOlapTblStmt); - OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test").getTable(tableName); + OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .getDb("test").getTable(tableName); olapTable.getTableProperty().getProperties().remove("dynamic_partition.end"); olapTable.getTableProperty().gsonPostProcess(); } @@ -912,52 +914,52 @@ public void testDynamicPartitionTableMetaFailed() throws Exception { @Test public void testSwapTable() throws Exception { String stmt1 = "CREATE TABLE test.replace1\n" + - "(\n" + - " k1 int, k2 int, k3 int sum\n" + - ")\n" + - "AGGREGATE KEY(k1, k2)\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" + - "rollup (\n" + - "r1(k1),\n" + - "r2(k2, k3)\n" + - ")\n" + - "PROPERTIES(\"replication_num\" = \"1\");"; + "(\n" + + " k1 int, k2 int, k3 int sum\n" + + ")\n" + + "AGGREGATE KEY(k1, k2)\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 10\n" + + "rollup (\n" + + "r1(k1),\n" + + "r2(k2, k3)\n" + + ")\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; String stmt2 = "CREATE TABLE test.r1\n" + - "(\n" + - " k1 int, k2 int\n" + - ")\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 11\n" + - "PROPERTIES(\"replication_num\" = \"1\");"; + "(\n" + + " k1 int, k2 int\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 11\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; String 
stmt3 = "CREATE TABLE test.replace2\n" + - "(\n" + - " k1 int, k2 int\n" + - ")\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 11\n" + - "PROPERTIES(\"replication_num\" = \"1\");"; + "(\n" + + " k1 int, k2 int\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 11\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; String stmt4 = "CREATE TABLE test.replace3\n" + - "(\n" + - " k1 int, k2 int, k3 int sum\n" + - ")\n" + - "PARTITION BY RANGE(k1)\n" + - "(\n" + - "\tPARTITION p1 values less than(\"100\"),\n" + - "\tPARTITION p2 values less than(\"200\")\n" + - ")\n" + - "DISTRIBUTED BY HASH(k1) BUCKETS 1\n" + - "rollup (\n" + - "r3(k1),\n" + - "r4(k2, k3)\n" + - ")\n" + - "PROPERTIES(\"replication_num\" = \"1\");"; + "(\n" + + " k1 int, k2 int, k3 int sum\n" + + ")\n" + + "PARTITION BY RANGE(k1)\n" + + "(\n" + + "\tPARTITION p1 values less than(\"100\"),\n" + + "\tPARTITION p2 values less than(\"200\")\n" + + ")\n" + + "DISTRIBUTED BY HASH(k1) BUCKETS 1\n" + + "rollup (\n" + + "r3(k1),\n" + + "r4(k2, k3)\n" + + ")\n" + + "PROPERTIES(\"replication_num\" = \"1\");"; createTable(stmt1); createTable(stmt2); createTable(stmt3); createTable(stmt4); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); // name conflict String replaceStmt = "ALTER TABLE test.replace1 SWAP WITH r1"; @@ -966,26 +968,30 @@ public void testSwapTable() throws Exception { // replace1 with replace2 replaceStmt = "ALTER TABLE test.replace1 SWAP WITH replace2"; OlapTable replace1 = - (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "replace1"); + (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "replace1"); OlapTable replace2 = - (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "replace2"); + (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "replace2"); Assert.assertEquals(3, - replace1.getPartition("replace1").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE) - .size()); + replace1.getPartition("replace1").getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE) + .size()); Assert.assertEquals(1, - replace2.getPartition("replace2").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE) - .size()); + replace2.getPartition("replace2").getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE) + .size()); alterTableWithNewParser(replaceStmt, false); - replace1 = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "replace1"); - replace2 = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "replace2"); + replace1 = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "replace1"); + replace2 = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "replace2"); Assert.assertEquals(1, - replace1.getPartition("replace1").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE) - .size()); + replace1.getPartition("replace1").getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE) + .size()); Assert.assertEquals(3, - replace2.getPartition("replace2").getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE) - .size()); + 
replace2.getPartition("replace2").getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE) + .size()); Assert.assertEquals("replace1", replace1.getIndexNameById(replace1.getBaseIndexId())); Assert.assertEquals("replace2", replace2.getIndexNameById(replace2.getBaseIndexId())); } @@ -1009,7 +1015,7 @@ public void testSwapTableWithUniqueConstraints() throws Exception { createTable(s1); createTable(s2); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String replaceStmt = "ALTER TABLE s1 SWAP WITH s2"; alterTableWithNewParser(replaceStmt, false); @@ -1055,7 +1061,7 @@ public void testSwapTableWithForeignConstraints1() throws Exception { createTable(s1); createTable(s2); createTable(s3); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); // swap child tables String replaceStmt = "ALTER TABLE s2 SWAP WITH s3"; @@ -1130,7 +1136,7 @@ public void testSwapTableWithForeignConstraints2() throws Exception { createTable(s1); createTable(s2); createTable(s3); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); // swap parent tables String replaceStmt = "ALTER TABLE s2 SWAP WITH s1"; @@ -1178,36 +1184,36 @@ public void testCatalogAddPartitionsDay() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String dropSQL = "drop table if exists test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); String createSQL = "CREATE TABLE test.test_partition (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - " START (\"20140101\") END (\"20140104\") EVERY (INTERVAL 1 DAY)\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + " START (\"20140101\") END (\"20140104\") EVERY (INTERVAL 1 DAY)\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String alterSQL = "ALTER TABLE test_partition ADD\n" + - " PARTITIONS START (\"2017-01-03\") END (\"2017-01-07\") EVERY (interval 1 day)"; + " PARTITIONS START (\"2017-01-03\") END (\"2017-01-07\") EVERY (interval 1 day)"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) 
alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition"); Assert.assertNotNull(table.getPartition("p20170103")); Assert.assertNotNull(table.getPartition("p20170104")); @@ -1217,7 +1223,7 @@ public void testCatalogAddPartitionsDay() throws Exception { dropSQL = "drop table test_partition"; dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @@ -1226,36 +1232,36 @@ public void testAddPhysicalPartition() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String dropSQL = "drop table if exists test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); String createSQL = "CREATE TABLE test.test_partition (\n" + - "      k2 DATE,\n" + - "      k3 SMALLINT,\n" + - "      v1 VARCHAR(2048),\n" + - "      v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "DISTRIBUTED BY RANDOM BUCKETS 10\n" + - "PROPERTIES (\n" + - "    \"replication_num\" = \"1\"\n" + - ")"; + "      k2 DATE,\n" + + "      k3 SMALLINT,\n" + + "      v1 VARCHAR(2048),\n" + + "      v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "DISTRIBUTED BY RANDOM BUCKETS 10\n" + + "PROPERTIES (\n" + + "    \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); - OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); + OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition"); Optional<Partition> partition = table.getPartitions().stream().findFirst(); Assert.assertTrue(partition.isPresent()); Assert.assertEquals(table.getPhysicalPartitions().size(), 1); - GlobalStateMgr.getCurrentState().getLocalMetastore().addSubPartitions(db, table, partition.get(), 1, - WarehouseManager.DEFAULT_WAREHOUSE_ID); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().addSubPartitions(db, table, partition.get(), 1, + WarehouseManager.DEFAULT_WAREHOUSE_ID); Assert.assertEquals(partition.get().getSubPartitions().size(), 2); Assert.assertEquals(table.getPhysicalPartitions().size(), 2); - GlobalStateMgr.getCurrentState().getLocalMetastore().addSubPartitions(db, table, partition.get(), 2, - WarehouseManager.DEFAULT_WAREHOUSE_ID); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().addSubPartitions(db, table, 
partition.get(), 2, + WarehouseManager.DEFAULT_WAREHOUSE_ID); Assert.assertEquals(partition.get().getSubPartitions().size(), 4); Assert.assertEquals(table.getPhysicalPartitions().size(), 4); @@ -1271,7 +1277,7 @@ public void testAddPhysicalPartition() throws Exception { dropSQL = "drop table test_partition"; dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test @@ -1279,49 +1285,49 @@ public void testAddRangePhysicalPartition() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String dropSQL = "drop table if exists test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); String createSQL = "CREATE TABLE test.test_partition (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - " START (\"20140101\") END (\"20140104\") EVERY (INTERVAL 1 DAY)\n" + - ")\n" + - "DISTRIBUTED BY RANDOM BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + " START (\"20140101\") END (\"20140104\") EVERY (INTERVAL 1 DAY)\n" + + ")\n" + + "DISTRIBUTED BY RANDOM BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); - OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); + OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition"); Assert.assertEquals(table.getPhysicalPartitions().size(), 3); Partition partition = table.getPartition("p20140101"); Assert.assertNotNull(partition); - GlobalStateMgr.getCurrentState().getLocalMetastore().addSubPartitions(db, table, partition, 1, - WarehouseManager.DEFAULT_WAREHOUSE_ID); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().addSubPartitions(db, table, partition, 1, + WarehouseManager.DEFAULT_WAREHOUSE_ID); Assert.assertEquals(table.getPhysicalPartitions().size(), 4); Assert.assertEquals(partition.getSubPartitions().size(), 2); partition = table.getPartition("p20140103"); Assert.assertNotNull(partition); - GlobalStateMgr.getCurrentState().getLocalMetastore().addSubPartitions(db, table, partition, 2, - WarehouseManager.DEFAULT_WAREHOUSE_ID); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().addSubPartitions(db, table, partition, 2, + WarehouseManager.DEFAULT_WAREHOUSE_ID); Assert.assertEquals(table.getPhysicalPartitions().size(), 6); Assert.assertEquals(partition.getSubPartitions().size(), 3); dropSQL = "drop table test_partition"; 
dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test(expected = DdlException.class) @@ -1329,31 +1335,31 @@ public void testAddPhysicalPartitionForHash() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String dropSQL = "drop table if exists test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); String createSQL = "CREATE TABLE test.test_partition (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); - OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); + OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition"); Optional partition = table.getPartitions().stream().findFirst(); Assert.assertTrue(partition.isPresent()); Assert.assertEquals(table.getPhysicalPartitions().size(), 1); - GlobalStateMgr.getCurrentState().getLocalMetastore().addSubPartitions(db, table, partition.get(), 1, - WarehouseManager.DEFAULT_WAREHOUSE_ID); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().addSubPartitions(db, table, partition.get(), 1, + WarehouseManager.DEFAULT_WAREHOUSE_ID); } @Test @@ -1383,33 +1389,33 @@ public void testAddBackend() throws Exception { public void testCatalogAddPartitions5Day() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test.test_partition (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = 
GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String alterSQL = "ALTER TABLE test_partition ADD\n" + - " PARTITIONS START (\"2017-01-03\") END (\"2017-01-15\") EVERY (interval 5 day)"; + " PARTITIONS START (\"2017-01-03\") END (\"2017-01-15\") EVERY (interval 5 day)"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition"); Assert.assertNotNull(table.getPartition("p20170103")); Assert.assertNotNull(table.getPartition("p20170108")); @@ -1417,38 +1423,38 @@ public void testCatalogAddPartitions5Day() throws Exception { String dropSQL = "drop table test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test(expected = AnalysisException.class) public void testCatalogAddPartitionsDayConflictException() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test.test_partition_exception (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - " START (\"20140101\") END (\"20140104\") EVERY (INTERVAL 1 DAY)\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + " START (\"20140101\") END (\"20140104\") EVERY (INTERVAL 1 DAY)\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String alterSQL = "ALTER TABLE test_partition_exception ADD\n" + - " PARTITIONS START (\"2014-01-01\") END (\"2014-01-04\") EVERY (interval 1 day)"; + " PARTITIONS START (\"2014-01-01\") END (\"2014-01-04\") EVERY (interval 1 day)"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, 
"test_partition_exception", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exception", addPartitionClause); } @Test @@ -1456,33 +1462,33 @@ public void testCatalogAddPartitionsWeekWithoutCheck() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); Config.enable_create_partial_partition_in_batch = true; String createSQL = "CREATE TABLE test.test_partition_week (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String alterSQL = "ALTER TABLE test_partition_week ADD\n" + - " PARTITIONS START (\"2017-03-25\") END (\"2017-04-10\") EVERY (interval 1 week)"; + " PARTITIONS START (\"2017-03-25\") END (\"2017-04-10\") EVERY (interval 1 week)"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_week", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_week", addPartitionClause); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition_week"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition_week"); Assert.assertNotNull(table.getPartition("p2017_12")); Assert.assertNotNull(table.getPartition("p2017_13")); @@ -1490,7 +1496,7 @@ public void testCatalogAddPartitionsWeekWithoutCheck() throws Exception { String dropSQL = "drop table test_partition_week"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); Config.enable_create_partial_partition_in_batch = false; } @@ -1498,33 +1504,33 @@ public void testCatalogAddPartitionsWeekWithoutCheck() throws Exception { public void testCatalogAddPartitionsWeekWithCheck() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test.test_partition_week (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - ")\n" + - 
"DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String alterSQL = "ALTER TABLE test_partition_week ADD\n" + - " PARTITIONS START (\"2017-03-20\") END (\"2017-04-10\") EVERY (interval 1 week)"; + " PARTITIONS START (\"2017-03-20\") END (\"2017-04-10\") EVERY (interval 1 week)"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_week", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_week", addPartitionClause); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition_week"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition_week"); Assert.assertNotNull(table.getPartition("p2017_12")); Assert.assertNotNull(table.getPartition("p2017_13")); @@ -1532,7 +1538,7 @@ public void testCatalogAddPartitionsWeekWithCheck() throws Exception { String dropSQL = "drop table test_partition_week"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test @@ -1540,35 +1546,35 @@ public void testCatalogAddPartitionsMonth() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String dropSQL = "drop table if exists test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); String createSQL = "CREATE TABLE test.test_partition (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) 
UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String alterSQL = "ALTER TABLE test_partition ADD\n" + - " PARTITIONS START (\"2017-01-01\") END (\"2017-04-01\") EVERY (interval 1 month)"; + " PARTITIONS START (\"2017-01-01\") END (\"2017-04-01\") EVERY (interval 1 month)"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition"); Assert.assertNotNull(table.getPartition("p201701")); Assert.assertNotNull(table.getPartition("p201702")); @@ -1577,7 +1583,7 @@ public void testCatalogAddPartitionsMonth() throws Exception { dropSQL = "drop table test_partition"; dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test @@ -1585,35 +1591,35 @@ public void testCatalogAddPartitionsYear() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String dropSQL = "drop table if exists test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); String createSQL = "CREATE TABLE test.test_partition (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String alterSQL = "ALTER TABLE test_partition ADD\n" + - " PARTITIONS START (\"2017-01-01\") END (\"2020-01-01\") EVERY (interval 1 YEAR)"; + " PARTITIONS START (\"2017-01-01\") END (\"2020-01-01\") EVERY (interval 1 YEAR)"; AlterTableStmt alterTableStmt = (AlterTableStmt) 
UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition"); Assert.assertNotNull(table.getPartition("p2017")); Assert.assertNotNull(table.getPartition("p2018")); @@ -1622,7 +1628,7 @@ public void testCatalogAddPartitionsYear() throws Exception { dropSQL = "drop table test_partition"; dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test @@ -1630,35 +1636,35 @@ public void testCatalogAddPartitionsNumber() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String dropSQL = "drop table if exists test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); String createSQL = "CREATE TABLE test.test_partition (\n" + - " k2 INT,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 INT,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String alterSQL = "ALTER TABLE test_partition ADD\n" + - " PARTITIONS START (\"1\") END (\"4\") EVERY (1)"; + " PARTITIONS START (\"1\") END (\"4\") EVERY (1)"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + 
.getTable("test_partition"); Assert.assertNotNull(table.getPartition("p1")); Assert.assertNotNull(table.getPartition("p2")); @@ -1667,7 +1673,7 @@ public void testCatalogAddPartitionsNumber() throws Exception { dropSQL = "drop table test_partition"; dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test @@ -1675,41 +1681,41 @@ public void testCatalogAddPartitionsAtomicRange() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String dropSQL = "drop table if exists test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); String createSQL = "CREATE TABLE test_partition (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - " START (\"20140101\") END (\"20150101\") EVERY (INTERVAL 1 YEAR)\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + " START (\"20140101\") END (\"20150101\") EVERY (INTERVAL 1 YEAR)\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); try { String alterSQL = "ALTER TABLE test_partition ADD\n" + - " PARTITIONS START (\"2014-01-01\") END (\"2014-01-06\") EVERY (interval 1 day);"; + " PARTITIONS START (\"2014-01-01\") END (\"2014-01-06\") EVERY (interval 1 day);"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); Assert.fail(); } catch (AnalysisException ex) { } - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition"); Assert.assertNull(table.getPartition("p20140101")); Assert.assertNull(table.getPartition("p20140102")); @@ -1717,7 +1723,7 @@ public void testCatalogAddPartitionsAtomicRange() throws Exception { dropSQL = "drop table test_partition"; dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - 
GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @@ -1725,39 +1731,39 @@ public void testCatalogAddPartitionsZeroDay() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test_partition_0day (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - " START (\"20140104\") END (\"20150104\") EVERY (INTERVAL 1 YEAR)\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + " START (\"20140104\") END (\"20150104\") EVERY (INTERVAL 1 YEAR)\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); try { String alterSQL = "ALTER TABLE test_partition_0day ADD\n" + - " PARTITIONS START (\"2014-01-01\") END (\"2014-01-06\") EVERY (interval 0 day);"; + " PARTITIONS START (\"2014-01-01\") END (\"2014-01-06\") EVERY (interval 0 day);"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_0day", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_0day", addPartitionClause); Assert.fail(); } catch (AnalysisException ex) { } - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition_0day"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition_0day"); Assert.assertNull(table.getPartition("p20140101")); Assert.assertNull(table.getPartition("p20140102")); @@ -1765,7 +1771,7 @@ public void testCatalogAddPartitionsZeroDay() throws Exception { String dropSQL = "drop table test_partition_0day"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @@ -1774,41 +1780,41 @@ public void testCatalogAddPartitionsWithoutPartitions() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String dropSQL = "drop table if exists test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); String createSQL = "CREATE TABLE 
test_partition (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - " START (\"20140101\") END (\"20150101\") EVERY (INTERVAL 1 YEAR)\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + " START (\"20140101\") END (\"20150101\") EVERY (INTERVAL 1 YEAR)\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String alterSQL = "ALTER TABLE test_partition ADD\n" + - " START (\"2015-01-01\") END (\"2015-01-06\") EVERY (interval 1 day);"; + " START (\"2015-01-01\") END (\"2015-01-06\") EVERY (interval 1 day);"; try { AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); Assert.fail(); } catch (AnalysisException ex) { } - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition"); Assert.assertNull(table.getPartition("p20140101")); Assert.assertNull(table.getPartition("p20140102")); @@ -1816,160 +1822,160 @@ public void testCatalogAddPartitionsWithoutPartitions() throws Exception { dropSQL = "drop table test_partition"; dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test public void testCatalogAddPartitionsIfNotExist() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test_partition_exists (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - " START (\"20140101\") END (\"20150101\") EVERY (INTERVAL 1 YEAR)\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + " START (\"20140101\") END (\"20150101\") EVERY (INTERVAL 1 YEAR)\n" + + ")\n" + + 
"DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String alterSQL = - "ALTER TABLE test_partition_exists ADD PARTITION IF NOT EXISTS p20210701 VALUES LESS THAN ('2021-07-01')"; + "ALTER TABLE test_partition_exists ADD PARTITION IF NOT EXISTS p20210701 VALUES LESS THAN ('2021-07-01')"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exists", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exists", addPartitionClause); String alterSQL2 = - "ALTER TABLE test_partition_exists ADD PARTITION IF NOT EXISTS p20210701 VALUES LESS THAN ('2021-07-02')"; + "ALTER TABLE test_partition_exists ADD PARTITION IF NOT EXISTS p20210701 VALUES LESS THAN ('2021-07-02')"; AlterTableStmt alterTableStmt2 = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL2, ctx); AddPartitionClause addPartitionClause2 = (AddPartitionClause) alterTableStmt2.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exists", addPartitionClause2); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exists", addPartitionClause2); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition_exists"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition_exists"); Assert.assertEquals(2, table.getPartitions().size()); String dropSQL = "drop table test_partition_exists"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test public void testCatalogAddPartitionsSameNameShouldNotThrowError() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test_partition_exists2 (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - " START (\"20140101\") END (\"20150101\") EVERY (INTERVAL 1 YEAR)\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + " START (\"20140101\") END (\"20150101\") EVERY (INTERVAL 1 YEAR)\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " 
\"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String alterSQL = - "ALTER TABLE test_partition_exists2 ADD PARTITION IF NOT EXISTS p20210701 VALUES LESS THAN ('2021-07-01')"; + "ALTER TABLE test_partition_exists2 ADD PARTITION IF NOT EXISTS p20210701 VALUES LESS THAN ('2021-07-01')"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exists2", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exists2", addPartitionClause); String alterSQL2 = - "ALTER TABLE test_partition_exists2 ADD PARTITION IF NOT EXISTS p20210701 VALUES LESS THAN ('2021-07-01')"; + "ALTER TABLE test_partition_exists2 ADD PARTITION IF NOT EXISTS p20210701 VALUES LESS THAN ('2021-07-01')"; AlterTableStmt alterTableStmt2 = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL2, ctx); AddPartitionClause addPartitionClause2 = (AddPartitionClause) alterTableStmt2.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exists2", addPartitionClause2); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exists2", addPartitionClause2); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition_exists2"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition_exists2"); Assert.assertEquals(2, table.getPartitions().size()); String dropSQL = "drop table test_partition_exists2"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test(expected = AnalysisException.class) public void testCatalogAddPartitionsShouldThrowError() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test_partition_exists3 (\n" + - " k2 DATE,\n" + - " k3 SMALLINT,\n" + - " v1 VARCHAR(2048),\n" + - " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(k2, k3)\n" + - "PARTITION BY RANGE (k2) (\n" + - " START (\"20140101\") END (\"20150101\") EVERY (INTERVAL 1 YEAR)\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " k2 DATE,\n" + + " k3 SMALLINT,\n" + + " v1 VARCHAR(2048),\n" + + " v2 DATETIME DEFAULT \"2014-02-04 15:36:00\"\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(k2, k3)\n" + + "PARTITION BY RANGE (k2) (\n" + + " START (\"20140101\") END (\"20150101\") EVERY (INTERVAL 1 YEAR)\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + 
")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); String alterSQL = "ALTER TABLE test_partition_exists3 ADD PARTITION p20210701 VALUES LESS THAN ('2021-07-01')"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exists3", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exists3", addPartitionClause); String alterSQL2 = "ALTER TABLE test_partition_exists3 ADD PARTITION p20210701 VALUES LESS THAN ('2021-07-01')"; AlterTableStmt alterTableStmt2 = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL2, ctx); AddPartitionClause addPartitionClause2 = (AddPartitionClause) alterTableStmt2.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exists3", addPartitionClause2); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_exists3", addPartitionClause2); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition_exists3"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition_exists3"); Assert.assertEquals(2, ((OlapTable) table).getPartitions().size()); String dropSQL = "drop table test_partition_exists3"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test public void testRenameDb() throws Exception { String createUserSql = "CREATE USER 'testuser' IDENTIFIED BY ''"; CreateUserStmt createUserStmt = - (CreateUserStmt) UtFrameUtils.parseStmtWithNewParser(createUserSql, starRocksAssert.getCtx()); + (CreateUserStmt) UtFrameUtils.parseStmtWithNewParser(createUserSql, starRocksAssert.getCtx()); AuthenticationMgr authenticationManager = - starRocksAssert.getCtx().getGlobalStateMgr().getAuthenticationMgr(); + starRocksAssert.getCtx().getGlobalStateMgr().getAuthenticationMgr(); authenticationManager.createUser(createUserStmt); String sql = "grant ALTER on database test to testuser"; DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(sql, starRocksAssert.getCtx()), - starRocksAssert.getCtx()); + starRocksAssert.getCtx()); UserIdentity testUser = new UserIdentity("testuser", "%"); testUser.analyze(); @@ -1977,13 +1983,13 @@ public void testRenameDb() throws Exception { starRocksAssert.getCtx().setQualifiedUser("testuser"); starRocksAssert.getCtx().setCurrentUserIdentity(testUser); starRocksAssert.getCtx().setCurrentRoleIds( - GlobalStateMgr.getCurrentState().getAuthorizationMgr().getRoleIdsByUser(testUser)); + GlobalStateMgr.getCurrentState().getAuthorizationMgr().getRoleIdsByUser(testUser)); starRocksAssert.getCtx().setRemoteIP("%"); 
starRocksAssert.withDatabase("test_to_rename"); String renameDb = "alter database test_to_rename rename test_to_rename_2"; AlterDatabaseRenameStatement renameDbStmt = - (AlterDatabaseRenameStatement) UtFrameUtils.parseStmtWithNewParser(renameDb, starRocksAssert.getCtx()); + (AlterDatabaseRenameStatement) UtFrameUtils.parseStmtWithNewParser(renameDb, starRocksAssert.getCtx()); DDLStmtExecutor.execute(renameDbStmt, starRocksAssert.getCtx()); } @@ -1991,38 +1997,38 @@ public void testRenameDb() throws Exception { public void testAddMultiItemListPartition() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test.test_partition (\n" + - " id BIGINT,\n" + - " age SMALLINT,\n" + - " dt VARCHAR(10) not null,\n" + - " province VARCHAR(64) not null\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(id)\n" + - "PARTITION BY LIST (dt,province) (\n" + - " PARTITION p1 VALUES IN ((\"2022-04-01\", \"beijing\"),(\"2022-04-01\", \"chongqing\")),\n" + - " PARTITION p2 VALUES IN ((\"2022-04-01\", \"shanghai\")) \n" + - ")\n" + - "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " id BIGINT,\n" + + " age SMALLINT,\n" + + " dt VARCHAR(10) not null,\n" + + " province VARCHAR(64) not null\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(id)\n" + + "PARTITION BY LIST (dt,province) (\n" + + " PARTITION p1 VALUES IN ((\"2022-04-01\", \"beijing\"),(\"2022-04-01\", \"chongqing\")),\n" + + " PARTITION p2 VALUES IN ((\"2022-04-01\", \"shanghai\")) \n" + + ")\n" + + "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); List values = Lists.newArrayList("2022-04-01", "shandong"); List> multiValues = Lists.newArrayList(); multiValues.add(values); PartitionDesc partitionDesc = new MultiItemListPartitionDesc(false, "p3", multiValues, new HashMap<>()); AddPartitionClause addPartitionClause = new AddPartitionClause(partitionDesc, null, new HashMap<>(), false); - OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") - .getTable("test_partition"); + OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test") + .getTable("test_partition"); AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(table); analyzer.analyze(Util.getOrCreateConnectContext(), addPartitionClause); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); ListPartitionInfo partitionInfo = (ListPartitionInfo) table.getPartitionInfo(); Map>> idToValues = partitionInfo.getIdToMultiValues(); @@ -2034,62 +2040,63 @@ public void testAddMultiItemListPartition() throws Exception { String dropSQL = "drop table test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + 
GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test(expected = AlterJobException.class) public void testModifyPartitionBucket() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE modify_bucket (\n" + - " chuangyi varchar(65533) NULL COMMENT \"创意\",\n" + - " guanggao varchar(65533) NULL COMMENT \"广告\"\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(chuangyi, guanggao)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(chuangyi, guanggao) BUCKETS 3\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ");"; + " chuangyi varchar(65533) NULL COMMENT \"创意\",\n" + + " guanggao varchar(65533) NULL COMMENT \"广告\"\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(chuangyi, guanggao)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(chuangyi, guanggao) BUCKETS 3\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ");"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); String stmt = "alter table modify_bucket set (\"dynamic_partition.buckets\" = \"10\");\n"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(stmt, starRocksAssert.getCtx()); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt); } @Test public void testAddSingleItemListPartition() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test.test_partition (\n" + - " id BIGINT,\n" + - " age SMALLINT,\n" + - " dt VARCHAR(10),\n" + - " province VARCHAR(64) not null\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(id)\n" + - "PARTITION BY LIST (province) (\n" + - " PARTITION p1 VALUES IN (\"beijing\",\"chongqing\") ,\n" + - " PARTITION p2 VALUES IN (\"guangdong\") \n" + - ")\n" + - "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " id BIGINT,\n" + + " age SMALLINT,\n" + + " dt VARCHAR(10),\n" + + " province VARCHAR(64) not null\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(id)\n" + + "PARTITION BY LIST (province) (\n" + + " PARTITION p1 VALUES IN (\"beijing\",\"chongqing\") ,\n" + + " PARTITION p2 VALUES IN (\"guangdong\") \n" + + ")\n" + + "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); List values = Lists.newArrayList("shanxi", "shanghai"); PartitionDesc partitionDesc = new SingleItemListPartitionDesc(false, "p3", values, new HashMap<>()); - AddPartitionClause addPartitionClause = new AddPartitionClause(partitionDesc, null, new HashMap<>(), false); + AddPartitionClause addPartitionClause = + new AddPartitionClause(partitionDesc, null, new HashMap<>(), false); OlapTable table = - (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "test_partition"); + (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "test_partition"); AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(table); 
analyzer.analyze(Util.getOrCreateConnectContext(), addPartitionClause); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition", addPartitionClause); ListPartitionInfo partitionInfo = (ListPartitionInfo) table.getPartitionInfo(); Map> idToValues = partitionInfo.getIdToValues(); @@ -2100,33 +2107,33 @@ public void testAddSingleItemListPartition() throws Exception { String dropSQL = "drop table test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test public void testSingleItemPartitionPersistInfo() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test.test_partition (\n" + - " id BIGINT,\n" + - " age SMALLINT,\n" + - " dt VARCHAR(10),\n" + - " province VARCHAR(64) not null\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(id)\n" + - "PARTITION BY LIST (province) (\n" + - " PARTITION p1 VALUES IN (\"beijing\",\"chongqing\") \n" + - ")\n" + - "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " id BIGINT,\n" + + " age SMALLINT,\n" + + " dt VARCHAR(10),\n" + + " province VARCHAR(64) not null\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(id)\n" + + "PARTITION BY LIST (province) (\n" + + " PARTITION p1 VALUES IN (\"beijing\",\"chongqing\") \n" + + ")\n" + + "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); - OlapTable table = - (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "test_partition"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); + OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .getTable(db.getFullName(), "test_partition"); ListPartitionInfo partitionInfo = (ListPartitionInfo) table.getPartitionInfo(); long dbId = db.getId(); @@ -2139,8 +2146,8 @@ public void testSingleItemPartitionPersistInfo() throws Exception { boolean isInMemory = partitionInfo.getIsInMemory(partitionId); boolean isTempPartition = false; ListPartitionPersistInfo partitionPersistInfoOut = new ListPartitionPersistInfo(dbId, tableId, partition, - dataProperty, replicationNum, isInMemory, isTempPartition, values, new ArrayList<>(), - partitionInfo.getDataCacheInfo(partitionId)); + dataProperty, replicationNum, isInMemory, isTempPartition, values, new ArrayList<>(), + partitionInfo.getDataCacheInfo(partitionId)); // write log File file = new File("./test_serial.log"); @@ -2177,7 +2184,7 @@ public void testSingleItemPartitionPersistInfo() throws Exception { String dropSQL = "drop table test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + 
GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); file.delete(); } @@ -2185,26 +2192,26 @@ public void testSingleItemPartitionPersistInfo() throws Exception { public void testMultiItemPartitionPersistInfo() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test.test_partition (\n" + - " id BIGINT,\n" + - " age SMALLINT,\n" + - " dt VARCHAR(10) not null,\n" + - " province VARCHAR(64) not null\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(id)\n" + - "PARTITION BY LIST (dt , province) (\n" + - " PARTITION p1 VALUES IN ((\"2022-04-01\", \"beijing\"),(\"2022-04-01\", \"chongqing\"))\n" + - ")\n" + - "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " id BIGINT,\n" + + " age SMALLINT,\n" + + " dt VARCHAR(10) not null,\n" + + " province VARCHAR(64) not null\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(id)\n" + + "PARTITION BY LIST (dt , province) (\n" + + " PARTITION p1 VALUES IN ((\"2022-04-01\", \"beijing\"),(\"2022-04-01\", \"chongqing\"))\n" + + ")\n" + + "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); - OlapTable table = - (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "test_partition"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); + OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .getTable(db.getFullName(), "test_partition"); ListPartitionInfo partitionInfo = (ListPartitionInfo) table.getPartitionInfo(); long dbId = db.getId(); @@ -2217,8 +2224,8 @@ public void testMultiItemPartitionPersistInfo() throws Exception { boolean isInMemory = partitionInfo.getIsInMemory(partitionId); boolean isTempPartition = false; ListPartitionPersistInfo partitionPersistInfoOut = new ListPartitionPersistInfo(dbId, tableId, partition, - dataProperty, replicationNum, isInMemory, isTempPartition, new ArrayList<>(), multiValues, - partitionInfo.getDataCacheInfo(partitionId)); + dataProperty, replicationNum, isInMemory, isTempPartition, new ArrayList<>(), multiValues, + partitionInfo.getDataCacheInfo(partitionId)); // write log File file = new File("./test_serial.log"); @@ -2259,7 +2266,7 @@ public void testMultiItemPartitionPersistInfo() throws Exception { String dropSQL = "drop table test_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); file.delete(); } @@ -2267,61 +2274,61 @@ public void testMultiItemPartitionPersistInfo() throws Exception { public void testAddSingleListPartitionSamePartitionNameShouldThrowError() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test.test_partition_1 (\n" + - " id BIGINT,\n" + - " age SMALLINT,\n" + - " dt VARCHAR(10),\n" + - " province VARCHAR(64) not null\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(id)\n" + - "PARTITION BY LIST (province) (\n" + - " PARTITION p1 VALUES IN (\"beijing\",\"chongqing\") ,\n" + - " PARTITION p2 
VALUES IN (\"guangdong\") \n" + - ")\n" + - "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " id BIGINT,\n" + + " age SMALLINT,\n" + + " dt VARCHAR(10),\n" + + " province VARCHAR(64) not null\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(id)\n" + + "PARTITION BY LIST (province) (\n" + + " PARTITION p1 VALUES IN (\"beijing\",\"chongqing\") ,\n" + + " PARTITION p2 VALUES IN (\"guangdong\") \n" + + ")\n" + + "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); List values = Lists.newArrayList("shanxi", "heilongjiang"); PartitionDesc partitionDesc = new SingleItemListPartitionDesc(false, "p1", values, new HashMap<>()); AddPartitionClause addPartitionClause = new AddPartitionClause(partitionDesc, null, new HashMap<>(), false); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "test_partition_1"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "test_partition_1"); AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(table); analyzer.analyze(Util.getOrCreateConnectContext(), addPartitionClause); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_1", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_1", addPartitionClause); } @Test(expected = SemanticException.class) public void testAddMultiListPartitionSamePartitionNameShouldThrowError() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test.test_partition_2 (\n" + - " id BIGINT,\n" + - " age SMALLINT,\n" + - " dt VARCHAR(10) not null,\n" + - " province VARCHAR(64) not null\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(id)\n" + - "PARTITION BY LIST (dt,province) (\n" + - " PARTITION p1 VALUES IN ((\"2022-04-01\", \"beijing\"),(\"2022-04-01\", \"chongqing\")),\n" + - " PARTITION p2 VALUES IN ((\"2022-04-01\", \"shanghai\")) \n" + - ")\n" + - "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " id BIGINT,\n" + + " age SMALLINT,\n" + + " dt VARCHAR(10) not null,\n" + + " province VARCHAR(64) not null\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(id)\n" + + "PARTITION BY LIST (dt,province) (\n" + + " PARTITION p1 VALUES IN ((\"2022-04-01\", \"beijing\"),(\"2022-04-01\", \"chongqing\")),\n" + + " PARTITION p2 VALUES IN ((\"2022-04-01\", \"shanghai\")) \n" + + ")\n" + + "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); List values1 = Lists.newArrayList("2022-04-01", "beijing"); List values2 = Lists.newArrayList("2022-04-01", 
"chongqing"); @@ -2331,41 +2338,41 @@ public void testAddMultiListPartitionSamePartitionNameShouldThrowError() throws PartitionDesc partitionDesc = new MultiItemListPartitionDesc(false, "p1", multiValues, new HashMap<>()); AddPartitionClause addPartitionClause = new AddPartitionClause(partitionDesc, null, new HashMap<>(), false); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "test_partition_2"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "test_partition_2"); AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(table); analyzer.analyze(Util.getOrCreateConnectContext(), addPartitionClause); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_2", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "test_partition_2", addPartitionClause); } @Test(expected = SemanticException.class) public void testAddSingleListPartitionSamePartitionValueShouldThrowError() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test.test_partition_3 (\n" + - " id BIGINT,\n" + - " age SMALLINT,\n" + - " dt VARCHAR(10),\n" + - " province VARCHAR(64) not null\n" + - ")\n" + - "ENGINE=olap\n" + - "DUPLICATE KEY(id)\n" + - "PARTITION BY LIST (province) (\n" + - " PARTITION p1 VALUES IN (\"beijing\",\"chongqing\") ,\n" + - " PARTITION p2 VALUES IN (\"guangdong\") \n" + - ")\n" + - "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " id BIGINT,\n" + + " age SMALLINT,\n" + + " dt VARCHAR(10),\n" + + " province VARCHAR(64) not null\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(id)\n" + + "PARTITION BY LIST (province) (\n" + + " PARTITION p1 VALUES IN (\"beijing\",\"chongqing\") ,\n" + + " PARTITION p2 VALUES IN (\"guangdong\") \n" + + ")\n" + + "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); List values = Lists.newArrayList("beijing", "chongqing"); PartitionDesc partitionDesc = new SingleItemListPartitionDesc(false, "p3", values, new HashMap<>()); AddPartitionClause addPartitionClause = new AddPartitionClause(partitionDesc, null, new HashMap<>(), false); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "test_partition_3"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "test_partition_3"); AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(table); analyzer.analyze(Util.getOrCreateConnectContext(), addPartitionClause); } @@ -2374,25 +2381,25 @@ public void testAddSingleListPartitionSamePartitionValueShouldThrowError() throw public void testAddMultiItemListPartitionSamePartitionValueShouldThrowError() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String createSQL = "CREATE TABLE test.test_partition_4 (\n" + - " id BIGINT,\n" + - " age SMALLINT,\n" + - " dt VARCHAR(10) not null,\n" + - " province VARCHAR(64) not null\n" + - ")\n" + - 
"ENGINE=olap\n" + - "DUPLICATE KEY(id)\n" + - "PARTITION BY LIST (dt, province) (\n" + - " PARTITION p1 VALUES IN ((\"2022-04-01\", \"beijing\"),(\"2022-04-01\", \"chongqing\")),\n" + - " PARTITION p2 VALUES IN ((\"2022-04-01\", \"shanghai\")) \n" + - ")\n" + - "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"1\"\n" + - ")"; + " id BIGINT,\n" + + " age SMALLINT,\n" + + " dt VARCHAR(10) not null,\n" + + " province VARCHAR(64) not null\n" + + ")\n" + + "ENGINE=olap\n" + + "DUPLICATE KEY(id)\n" + + "PARTITION BY LIST (dt, province) (\n" + + " PARTITION p1 VALUES IN ((\"2022-04-01\", \"beijing\"),(\"2022-04-01\", \"chongqing\")),\n" + + " PARTITION p2 VALUES IN ((\"2022-04-01\", \"shanghai\")) \n" + + ")\n" + + "DISTRIBUTED BY HASH(id) BUCKETS 10\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"1\"\n" + + ")"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); List values = Lists.newArrayList("2022-04-01", "shanghai"); List> multiValues = Lists.newArrayList(); @@ -2400,7 +2407,7 @@ public void testAddMultiItemListPartitionSamePartitionValueShouldThrowError() th PartitionDesc partitionDesc = new MultiItemListPartitionDesc(false, "p3", multiValues, new HashMap<>()); AddPartitionClause addPartitionClause = new AddPartitionClause(partitionDesc, null, new HashMap<>(), false); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "test_partition_4"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "test_partition_4"); AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(table); analyzer.analyze(Util.getOrCreateConnectContext(), addPartitionClause); } @@ -2408,16 +2415,16 @@ public void testAddMultiItemListPartitionSamePartitionValueShouldThrowError() th @Test public void testCatalogAddColumn() throws Exception { starRocksAssert.withDatabase("test").useDatabase("test") - .withTable("CREATE TABLE test.tbl1\n" + - "(\n" + - " k1 date,\n" + - " v1 int \n" + - ")\n" + - "DUPLICATE KEY(`k1`)" + - "DISTRIBUTED BY HASH (k1) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); - OlapTable tbl = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "tbl1"); + .withTable("CREATE TABLE test.tbl1\n" + + "(\n" + + " k1 date,\n" + + " v1 int \n" + + ")\n" + + "DUPLICATE KEY(`k1`)" + + "DISTRIBUTED BY HASH (k1) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test"); + OlapTable tbl = (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "tbl1"); String stmt = "alter table test.tbl1 add column k2 int"; alterTableWithNewParser(stmt, false); @@ -2439,7 +2446,7 @@ public void testCatalogAddColumn() throws Exception { @Test public void testCatalogAddColumns() throws Exception { String stmt = "alter table test.tbl1 add column (`col1` int(11) not null default \"0\" comment \"\", " - + "`col2` int(11) not null default \"0\" comment \"\") in `testTable`;"; + + "`col2` int(11) not null default \"0\" comment \"\") in `testTable`;"; AlterTableStmt 
alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(stmt, starRocksAssert.getCtx()); AddColumnsClause clause = (AddColumnsClause) alterTableStmt.getAlterClauseList().get(0); Assert.assertEquals(2, clause.getColumns().size()); @@ -2447,7 +2454,7 @@ public void testCatalogAddColumns() throws Exception { Assert.assertEquals("testTable", clause.getRollupName()); stmt = "alter table test.tbl1 add column (`col1` int(11) not null default \"0\" comment \"\", " - + "`col2` int(11) not null default \"0\" comment \"\");"; + + "`col2` int(11) not null default \"0\" comment \"\");"; alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(stmt, starRocksAssert.getCtx()); clause = (AddColumnsClause) alterTableStmt.getAlterClauseList().get(0); Assert.assertEquals(null, clause.getRollupName()); @@ -2458,35 +2465,35 @@ public void testCreateTemporaryPartitionInBatch() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); starRocksAssert.withDatabase("test2"); String createSQL = "CREATE TABLE test2.site_access(\n" + - " event_day datetime,\n" + - " site_id INT DEFAULT '10',\n" + - " city_code VARCHAR(100),\n" + - " user_name VARCHAR(32) DEFAULT '',\n" + - " pv BIGINT DEFAULT '0'\n" + - ")\n" + - "DUPLICATE KEY(event_day, site_id, city_code, user_name)\n" + - "PARTITION BY date_trunc('day', event_day)(\n" + - " START (\"2023-03-27\") END (\"2023-03-30\") EVERY (INTERVAL 1 day)\n" + - ")\n" + - "DISTRIBUTED BY HASH(event_day, site_id) BUCKETS 32\n" + - "PROPERTIES(\n" + - " \"replication_num\" = \"1\"\n" + - ");"; + " event_day datetime,\n" + + " site_id INT DEFAULT '10',\n" + + " city_code VARCHAR(100),\n" + + " user_name VARCHAR(32) DEFAULT '',\n" + + " pv BIGINT DEFAULT '0'\n" + + ")\n" + + "DUPLICATE KEY(event_day, site_id, city_code, user_name)\n" + + "PARTITION BY date_trunc('day', event_day)(\n" + + " START (\"2023-03-27\") END (\"2023-03-30\") EVERY (INTERVAL 1 day)\n" + + ")\n" + + "DISTRIBUTED BY HASH(event_day, site_id) BUCKETS 32\n" + + "PROPERTIES(\n" + + " \"replication_num\" = \"1\"\n" + + ");"; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createSQL, ctx); StarRocksAssert.utCreateTableWithRetry(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test2"); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test2"); String sql = "alter table test2.site_access add TEMPORARY partitions " + - "START (\"2023-03-27\") END (\"2023-03-30\") EVERY (INTERVAL 1 day);"; + "START (\"2023-03-27\") END (\"2023-03-30\") EVERY (INTERVAL 1 day);"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, starRocksAssert.getCtx()); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() - .addPartitions(Util.getOrCreateConnectContext(), db, "site_access", addPartitionClause); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .addPartitions(Util.getOrCreateConnectContext(), db, "site_access", addPartitionClause); - Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test2") - .getTable("site_access"); + Table table = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test2") + .getTable("site_access"); OlapTable olapTable = (OlapTable) table; PartitionInfo partitionInfo = olapTable.getPartitionInfo(); RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo; @@ 
-2497,14 +2504,14 @@ public void testCreateTemporaryPartitionInBatch() throws Exception { @Test public void testCatalogDropColumn() throws Exception { starRocksAssert.withDatabase("test").useDatabase("test") - .withTable("CREATE TABLE test.tbl1\n" + - "(\n" + - " k1 date,\n" + - " k2 int,\n" + - " v1 int sum\n" + - ")\n" + - "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + - "PROPERTIES('replication_num' = '1');"); + .withTable("CREATE TABLE test.tbl1\n" + + "(\n" + + " k1 date,\n" + + " k2 int,\n" + + " v1 int sum\n" + + ")\n" + + "DISTRIBUTED BY HASH(k2) BUCKETS 3\n" + + "PROPERTIES('replication_num' = '1');"); String stmt = "alter table test.tbl1 drop column k2 from `testRollup`"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(stmt, starRocksAssert.getCtx()); DropColumnClause clause = (DropColumnClause) alterTableStmt.getAlterClauseList().get(0); @@ -2605,9 +2612,9 @@ public Table getTable(String tableName) { TruncatePartitionClause clause = new TruncatePartitionClause(partitionNames); cList.add(clause); AlterJobMgr alter = new AlterJobMgr( - new SchemaChangeHandler(), - new MaterializedViewHandler(), - new SystemHandler()); + new SchemaChangeHandler(), + new MaterializedViewHandler(), + new SystemHandler()); TableName tableName = new TableName("test_db", "test_table"); AlterTableStmt stmt = new AlterTableStmt(tableName, cList); DDLStmtExecutor.execute(stmt, starRocksAssert.getCtx()); @@ -2626,69 +2633,69 @@ public void testAutoPartitionTableUnsupported() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String sql = "ALTER TABLE site_access_date_trunc ADD PARTITION p20210101 VALUES [(\"2021-01-01\"), (\"2021-01-02\"));"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt); } @Test(expected = AnalysisException.class) public void testAutoPartitionTableUnsupported2() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String sql = "ALTER TABLE site_access_time_slice\n" + - "ADD PARTITIONS START (\"2022-05-01\") END (\"2022-05-03\") EVERY (INTERVAL 1 day)"; + "ADD PARTITIONS START (\"2022-05-01\") END (\"2022-05-03\") EVERY (INTERVAL 1 day)"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt); } @Test(expected = AnalysisException.class) public void testAutoPartitionTableUnsupported3() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String sql = "ALTER TABLE site_access_date_trunc\n" + - "ADD PARTITIONS START (\"2022-05-01\") END (\"2022-05-03\") EVERY (INTERVAL 2 day)"; + "ADD PARTITIONS START (\"2022-05-01\") END (\"2022-05-03\") EVERY (INTERVAL 2 day)"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt); } @Test public void testAlterMvWithResourceGroup() throws Exception { starRocksAssert.executeResourceGroupDdlSql("create resource group if not exists mv_rg" + - " with (" + - " 'cpu_core_limit' = '10'," + - " 'mem_limit' = '20%'," + - " 'concurrency_limit' 
= '11'," + - " 'type' = 'mv'" + - " );"); + " with (" + + " 'cpu_core_limit' = '10'," + + " 'mem_limit' = '20%'," + + " 'concurrency_limit' = '11'," + + " 'type' = 'mv'" + + " );"); starRocksAssert.useDatabase("test") - .withMaterializedView("CREATE MATERIALIZED VIEW `mv2` (a comment \"a1\", b comment \"b2\", c)\n" + - "COMMENT \"MATERIALIZED_VIEW\"\n" + - "DISTRIBUTED BY HASH(a) BUCKETS 12\n" + - "REFRESH ASYNC\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"replicated_storage\" = \"true\",\n" + - "\"resource_group\" = \"mv_rg\",\n" + - "\"storage_medium\" = \"HDD\"\n" + - ")\n" + - "AS SELECT k1, k2, v1 from test.tbl1"); + .withMaterializedView("CREATE MATERIALIZED VIEW `mv2` (a comment \"a1\", b comment \"b2\", c)\n" + + "COMMENT \"MATERIALIZED_VIEW\"\n" + + "DISTRIBUTED BY HASH(a) BUCKETS 12\n" + + "REFRESH ASYNC\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"replicated_storage\" = \"true\",\n" + + "\"resource_group\" = \"mv_rg\",\n" + + "\"storage_medium\" = \"HDD\"\n" + + ")\n" + + "AS SELECT k1, k2, v1 from test.tbl1"); MaterializedView mv = - (MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test").getTable("mv2"); + (MaterializedView) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("test").getTable("mv2"); Assert.assertEquals("mv_rg", mv.getTableProperty().getResourceGroup()); ConnectContext ctx = starRocksAssert.getCtx(); String sql = "ALTER MATERIALIZED VIEW mv2\n" + - "set (\"resource_group\" =\"\" )"; + "set (\"resource_group\" =\"\" )"; AlterMaterializedViewStmt alterTableStmt = (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterMaterializedView(alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterMaterializedView(alterTableStmt); Assert.assertEquals("", mv.getTableProperty().getResourceGroup()); sql = "ALTER MATERIALIZED VIEW mv2\n" + - "set (\"resource_group\" =\"not_exist_rg\" )"; + "set (\"resource_group\" =\"not_exist_rg\" )"; AlterMaterializedViewStmt alterTableStmt2 = - (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); + (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); Assert.assertThrows("resource_group not_exist_rg does not exist.", - SemanticException.class, - () -> GlobalStateMgr.getCurrentState().getLocalMetastore().alterMaterializedView(alterTableStmt2)); + SemanticException.class, + () -> GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterMaterializedView(alterTableStmt2)); sql = "ALTER MATERIALIZED VIEW mv2\n" + - "set (\"resource_group\" =\"mv_rg\" )"; + "set (\"resource_group\" =\"mv_rg\" )"; AlterMaterializedViewStmt alterTableStmt3 = - (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterMaterializedView(alterTableStmt3); + (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterMaterializedView(alterTableStmt3); Assert.assertEquals("mv_rg", mv.getTableProperty().getResourceGroup()); new MockUp() { @@ -2699,31 +2706,31 @@ public Warehouse getWarehouse(String warehouseName) { }; sql = "ALTER MATERIALIZED VIEW mv2 set (\"warehouse\" = \"w1\")"; AlterMaterializedViewStmt alterTableStmt4 = - (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - 
GlobalStateMgr.getCurrentState().getLocalMetastore().alterMaterializedView(alterTableStmt4); + (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterMaterializedView(alterTableStmt4); Assert.assertEquals(1L, mv.getWarehouseId()); } @Test(expected = ErrorReportException.class) public void testAlterListPartitionUseBatchBuildPartition() throws Exception { starRocksAssert.useDatabase("test").withTable("CREATE TABLE t2 (\n" + - " dt datetime not null,\n" + - " user_id bigint not null,\n" + - " recharge_money decimal(32,2) not null, \n" + - " province varchar(20) not null,\n" + - " id varchar(20) not null\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(dt)\n" + - "PARTITION BY (dt)\n" + - "DISTRIBUTED BY HASH(`dt`) BUCKETS 10 \n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"in_memory\" = \"false\"\n" + - ");"); + " dt datetime not null,\n" + + " user_id bigint not null,\n" + + " recharge_money decimal(32,2) not null, \n" + + " province varchar(20) not null,\n" + + " id varchar(20) not null\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(dt)\n" + + "PARTITION BY (dt)\n" + + "DISTRIBUTED BY HASH(`dt`) BUCKETS 10 \n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"in_memory\" = \"false\"\n" + + ");"); ConnectContext ctx = starRocksAssert.getCtx(); String sql = "ALTER TABLE t2 ADD PARTITIONS START (\"2021-01-04\") END (\"2021-01-06\") EVERY (INTERVAL 1 DAY);"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt); } @Test @@ -2733,25 +2740,25 @@ public void testAlterForeignKey() throws Exception { { // inner table starRocksAssert.useDatabase("test").withMaterializedView("create materialized view if not exists `fk_mv_1` " + - "refresh manual " + - "as " + - "select t1.event_day, t1.site_id, t2.user_name " + - "from site_access_date_trunc t1 join site_access_time_slice t2 " + - "on t1.site_id = t2.site_id"); + "refresh manual " + + "as " + + "select t1.event_day, t1.site_id, t2.user_name " + + "from site_access_date_trunc t1 join site_access_time_slice t2 " + + "on t1.site_id = t2.site_id"); connectContext.executeSql("alter materialized view fk_mv_1 set " + - "( 'unique_constraints'='site_access_date_trunc.site_id'); "); + "( 'unique_constraints'='site_access_date_trunc.site_id'); "); connectContext.executeSql("alter materialized view fk_mv_1 set " + - "( 'foreign_key_constraints'='site_access_time_slice(site_id)" + - " REFERENCES site_access_date_trunc(site_id)'); "); + "( 'foreign_key_constraints'='site_access_time_slice(site_id)" + + " REFERENCES site_access_date_trunc(site_id)'); "); while (true) { ModifyTablePropertyOperationLog modifyMvLog = - (ModifyTablePropertyOperationLog) UtFrameUtils.PseudoJournalReplayer. - replayNextJournal(OperationType.OP_ALTER_MATERIALIZED_VIEW_PROPERTIES); + (ModifyTablePropertyOperationLog) UtFrameUtils.PseudoJournalReplayer. 
+ replayNextJournal(OperationType.OP_ALTER_MATERIALIZED_VIEW_PROPERTIES); Assert.assertNotNull(modifyMvLog); if (modifyMvLog.getProperties().containsKey("foreign_key_constraints")) { Assert.assertEquals("default_catalog.10001.10133(site_id) " + - "REFERENCES default_catalog.10001.10118(site_id)", - modifyMvLog.getProperties().get("foreign_key_constraints")); + "REFERENCES default_catalog.10001.10118(site_id)", + modifyMvLog.getProperties().get("foreign_key_constraints")); break; } } @@ -2760,24 +2767,24 @@ public void testAlterForeignKey() throws Exception { { // external table starRocksAssert.withMaterializedView("create materialized view if not exists `fk_mv_2` " + - "refresh manual " + - "as " + - "select t1.l_orderkey, t1.l_partkey, t2.o_totalprice " + - "from hive0.tpch.lineitem t1 join hive0.tpch.orders t2 " + - "on t1.l_orderkey = t2.o_orderkey"); + "refresh manual " + + "as " + + "select t1.l_orderkey, t1.l_partkey, t2.o_totalprice " + + "from hive0.tpch.lineitem t1 join hive0.tpch.orders t2 " + + "on t1.l_orderkey = t2.o_orderkey"); connectContext.executeSql("alter materialized view fk_mv_2 set " + - "( 'unique_constraints'='hive0.tpch.orders.o_orderkey'); "); + "( 'unique_constraints'='hive0.tpch.orders.o_orderkey'); "); connectContext.executeSql("alter materialized view fk_mv_2 set " + - "( 'foreign_key_constraints'='hive0.tpch.lineitem(l_orderkey) " + - "REFERENCES hive0.tpch.orders(o_orderkey)'); "); + "( 'foreign_key_constraints'='hive0.tpch.lineitem(l_orderkey) " + + "REFERENCES hive0.tpch.orders(o_orderkey)'); "); while (true) { ModifyTablePropertyOperationLog modifyMvLog = - (ModifyTablePropertyOperationLog) UtFrameUtils.PseudoJournalReplayer. - replayNextJournal(OperationType.OP_ALTER_MATERIALIZED_VIEW_PROPERTIES); + (ModifyTablePropertyOperationLog) UtFrameUtils.PseudoJournalReplayer. 
+ replayNextJournal(OperationType.OP_ALTER_MATERIALIZED_VIEW_PROPERTIES); Assert.assertNotNull(modifyMvLog); if (modifyMvLog.getProperties().containsKey("foreign_key_constraints")) { Assert.assertEquals("hive0.tpch.lineitem:0(l_orderkey) REFERENCES hive0.tpch.orders:0(o_orderkey)", - modifyMvLog.getProperties().get("foreign_key_constraints")); + modifyMvLog.getProperties().get("foreign_key_constraints")); break; } } diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/BatchRollupJobTest.java b/fe/fe-core/src/test/java/com/starrocks/alter/BatchRollupJobTest.java index 455a5a9cd23221..c05023fb5dda8d 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/BatchRollupJobTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/BatchRollupJobTest.java @@ -119,7 +119,7 @@ public void testBatchRollup() throws Exception { } Assert.assertEquals(OlapTableState.NORMAL, tbl.getState()); for (Partition partition : tbl.getPartitions()) { - Assert.assertEquals(4, partition.getMaterializedIndices(IndexExtState.VISIBLE).size()); + Assert.assertEquals(4, partition.getDefaultPhysicalPartition().getMaterializedIndices(IndexExtState.VISIBLE).size()); } } @@ -147,14 +147,14 @@ public void testCancelBatchRollup() throws Exception { // cancel rollup jobs stmtStr = "cancel alter table rollup from db1.tbl2 (" + Joiner.on(",").join(jobIds) + ")"; CancelAlterTableStmt cancelStmt = (CancelAlterTableStmt) UtFrameUtils.parseStmtWithNewParser(stmtStr, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().cancelAlter(cancelStmt); + GlobalStateMgr.getCurrentState().getAlterJobMgr().cancelAlter(cancelStmt, ""); for (AlterJobV2 alterJob : alterJobs.values()) { Assert.assertEquals(AlterJobV2.JobState.CANCELLED, alterJob.getJobState()); } Assert.assertEquals(OlapTableState.NORMAL, tbl.getState()); for (Partition partition : tbl.getPartitions()) { - Assert.assertEquals(1, partition.getMaterializedIndices(IndexExtState.VISIBLE).size()); + Assert.assertEquals(1, partition.getDefaultPhysicalPartition().getMaterializedIndices(IndexExtState.VISIBLE).size()); } } } diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/LakeTableAlterMetaJobTest.java b/fe/fe-core/src/test/java/com/starrocks/alter/LakeTableAlterMetaJobTest.java index 04659352d91d46..f9a837f75009ac 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/LakeTableAlterMetaJobTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/LakeTableAlterMetaJobTest.java @@ -73,9 +73,9 @@ public static void beforeClass() throws Exception { public void setUp() throws Exception { String createDbStmtStr = "create database " + DB_NAME; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); connectContext.setDatabase(DB_NAME); - db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(DB_NAME); + db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb(DB_NAME); table = createTable(connectContext, "CREATE TABLE t0(c0 INT) PRIMARY KEY(c0) DISTRIBUTED BY HASH(c0) BUCKETS 1 " + @@ -89,16 +89,16 @@ public void setUp() throws Exception { public void tearDown() throws DdlException, MetaNotFoundException { db.dropTable(table.getName()); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().dropDb(DB_NAME, true); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropDb(DB_NAME, true); } 
catch (MetaNotFoundException ignored) { } } private static LakeTable createTable(ConnectContext connectContext, String sql) throws Exception { CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(createTableStmt.getDbName()); - return (LakeTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(createTableStmt); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb(createTableStmt.getDbName()); + return (LakeTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getTable(db.getFullName(), createTableStmt.getTableName()); } @@ -152,7 +152,7 @@ public void testDropTable01() { @Test public void testDropDb01() throws DdlException, MetaNotFoundException { - GlobalStateMgr.getCurrentState().getLocalMetastore().dropDb(db.getFullName(), true); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropDb(db.getFullName(), true); job.run(); Assert.assertEquals(AlterJobV2.JobState.CANCELLED, job.getJobState()); } @@ -172,7 +172,7 @@ public void testDropDb02() throws DdlException, MetaNotFoundException { job.runPendingJob(); Assert.assertEquals(AlterJobV2.JobState.RUNNING, job.getJobState()); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropDb(db.getFullName(), true); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropDb(db.getFullName(), true); job.run(); Assert.assertEquals(AlterJobV2.JobState.CANCELLED, job.getJobState()); } @@ -198,7 +198,7 @@ public void testDropDb03() throws DdlException, MetaNotFoundException { job.runRunningJob(); Assert.assertEquals(AlterJobV2.JobState.FINISHED_REWRITING, job.getJobState()); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropDb(db.getFullName(), true); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropDb(db.getFullName(), true); job.run(); Assert.assertEquals(AlterJobV2.JobState.CANCELLED, job.getJobState()); } @@ -220,8 +220,8 @@ public void testReplay() { for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); long commitVersion = commitVersionMap.get(partitionId); - Assert.assertEquals(partition.getVisibleVersion(), commitVersion); - partition.updateVisibleVersion(commitVersion - 1); + Assert.assertEquals(partition.getDefaultPhysicalPartition().getVisibleVersion(), commitVersion); + partition.getDefaultPhysicalPartition().updateVisibleVersion(commitVersion - 1); } replayAlterMetaJob.replay(job); @@ -238,7 +238,7 @@ public void testReplay() { for (long partitionId : partitionIndexMap.rowKeySet()) { Partition partition = table.getPartition(partitionId); long commitVersion = commitVersionMap.get(partitionId); - Assert.assertEquals(partition.getVisibleVersion(), commitVersion); + Assert.assertEquals(partition.getDefaultPhysicalPartition().getVisibleVersion(), commitVersion); } } diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/LakeTableAsyncFastSchemaChangeJobTest.java b/fe/fe-core/src/test/java/com/starrocks/alter/LakeTableAsyncFastSchemaChangeJobTest.java index 5d8b68899fd40a..08367e22040b73 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/LakeTableAsyncFastSchemaChangeJobTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/LakeTableAsyncFastSchemaChangeJobTest.java @@ -50,13 +50,13 @@ public static void setUp() 
throws Exception { connectContext = UtFrameUtils.createDefaultCtx(); String createDbStmtStr = "create database " + DB_NAME; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); connectContext.setDatabase(DB_NAME); } private static LakeTable createTable(ConnectContext connectContext, String sql) throws Exception { CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(createTableStmt); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(createTableStmt.getDbName()); LakeTable table = (LakeTable) GlobalStateMgr.getCurrentState().getLocalMetastore() .getTable(db.getFullName(), createTableStmt.getTableName()); @@ -66,7 +66,7 @@ private static LakeTable createTable(ConnectContext connectContext, String sql) private static void alterTable(ConnectContext connectContext, String sql) throws Exception { AlterTableStmt stmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(connectContext, stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(connectContext, stmt); } private LakeTableAsyncFastSchemaChangeJob getAlterJob(Table table) { @@ -175,8 +175,8 @@ public void testReplay() throws Exception { Assert.assertEquals(OlapTable.OlapTableState.NORMAL, table.getState()); Partition partition = table.getPartition("t3"); long baseIndexId = table.getBaseIndexId(); - long initVisibleVersion = partition.getVisibleVersion(); - long initNextVersion = partition.getNextVersion(); + long initVisibleVersion = partition.getDefaultPhysicalPartition().getVisibleVersion(); + long initNextVersion = partition.getDefaultPhysicalPartition().getNextVersion(); Assert.assertEquals(initVisibleVersion + 1, initNextVersion); LakeTableAsyncFastSchemaChangeJob replaySourceJob = new LakeTableAsyncFastSchemaChangeJob(job); @@ -186,10 +186,11 @@ public void testReplay() throws Exception { replaySourceJob.setJobState(AlterJobV2.JobState.FINISHED_REWRITING); replaySourceJob.getCommitVersionMap().put(partition.getId(), initNextVersion); - replaySourceJob.addDirtyPartitionIndex(partition.getId(), baseIndexId, partition.getIndex(baseIndexId)); + replaySourceJob.addDirtyPartitionIndex(partition.getId(), baseIndexId, + partition.getDefaultPhysicalPartition().getIndex(baseIndexId)); job.replay(replaySourceJob); - Assert.assertEquals(initNextVersion + 1, partition.getNextVersion()); - Assert.assertEquals(initVisibleVersion, partition.getVisibleVersion()); + Assert.assertEquals(initNextVersion + 1, partition.getDefaultPhysicalPartition().getNextVersion()); + Assert.assertEquals(initVisibleVersion, partition.getDefaultPhysicalPartition().getVisibleVersion()); replaySourceJob.setJobState(AlterJobV2.JobState.FINISHED); replaySourceJob.setFinishedTimeMs(System.currentTimeMillis()); @@ -197,8 +198,9 @@ public void testReplay() throws Exception { job.replay(replaySourceJob); Assert.assertEquals(AlterJobV2.JobState.FINISHED, job.getJobState()); Assert.assertEquals(replaySourceJob.getFinishedTimeMs(), job.getFinishedTimeMs()); - 
Assert.assertEquals(initVisibleVersion + 1, partition.getVisibleVersion()); - Assert.assertEquals(partition.getVisibleVersion() + 1, partition.getNextVersion()); + Assert.assertEquals(initVisibleVersion + 1, partition.getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(partition.getDefaultPhysicalPartition().getVisibleVersion() + 1, + partition.getDefaultPhysicalPartition().getNextVersion()); Assert.assertEquals(OlapTable.OlapTableState.NORMAL, table.getState()); Assert.assertEquals(2, table.getBaseSchema().size()); Assert.assertEquals(0, table.getBaseSchema().get(0).getUniqueId()); diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/LakeTableSchemaChangeJobTest.java b/fe/fe-core/src/test/java/com/starrocks/alter/LakeTableSchemaChangeJobTest.java index 2881ccc8e25ae5..2e87a5c20e56f8 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/LakeTableSchemaChangeJobTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/LakeTableSchemaChangeJobTest.java @@ -75,15 +75,15 @@ public static void setUp() throws Exception { private static LakeTable createTable(ConnectContext connectContext, String sql) throws Exception { CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(createTableStmt.getDbName()); - return (LakeTable) GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(createTableStmt); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb(createTableStmt.getDbName()); + return (LakeTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getTable(db.getFullName(), createTableStmt.getTableName()); } private static void alterTable(ConnectContext connectContext, String sql) throws Exception { AlterTableStmt stmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(connectContext, stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(connectContext, stmt); } private LakeTableSchemaChangeJob getAlterJob(Table table) { @@ -100,9 +100,9 @@ private LakeTableSchemaChangeJob getAlterJob(Table table) { public void before() throws Exception { String createDbStmtStr = "create database " + DB_NAME; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); connectContext.setDatabase(DB_NAME); - db = GlobalStateMgr.getServingState().getLocalMetastore().getDb(DB_NAME); + db = GlobalStateMgr.getServingState().getStarRocksMetadata().getDb(DB_NAME); table = createTable(connectContext, "CREATE TABLE t0(c0 INT) duplicate key(c0) distributed by hash(c0) buckets " + NUM_BUCKETS); Config.enable_fast_schema_evolution_in_share_data_mode = false; @@ -113,7 +113,7 @@ public void before() throws Exception { @After public void after() throws Exception { - GlobalStateMgr.getCurrentState().getLocalMetastore().dropDb(DB_NAME, true); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropDb(DB_NAME, true); } @Test @@ -241,7 +241,8 @@ public void testCreateTabletSuccess() throws AlterCancelException { 
Assert.assertEquals(OlapTable.OlapTableState.NORMAL, table.getState()); Partition partition = table.getPartitions().stream().findFirst().get(); - Assert.assertEquals(0, partition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); + Assert.assertEquals(0, partition.getDefaultPhysicalPartition(). + getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); } @Test @@ -265,7 +266,8 @@ public boolean isPreviousLoadFinished(long dbId, long tableId, long txnId) { Assert.assertEquals(OlapTable.OlapTableState.NORMAL, table.getState()); Partition partition = table.getPartitions().stream().findFirst().get(); - Assert.assertEquals(0, partition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); + Assert.assertEquals(0, partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); } @Test @@ -292,7 +294,8 @@ public boolean isPreviousLoadFinished(long dbId, long tableId, long txnId) throw Assert.assertEquals(OlapTable.OlapTableState.NORMAL, table.getState()); Partition partition = table.getPartitions().stream().findFirst().get(); - Assert.assertEquals(0, partition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); + Assert.assertEquals(0, partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); } @Test @@ -323,7 +326,8 @@ public void testTableNotExistWhileWaitingTxn() throws AlterCancelException { Assert.assertEquals(OlapTable.OlapTableState.NORMAL, table.getState()); Partition partition = table.getPartitions().stream().findFirst().get(); - Assert.assertEquals(0, partition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); + Assert.assertEquals(0, partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); } @Test @@ -356,7 +360,8 @@ public void testTableDroppedBeforeRewriting() throws AlterCancelException { Assert.assertEquals(OlapTable.OlapTableState.NORMAL, table.getState()); Partition partition = table.getPartitions().stream().findFirst().get(); - Assert.assertEquals(0, partition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); + Assert.assertEquals(0, partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); } @Test @@ -387,7 +392,8 @@ public void sendAgentTask(AgentBatchTask batchTask) { Assert.assertEquals(OlapTable.OlapTableState.NORMAL, table.getState()); Partition partition = table.getPartitions().stream().findFirst().get(); - Assert.assertEquals(0, partition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); + Assert.assertEquals(0, partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); } @Test @@ -412,9 +418,9 @@ public void sendAgentTask(AgentBatchTask batchTask) { Assert.assertEquals(1, partitions.size()); Partition partition = partitions.stream().findFirst().orElse(null); Assert.assertNotNull(partition); - Assert.assertEquals(3, partition.getNextVersion()); + Assert.assertEquals(3, partition.getDefaultPhysicalPartition().getNextVersion()); List shadowIndexes = - partition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW); + partition.getDefaultPhysicalPartition().getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW); Assert.assertEquals(1, shadowIndexes.size()); // Does not support cancel job in FINISHED_REWRITING state. 
@@ -460,16 +466,16 @@ public void sendAgentTask(AgentBatchTask batchTask) { Partition partition = partitions.stream().findFirst().orElse(null); Assert.assertNotNull(partition); - Assert.assertEquals(1, partition.getVisibleVersion()); - Assert.assertEquals(2, partition.getNextVersion()); + Assert.assertEquals(1, partition.getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(2, partition.getDefaultPhysicalPartition().getNextVersion()); // Disable send publish version - partition.setNextVersion(3); + partition.getDefaultPhysicalPartition().setNextVersion(3); schemaChangeJob.runRunningJob(); Assert.assertEquals(AlterJobV2.JobState.FINISHED_REWRITING, schemaChangeJob.getJobState()); List shadowIndexes = - partition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW); + partition.getDefaultPhysicalPartition().getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW); Assert.assertEquals(1, shadowIndexes.size()); // The partition's visible version has not catch up with the commit version of this schema change job now. @@ -477,7 +483,7 @@ public void sendAgentTask(AgentBatchTask batchTask) { Assert.assertEquals(AlterJobV2.JobState.FINISHED_REWRITING, schemaChangeJob.getJobState()); // Reset partition's next version - partition.setVisibleVersion(2, System.currentTimeMillis()); + partition.getDefaultPhysicalPartition().setVisibleVersion(2, System.currentTimeMillis()); // Drop table db.dropTable(table.getName()); @@ -514,14 +520,14 @@ public void publishVersion(@NotNull List tablets, TxnInfoPB txnInfo, lon Assert.assertEquals("c1", table.getBaseSchema().get(1).getName()); Assert.assertSame(partition, table.getPartitions().stream().findFirst().get()); - Assert.assertEquals(3, partition.getVisibleVersion()); - Assert.assertEquals(4, partition.getNextVersion()); + Assert.assertEquals(3, partition.getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(4, partition.getDefaultPhysicalPartition().getNextVersion()); - shadowIndexes = partition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW); + shadowIndexes = partition.getDefaultPhysicalPartition().getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW); Assert.assertEquals(0, shadowIndexes.size()); List normalIndexes = - partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE); + partition.getDefaultPhysicalPartition().getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE); Assert.assertEquals(1, normalIndexes.size()); MaterializedIndex normalIndex = normalIndexes.get(0); diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/MaterializedViewHandlerTest.java b/fe/fe-core/src/test/java/com/starrocks/alter/MaterializedViewHandlerTest.java index cee3fca9d59dbf..a4f8ddc86ea429 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/MaterializedViewHandlerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/MaterializedViewHandlerTest.java @@ -149,7 +149,7 @@ public void testRollupReplica(@Injectable CreateMaterializedViewStmt createMater result = baseIndexId; olapTable.getPhysicalPartitions(); result = Lists.newArrayList(partition); - partition.getIndex(baseIndexId); + partition.getDefaultPhysicalPartition().getIndex(baseIndexId); result = materializedIndex; materializedIndex.getState(); result = MaterializedIndex.IndexState.SHADOW; @@ -337,7 +337,7 @@ public void testCheckDropMaterializedView(@Injectable OlapTable olapTable, @Inje result = 1; olapTable.getPhysicalPartitions(); result = Lists.newArrayList(partition); - partition.getIndex(1L); + 
partition.getDefaultPhysicalPartition().getIndex(1L); result = materializedIndex; } }; diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/OnlineOptimizeJobV2Test.java b/fe/fe-core/src/test/java/com/starrocks/alter/OnlineOptimizeJobV2Test.java index 6a3975c4be2d78..cf2e87c4804ba0 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/OnlineOptimizeJobV2Test.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/OnlineOptimizeJobV2Test.java @@ -177,7 +177,7 @@ public void testSchemaChangeWhileTabletNotStable() throws Exception { Assert.assertEquals(1, alterJobsV2.size()); OnlineOptimizeJobV2 optimizeJob = (OnlineOptimizeJobV2) alterJobsV2.values().stream().findAny().get(); - MaterializedIndex baseIndex = testPartition.getBaseIndex(); + MaterializedIndex baseIndex = testPartition.getDefaultPhysicalPartition().getBaseIndex(); LocalTablet baseTablet = (LocalTablet) baseIndex.getTablets().get(0); List<Replica> replicas = baseTablet.getImmutableReplicas(); Replica replica1 = replicas.get(0); diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/OptimizeJobV2Test.java b/fe/fe-core/src/test/java/com/starrocks/alter/OptimizeJobV2Test.java index 15d8d060c7482f..e36b32ea3e73c2 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/OptimizeJobV2Test.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/OptimizeJobV2Test.java @@ -149,7 +149,7 @@ public void testOptimizeTable() throws Exception { SchemaChangeHandler schemaChangeHandler = GlobalStateMgr.getCurrentState().getSchemaChangeHandler(); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable7); + .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable7); schemaChangeHandler.process(alterTableStmt.getAlterClauseList(), db, olapTable); Map<Long, AlterJobV2> alterJobsV2 = schemaChangeHandler.getAlterJobsV2(); @@ -163,7 +163,7 @@ public void testOptimizeTableFinish() throws Exception { SchemaChangeHandler schemaChangeHandler = GlobalStateMgr.getCurrentState().getSchemaChangeHandler(); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable7); + .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable7); Partition testPartition = olapTable.getPartition(GlobalStateMgrTestUtil.testTable7); schemaChangeHandler.process(alterTableStmt.getAlterClauseList(), db, olapTable); @@ -184,15 +184,15 @@ for (int i = 0; i < optimizeTasks.size(); ++i) { OptimizeTask optimizeTask = optimizeTasks.get(i); GlobalStateMgr.getCurrentState().getTaskManager().getTaskRunManager() - .getTaskRunScheduler().removeRunningTask(optimizeTask.getId()); + .getTaskRunScheduler().removeRunningTask(optimizeTask.getId()); GlobalStateMgr.getCurrentState().getTaskManager().getTaskRunManager() - .getTaskRunScheduler().removePendingTask(optimizeTask); + .getTaskRunScheduler().removePendingTask(optimizeTask); TaskRunStatus taskRunStatus = new TaskRunStatus(); taskRunStatus.setTaskName(optimizeTask.getName()); taskRunStatus.setState(Constants.TaskRunState.SUCCESS); taskRunStatus.setDbName(db.getFullName()); GlobalStateMgr.getCurrentState().getTaskManager() -
.getTaskRunManager().getTaskRunHistory().addHistory(taskRunStatus); } optimizeJob.runRunningJob(); @@ -205,7 +205,7 @@ public void testOptimizeTableFailed() throws Exception { SchemaChangeHandler schemaChangeHandler = GlobalStateMgr.getCurrentState().getSchemaChangeHandler(); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable7); + .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable7); schemaChangeHandler.process(alterTableStmt.getAlterClauseList(), db, olapTable); Map alterJobsV2 = schemaChangeHandler.getAlterJobsV2(); @@ -242,7 +242,7 @@ public void testOptimizeTableFailed() throws Exception { Assert.assertEquals(JobState.CANCELLED, optimizeJob.getJobState()); OptimizeJobV2 replayOptimizeJob = new OptimizeJobV2( - optimizeJob.getJobId(), db.getId(), olapTable.getId(), olapTable.getName(), 1000); + optimizeJob.getJobId(), db.getId(), olapTable.getId(), olapTable.getName(), 1000); replayOptimizeJob.replay(optimizeJob); } @@ -251,7 +251,7 @@ public void testSchemaChangeWhileTabletNotStable() throws Exception { SchemaChangeHandler schemaChangeHandler = GlobalStateMgr.getCurrentState().getSchemaChangeHandler(); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable7); + .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable7); Partition testPartition = olapTable.getPartition(GlobalStateMgrTestUtil.testTable7); schemaChangeHandler.process(alterTableStmt.getAlterClauseList(), db, olapTable); @@ -259,7 +259,7 @@ public void testSchemaChangeWhileTabletNotStable() throws Exception { Assert.assertEquals(1, alterJobsV2.size()); OptimizeJobV2 optimizeJob = (OptimizeJobV2) alterJobsV2.values().stream().findAny().get(); - MaterializedIndex baseIndex = testPartition.getBaseIndex(); + MaterializedIndex baseIndex = testPartition.getDefaultPhysicalPartition().getBaseIndex(); LocalTablet baseTablet = (LocalTablet) baseIndex.getTablets().get(0); List replicas = baseTablet.getImmutableReplicas(); Replica replica1 = replicas.get(0); @@ -302,7 +302,7 @@ public void testOptimizeReplay() throws Exception { SchemaChangeHandler schemaChangeHandler = GlobalStateMgr.getCurrentState().getSchemaChangeHandler(); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable7); + .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable7); schemaChangeHandler.process(alterTableStmt.getAlterClauseList(), db, olapTable); Map alterJobsV2 = schemaChangeHandler.getAlterJobsV2(); @@ -310,7 +310,7 @@ public void testOptimizeReplay() throws Exception { OptimizeJobV2 optimizeJob = (OptimizeJobV2) alterJobsV2.values().stream().findAny().get(); OptimizeJobV2 replayOptimizeJob = new OptimizeJobV2( - optimizeJob.getJobId(), db.getId(), olapTable.getId(), olapTable.getName(), 1000); + optimizeJob.getJobId(), db.getId(), olapTable.getId(), olapTable.getName(), 1000); replayOptimizeJob.replay(optimizeJob); Assert.assertEquals(JobState.PENDING, replayOptimizeJob.getJobState()); @@ -349,7 +349,7 @@ public void 
testOptimizeReplayPartialSuccess() throws Exception { SchemaChangeHandler schemaChangeHandler = GlobalStateMgr.getCurrentState().getSchemaChangeHandler(); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = - (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "testTable2"); + (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "testTable2"); String stmt = "alter table testTable2 distributed by hash(v1)"; AlterTableStmt alterStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(stmt, starRocksAssert.getCtx()); @@ -359,7 +359,7 @@ public void testOptimizeReplayPartialSuccess() throws Exception { OptimizeJobV2 optimizeJob = (OptimizeJobV2) alterJobsV2.values().stream().findAny().get(); OptimizeJobV2 replayOptimizeJob = new OptimizeJobV2( - optimizeJob.getJobId(), db.getId(), olapTable.getId(), olapTable.getName(), 1000); + optimizeJob.getJobId(), db.getId(), olapTable.getId(), olapTable.getName(), 1000); replayOptimizeJob.replay(optimizeJob); Assert.assertEquals(JobState.PENDING, replayOptimizeJob.getJobState()); @@ -394,7 +394,7 @@ public void testOptimizeFailedByVersion() throws Exception { SchemaChangeHandler schemaChangeHandler = GlobalStateMgr.getCurrentState().getSchemaChangeHandler(); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = - (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "testTable2"); + (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "testTable2"); String stmt = "alter table testTable2 distributed by hash(v1)"; AlterTableStmt alterStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(stmt, starRocksAssert.getCtx()); @@ -420,7 +420,8 @@ public void testOptimizeFailedByVersion() throws Exception { optimizeTasks.get(1).setOptimizeTaskState(Constants.TaskRunState.SUCCESS); for (Partition p : olapTable.getPartitions()) { - p.setVisibleVersion(p.getVisibleVersion() + 1, 0); + p.getDefaultPhysicalPartition().setVisibleVersion( + p.getDefaultPhysicalPartition().getVisibleVersion() + 1, 0); } try { diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/RollupJobV2Test.java b/fe/fe-core/src/test/java/com/starrocks/alter/RollupJobV2Test.java index fc8e951b894bea..a0357ec971af5d 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/RollupJobV2Test.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/RollupJobV2Test.java @@ -92,12 +92,12 @@ public class RollupJobV2Test extends DDLTestBase { public void setUp() throws Exception { super.setUp(); clause = new AddRollupClause(GlobalStateMgrTestUtil.testRollupIndex2, Lists.newArrayList("v1"), null, - GlobalStateMgrTestUtil.testTable1, null); + GlobalStateMgrTestUtil.testTable1, null); AlterTableClauseAnalyzer analyzer = new AlterTableClauseAnalyzer(null); analyzer.analyze(null, clause); clause2 = new AddRollupClause(GlobalStateMgrTestUtil.testRollupIndex3, Lists.newArrayList("v1", "v2"), null, - GlobalStateMgrTestUtil.testTable1, null); + GlobalStateMgrTestUtil.testTable1, null); analyzer.analyze(null, clause2); AgentTaskQueue.clearAllTasks(); @@ -116,14 +116,14 @@ public void testRunRollupJobConcurrentLimit() throws UserException { alterClauses.add(clause2); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = (OlapTable) 
GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable1); + .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable1); materializedViewHandler.process(alterClauses, db, olapTable); Map alterJobsV2 = materializedViewHandler.getAlterJobsV2(); materializedViewHandler.runAfterCatalogReady(); assertEquals(Config.max_running_rollup_job_num_per_table, - materializedViewHandler.getTableRunningJobMap().get(olapTable.getId()).size()); + materializedViewHandler.getTableRunningJobMap().get(olapTable.getId()).size()); assertEquals(2, alterJobsV2.size()); assertEquals(OlapTableState.ROLLUP, olapTable.getState()); } @@ -135,7 +135,7 @@ public void testAddSchemaChange() throws UserException { alterClauses.add(clause); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable1); + .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable1); materializedViewHandler.process(alterClauses, db, olapTable); Map alterJobsV2 = materializedViewHandler.getAlterJobsV2(); assertEquals(1, alterJobsV2.size()); @@ -152,7 +152,7 @@ public void testSchemaChange1() throws Exception { alterClauses.add(clause); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable1); + .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable1); Partition testPartition = olapTable.getPartition(GlobalStateMgrTestUtil.testTable1); materializedViewHandler.process(alterClauses, db, olapTable); Map alterJobsV2 = materializedViewHandler.getAlterJobsV2(); @@ -163,9 +163,12 @@ public void testSchemaChange1() throws Exception { // runPendingJob rollupJob.runPendingJob(); assertEquals(AlterJobV2.JobState.WAITING_TXN, rollupJob.getJobState()); - assertEquals(2, testPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL).size()); - assertEquals(1, testPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); - assertEquals(1, testPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); + assertEquals(2, testPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL).size()); + assertEquals(1, testPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); + assertEquals(1, testPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); // runWaitingTxnJob rollupJob.runWaitingTxnJob(); @@ -196,7 +199,7 @@ public void testSchemaChangeWhileTabletNotStable() throws Exception { Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable1); + .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable1); Partition testPartition = olapTable.getPartition(GlobalStateMgrTestUtil.testTable1); materializedViewHandler.process(alterClauses, db, olapTable); @@ -204,7 +207,7 @@ public void testSchemaChangeWhileTabletNotStable() throws Exception { assertEquals(1, alterJobsV2.size()); RollupJobV2 
rollupJob = (RollupJobV2) alterJobsV2.values().stream().findAny().get(); - MaterializedIndex baseIndex = testPartition.getBaseIndex(); + MaterializedIndex baseIndex = testPartition.getDefaultPhysicalPartition().getBaseIndex(); assertEquals(MaterializedIndex.IndexState.NORMAL, baseIndex.getState()); assertEquals(Partition.PartitionState.NORMAL, testPartition.getState()); assertEquals(OlapTableState.ROLLUP, olapTable.getState()); @@ -226,9 +229,12 @@ public void testSchemaChangeWhileTabletNotStable() throws Exception { replica1.setState(Replica.ReplicaState.NORMAL); rollupJob.runPendingJob(); assertEquals(AlterJobV2.JobState.WAITING_TXN, rollupJob.getJobState()); - assertEquals(2, testPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL).size()); - assertEquals(1, testPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); - assertEquals(1, testPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); + assertEquals(2, testPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL).size()); + assertEquals(1, testPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size()); + assertEquals(1, testPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size()); // runWaitingTxnJob rollupJob.runWaitingTxnJob(); @@ -249,7 +255,7 @@ public void testSchemaChangeWhileTabletNotStable() throws Exception { @Test public void testSerializeOfRollupJob() throws IOException, - AnalysisException { + AnalysisException { Config.enable_materialized_view = true; // prepare file String fileName = "./RollupJobV2Test"; @@ -261,13 +267,13 @@ public void testSerializeOfRollupJob() throws IOException, List columns = Lists.newArrayList(); String mvColumnName = MVUtils.MATERIALIZED_VIEW_NAME_PREFIX + "bitmap_union_" + "c1"; Column column = new Column(mvColumnName, Type.BITMAP, false, AggregateType.BITMAP_UNION, false, - new ColumnDef.DefaultValueDef(true, new StringLiteral("1")), ""); + new ColumnDef.DefaultValueDef(true, new StringLiteral("1")), ""); columns.add(column); RollupJobV2 rollupJobV2 = new RollupJobV2(1, 1, 1, "test", 1, 1, - 1, "test", "rollup", 0, columns, null, 1, 1, - KeysType.AGG_KEYS, keysCount, - new OriginStatement("create materialized view rollup as select bitmap_union(to_bitmap(c1)) from test", - 0), "", false); + 1, "test", "rollup", 0, columns, null, 1, 1, + KeysType.AGG_KEYS, keysCount, + new OriginStatement("create materialized view rollup as select bitmap_union(to_bitmap(c1)) from test", + 0), "", false); // write rollup job rollupJobV2.write(out); @@ -282,7 +288,7 @@ public void testSerializeOfRollupJob() throws IOException, assertEquals(1, resultColumns.size()); Column resultColumn1 = resultColumns.get(0); assertEquals(mvColumnName, - resultColumn1.getName()); + resultColumn1.getName()); Assert.assertTrue(resultColumn1.getDefineExpr() instanceof FunctionCallExpr); FunctionCallExpr resultFunctionCall = (FunctionCallExpr) resultColumn1.getDefineExpr(); assertEquals("to_bitmap", resultFunctionCall.getFnName().getFunction()); @@ -295,7 +301,7 @@ public void testReplayPendingRollupJob() throws Exception { alterClauses.add(clause); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable1); + 
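The assertion rewrites in RollupJobV2Test (and, below, SchemaChangeJobV2Test) all follow one pattern: shadow/visible index bookkeeping is queried per physical partition. A compact sketch of the expected counts after runPendingJob, assuming the same fixtures and static assertEquals import; the helper is illustrative:

    // After the job creates its shadow index, the default physical partition
    // should report 2 indices in total: 1 visible and 1 shadow.
    static void assertShadowIndexCreated(Partition partition) {
        PhysicalPartition physical = partition.getDefaultPhysicalPartition();
        assertEquals(2, physical.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL).size());
        assertEquals(1, physical.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE).size());
        assertEquals(1, physical.getMaterializedIndices(MaterializedIndex.IndexExtState.SHADOW).size());
    }
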
.getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable1); materializedViewHandler.process(alterClauses, db, olapTable); Map alterJobsV2 = materializedViewHandler.getAlterJobsV2(); assertEquals(1, alterJobsV2.size()); @@ -313,7 +319,7 @@ public void testCancelPendingJobWithFlag() throws Exception { alterClauses.add(clause); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDb1); OlapTable olapTable = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable1); + .getTable(db.getFullName(), GlobalStateMgrTestUtil.testTable1); Partition testPartition = olapTable.getPartition(GlobalStateMgrTestUtil.testTable1); materializedViewHandler.process(alterClauses, db, olapTable); Map alterJobsV2 = materializedViewHandler.getAlterJobsV2(); diff --git a/fe/fe-core/src/test/java/com/starrocks/alter/SchemaChangeJobV2Test.java b/fe/fe-core/src/test/java/com/starrocks/alter/SchemaChangeJobV2Test.java index 091b22899e3d7c..3e2b69a4972b15 100644 --- a/fe/fe-core/src/test/java/com/starrocks/alter/SchemaChangeJobV2Test.java +++ b/fe/fe-core/src/test/java/com/starrocks/alter/SchemaChangeJobV2Test.java @@ -144,7 +144,7 @@ public void testSchemaChange1() throws Exception { SchemaChangeJobV2 schemaChangeJob = (SchemaChangeJobV2) alterJobsV2.values().stream().findAny().get(); alterJobsV2.clear(); - MaterializedIndex baseIndex = testPartition.getBaseIndex(); + MaterializedIndex baseIndex = testPartition.getDefaultPhysicalPartition().getBaseIndex(); assertEquals(IndexState.NORMAL, baseIndex.getState()); assertEquals(PartitionState.NORMAL, testPartition.getState()); assertEquals(OlapTableState.SCHEMA_CHANGE, olapTable.getState()); @@ -160,9 +160,12 @@ public void testSchemaChange1() throws Exception { // runPendingJob schemaChangeJob.runPendingJob(); Assert.assertEquals(JobState.WAITING_TXN, schemaChangeJob.getJobState()); - Assert.assertEquals(2, testPartition.getMaterializedIndices(IndexExtState.ALL).size()); - Assert.assertEquals(1, testPartition.getMaterializedIndices(IndexExtState.VISIBLE).size()); - Assert.assertEquals(1, testPartition.getMaterializedIndices(IndexExtState.SHADOW).size()); + Assert.assertEquals(2, testPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.ALL).size()); + Assert.assertEquals(1, testPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.VISIBLE).size()); + Assert.assertEquals(1, testPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.SHADOW).size()); // runWaitingTxnJob schemaChangeJob.runWaitingTxnJob(); @@ -199,7 +202,7 @@ public void testSchemaChangeWhileTabletNotStable() throws Exception { SchemaChangeJobV2 schemaChangeJob = (SchemaChangeJobV2) alterJobsV2.values().stream().findAny().get(); alterJobsV2.clear(); - MaterializedIndex baseIndex = testPartition.getBaseIndex(); + MaterializedIndex baseIndex = testPartition.getDefaultPhysicalPartition().getBaseIndex(); assertEquals(IndexState.NORMAL, baseIndex.getState()); assertEquals(PartitionState.NORMAL, testPartition.getState()); assertEquals(OlapTableState.SCHEMA_CHANGE, olapTable.getState()); @@ -221,9 +224,12 @@ public void testSchemaChangeWhileTabletNotStable() throws Exception { replica1.setState(Replica.ReplicaState.NORMAL); schemaChangeJob.runPendingJob(); Assert.assertEquals(JobState.WAITING_TXN, schemaChangeJob.getJobState()); - Assert.assertEquals(2, testPartition.getMaterializedIndices(IndexExtState.ALL).size()); - 
Assert.assertEquals(1, testPartition.getMaterializedIndices(IndexExtState.VISIBLE).size()); - Assert.assertEquals(1, testPartition.getMaterializedIndices(IndexExtState.SHADOW).size()); + Assert.assertEquals(2, testPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.ALL).size()); + Assert.assertEquals(1, testPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.VISIBLE).size()); + Assert.assertEquals(1, testPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.SHADOW).size()); // runWaitingTxnJob schemaChangeJob.runWaitingTxnJob(); @@ -471,7 +477,7 @@ public void testCancelPendingJobWithFlag() throws Exception { SchemaChangeJobV2 schemaChangeJob = (SchemaChangeJobV2) alterJobsV2.values().stream().findAny().get(); alterJobsV2.clear(); - MaterializedIndex baseIndex = testPartition.getBaseIndex(); + MaterializedIndex baseIndex = testPartition.getDefaultPhysicalPartition().getBaseIndex(); assertEquals(IndexState.NORMAL, baseIndex.getState()); assertEquals(PartitionState.NORMAL, testPartition.getState()); assertEquals(OlapTableState.SCHEMA_CHANGE, olapTable.getState()); diff --git a/fe/fe-core/src/test/java/com/starrocks/analysis/AccessTestUtil.java b/fe/fe-core/src/test/java/com/starrocks/analysis/AccessTestUtil.java index 0c33c0f4306814..49862407084c9a 100644 --- a/fe/fe-core/src/test/java/com/starrocks/analysis/AccessTestUtil.java +++ b/fe/fe-core/src/test/java/com/starrocks/analysis/AccessTestUtil.java @@ -106,11 +106,11 @@ public static OlapTable mockTable(String name) { Partition partition = Deencapsulation.newInstance(Partition.class); new Expectations(partition) { { - partition.getBaseIndex(); + partition.getDefaultPhysicalPartition().getBaseIndex(); minTimes = 0; result = index; - partition.getIndex(30000L); + partition.getDefaultPhysicalPartition().getIndex(30000L); minTimes = 0; result = index; } @@ -237,11 +237,11 @@ public static Analyzer fetchTableAnalyzer() { Partition partition = Deencapsulation.newInstance(Partition.class); new Expectations(partition) { { - partition.getBaseIndex(); + partition.getDefaultPhysicalPartition().getBaseIndex(); minTimes = 0; result = index; - partition.getIndex(30000L); + partition.getDefaultPhysicalPartition().getIndex(30000L); minTimes = 0; result = index; } diff --git a/fe/fe-core/src/test/java/com/starrocks/analysis/AlterMaterializedViewTest.java b/fe/fe-core/src/test/java/com/starrocks/analysis/AlterMaterializedViewTest.java index 1f217dd6a8b633..8b5cf00b311303 100644 --- a/fe/fe-core/src/test/java/com/starrocks/analysis/AlterMaterializedViewTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/analysis/AlterMaterializedViewTest.java @@ -136,14 +136,14 @@ public void testAlterMVProperties() throws Exception { String alterMvSql = "alter materialized view mv1 set (\"session.query_timeout\" = \"10000\")"; AlterMaterializedViewStmt stmt = (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(alterMvSql, connectContext); - currentState.getLocalMetastore().alterMaterializedView(stmt); + currentState.getStarRocksMetadata().alterMaterializedView(stmt); } { String alterMvSql = "alter materialized view mv1 set (\"session.not_exists\" = \"10000\")"; AlterMaterializedViewStmt stmt = (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(alterMvSql, connectContext); Exception e = Assert.assertThrows(SemanticException.class, - () -> currentState.getLocalMetastore().alterMaterializedView(stmt)); + () -> 
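In AccessTestUtil the mocked Partition now has to stub the chained accessor rather than the old direct one. A sketch of the updated JMockit Expectations block, assuming partition and index are the same Deencapsulation-built fixtures used in mockTable:

    new Expectations(partition) {
        {
            // Base index lookup is now reached via the default physical partition.
            partition.getDefaultPhysicalPartition().getBaseIndex();
            minTimes = 0;
            result = index;

            // Index lookup by id is stubbed the same way.
            partition.getDefaultPhysicalPartition().getIndex(30000L);
            minTimes = 0;
            result = index;
        }
    };
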
currentState.getStarRocksMetadata().alterMaterializedView(stmt)); Assert.assertEquals("Getting analyzing error. Detail message: " + "Unknown system variable 'not_exists', the most similar variables are " + "{'init_connect', 'connector_max_split_size', 'tx_isolation'}.", e.getMessage()); @@ -153,7 +153,7 @@ public void testAlterMVProperties() throws Exception { String alterMvSql = "alter materialized view mv1 set (\"query_timeout\" = \"10000\")"; AlterMaterializedViewStmt stmt = (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(alterMvSql, connectContext); - Assert.assertThrows(SemanticException.class, () -> currentState.getLocalMetastore().alterMaterializedView(stmt)); + Assert.assertThrows(SemanticException.class, () -> currentState.getStarRocksMetadata().alterMaterializedView(stmt)); } } @@ -163,7 +163,7 @@ public void testAlterMVColocateGroup() throws Exception { String alterMvSql = "alter materialized view mv1 set (\"colocate_with\" = \"group1\")"; AlterMaterializedViewStmt stmt = (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(alterMvSql, connectContext); - Assert.assertThrows(SemanticException.class, () -> currentState.getLocalMetastore().alterMaterializedView(stmt)); + Assert.assertThrows(SemanticException.class, () -> currentState.getStarRocksMetadata().alterMaterializedView(stmt)); } @Test @@ -172,14 +172,14 @@ public void testAlterMVRewriteStalenessProperties() throws Exception { String alterMvSql = "alter materialized view mv1 set (\"mv_rewrite_staleness_second\" = \"60\")"; AlterMaterializedViewStmt stmt = (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(alterMvSql, connectContext); - currentState.getLocalMetastore().alterMaterializedView(stmt); + currentState.getStarRocksMetadata().alterMaterializedView(stmt); } { String alterMvSql = "alter materialized view mv1 set (\"mv_rewrite_staleness_second\" = \"abc\")"; AlterMaterializedViewStmt stmt = (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(alterMvSql, connectContext); - Assert.assertThrows(SemanticException.class, () -> currentState.getLocalMetastore().alterMaterializedView(stmt)); + Assert.assertThrows(SemanticException.class, () -> currentState.getStarRocksMetadata().alterMaterializedView(stmt)); } } diff --git a/fe/fe-core/src/test/java/com/starrocks/analysis/CreateMaterializedViewTest.java b/fe/fe-core/src/test/java/com/starrocks/analysis/CreateMaterializedViewTest.java index b990fdace4b53f..04509689f6f0e3 100644 --- a/fe/fe-core/src/test/java/com/starrocks/analysis/CreateMaterializedViewTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/analysis/CreateMaterializedViewTest.java @@ -619,7 +619,7 @@ public void handleDMLStmt(ExecPlan execPlan, DmlStmt stmt) throws Exception { "as select date_trunc('month',tb1.k1) s1, tb2.k2 s2 from tbl1 tb1 join tbl2 tb2 on tb1.k2 = tb2.k2;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); Table mv1 = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(testDb.getFullName(), "mv1"); Assert.assertTrue(mv1 instanceof MaterializedView); // test partition @@ -686,7 +686,7 @@ public void handleDMLStmt(ExecPlan execPlan, DmlStmt stmt) throws Exception { "as select k1, tbl1.k2 from tbl1;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, 
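From here on, most of the churn in the materialized-view tests is a single rename at the call sites: DDL entry points such as createMaterializedView and alterMaterializedView are invoked on GlobalStateMgr's StarRocksMetadata facade instead of LocalMetastore, while plain metadata reads (getDb, getTable) stay on LocalMetastore. A sketch of the new shape, assuming a test method that declares throws Exception and the same sql/connectContext/testDb fixtures:

    GlobalStateMgr state = GlobalStateMgr.getCurrentState();

    // DDL goes through the StarRocksMetadata facade...
    CreateMaterializedViewStatement createStmt = (CreateMaterializedViewStatement)
            UtFrameUtils.parseStmtWithNewParser(sql, connectContext);
    state.getStarRocksMetadata().createMaterializedView(createStmt);

    // ...while lookups keep using LocalMetastore.
    Table mv = state.getLocalMetastore().getTable(testDb.getFullName(), "mv1");
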
connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); Table mv1 = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(testDb.getFullName(), "mv1"); Assert.assertTrue(mv1 instanceof MaterializedView); // test partition @@ -742,7 +742,7 @@ public void handleDMLStmt(ExecPlan execPlan, DmlStmt stmt) throws Exception { "as select k1, tbl1.k2 from tbl1;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); } catch (Exception e) { Assert.fail(e.getMessage()); } finally { @@ -1626,7 +1626,7 @@ public void testCreateMvFromMv() { "as select k1, k2 from tbl1;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql1, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); } catch (Exception e) { Assert.fail(e.getMessage()); } @@ -1640,7 +1640,7 @@ public void testCreateMvFromMv() { "as select k1, k2 from base_mv;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql2, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); } catch (Exception e) { Assert.fail(e.getMessage()); } @@ -1658,7 +1658,7 @@ public void testCreateMvFromMv2() throws Exception { "as select k1, k2 from tbl1;"; { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql1, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); } String sql2 = "create materialized view mv_from_base_mv2 " + @@ -1671,7 +1671,7 @@ public void testCreateMvFromMv2() throws Exception { "as select k1, k2 from base_mv2;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql2, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); } catch (Exception e) { Assert.fail(e.getMessage()); } @@ -1689,7 +1689,7 @@ public void testCreateMvFromInactiveMv() { "as select k1, k2 from tbl1;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql1, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); } catch (Exception e) { Assert.fail(e.getMessage()); } @@ -1707,7 +1707,7 @@ public void testCreateMvFromInactiveMv() { "as select k1, k2 from base_inactive_mv;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql2, connectContext); - 
currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); } catch (Exception e) { Assert.assertEquals("Getting analyzing error at line 3, column 24. Detail message: " + "Create/Rebuild materialized view from inactive materialized view: base_inactive_mv.", @@ -1727,7 +1727,7 @@ public void testAsHasStar() throws Exception { "as select k1 ss, * from tbl1;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); MaterializedView mv = ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(testDb.getFullName(), "testAsHasStar")); mv.setInactiveAndReason(""); List mvColumns = mv.getFullSchema(); @@ -1760,7 +1760,7 @@ public void testAsHasStarWithSameColumns() throws Exception { "as select a.k1 ss, a.*, b.* from tbl1 as a join tbl1 as b on a.k1=b.k1;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); Assert.fail(); } catch (Exception e) { Assert.assertTrue(e.getMessage().contains("Duplicate column name 'k1'")); @@ -1781,7 +1781,7 @@ public void testMVWithSameColumns() throws Exception { "as select a.k1 ss, a.k2, b.k2 from tbl1 as a join tbl1 as b on a.k1=b.k1;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); Assert.fail(); } catch (Exception e) { Assert.assertTrue(e.getMessage().contains("Duplicate column name 'k2'")); @@ -1820,7 +1820,7 @@ public void testAsSelectItemAlias1() throws Exception { "as select date_trunc('month',tbl1.k1), k1, k2 from tbl1;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); MaterializedView mv = ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(testDb.getFullName(), "testAsSelectItemAlias1")); mv.setInactiveAndReason(""); List mvColumns = mv.getFullSchema(); @@ -1851,7 +1851,7 @@ public void testAsSelectItemAlias2() throws Exception { "select date_trunc('month',tbl1.k1), k1, k2 from tbl1;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); MaterializedView mv = ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(testDb.getFullName(), "testAsSelectItemAlias2")); mv.setInactiveAndReason(""); List mvColumns = mv.getFullSchema(); @@ 
-1878,7 +1878,7 @@ public void testAsSelectItemAlias3() { "as select date_trunc('month',tbl1.k1), k1, k2 from tbl1;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); } catch (Exception e) { Assert.assertTrue(e.getMessage().contains("Materialized view partition exp: " + "`tbl1`.`k1` must related to column")); @@ -1898,7 +1898,7 @@ public void testAsSelectItemAlias4() { "as select date_trunc('month',tbl1.k1), k1, k2 from tbl1;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); } catch (Exception e) { Assert.assertTrue(e.getMessage() .contains("No viable statement for input 'distributed by hash(date_trunc('.")); @@ -2001,7 +2001,7 @@ public boolean isEnableColocateMVIndex() throws Exception { "as select k1, k2 from colocateTable;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStmt) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStmt) statementBase); waitingRollupJobV2Finish(); ColocateTableIndex colocateTableIndex = currentState.getColocateTableIndex(); String fullGroupName = testDb.getId() + "_" + "colocate_group1"; @@ -2048,7 +2048,7 @@ public void testCreateColocateMvWithoutGroup() throws Exception { Assert.assertThrows(AnalysisException.class, () -> { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStmt) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStmt) statementBase); }); currentState.getColocateTableIndex().clear(); @@ -2097,10 +2097,10 @@ public boolean isEnableColocateMVIndex() throws Exception { "as select k1, k2 from colocateTable3;"; try { StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStmt) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStmt) statementBase); waitingRollupJobV2Finish(); statementBase = UtFrameUtils.parseStmtWithNewParser(sql2, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStmt) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStmt) statementBase); waitingRollupJobV2Finish(); ColocateTableIndex colocateTableIndex = currentState.getColocateTableIndex(); @@ -2251,7 +2251,7 @@ private void assertCreateFailWithException(String sql, String msg) { } try { - currentState.getLocalMetastore().createMaterializedView(stmt); + currentState.getStarRocksMetadata().createMaterializedView(stmt); Assert.fail(); } catch (Exception e) { Assert.assertTrue(e.getMessage().contains(msg)); @@ -2287,7 +2287,7 @@ public void testCreateMVWithSessionProperties1() { CreateMaterializedViewStatement stmt = (CreateMaterializedViewStatement) 
UtFrameUtils.parseStmtWithNewParser(sql, starRocksAssert.getCtx()); - currentState.getLocalMetastore().createMaterializedView(stmt); + currentState.getStarRocksMetadata().createMaterializedView(stmt); Table mv1 = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(testDb.getFullName(), "mv_with_property1"); Assert.assertTrue(mv1 instanceof MaterializedView); } catch (Exception e) { @@ -2337,7 +2337,7 @@ public void testNoDuplicateKey() { CreateMaterializedViewStatement stmt = (CreateMaterializedViewStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView(stmt); + currentState.getStarRocksMetadata().createMaterializedView(stmt); } catch (Exception e) { Assert.fail(); } @@ -2470,7 +2470,7 @@ public void testCreateMvWithColocateGroup() throws Exception { ") " + "as select tbl1.k1 ss, k2 from tbl1;"; StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); String fullGroupName = testDb.getId() + "_" + groupName; long tableId = currentState.getColocateTableIndex().getTableIdByGroup(fullGroupName); Assert.assertTrue(tableId > 0); @@ -2544,7 +2544,7 @@ public void testMvNameInvalid() { CreateMaterializedViewStatement stmt = (CreateMaterializedViewStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView(stmt); + currentState.getStarRocksMetadata().createMaterializedView(stmt); } catch (Exception e) { Assert.fail(); } @@ -2564,7 +2564,7 @@ public void testMvName1() { CreateMaterializedViewStatement stmt = (CreateMaterializedViewStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView(stmt); + currentState.getStarRocksMetadata().createMaterializedView(stmt); } catch (Exception e) { Assert.fail(); } @@ -2584,7 +2584,7 @@ public void testPartitionAndDistributionByColumnNameIgnoreCase() { CreateMaterializedViewStatement stmt = (CreateMaterializedViewStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - currentState.getLocalMetastore().createMaterializedView(stmt); + currentState.getStarRocksMetadata().createMaterializedView(stmt); } catch (Exception e) { Assert.fail(e.getMessage()); } @@ -2954,7 +2954,7 @@ public void testCreateMVWithDifferentDB() { (CreateMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, newStarRocksAssert.getCtx()); Assert.assertEquals(stmt.getDBName(), "test"); Assert.assertEquals(stmt.getMVName(), "test_mv_use_different_tbl"); - currentState.getLocalMetastore().createMaterializedView(stmt); + currentState.getStarRocksMetadata().createMaterializedView(stmt); waitingRollupJobV2Finish(); Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(testDb.getFullName(), "tbl5"); @@ -3008,7 +3008,7 @@ public void testCreateAsyncMVWithDifferentDB() { Assert.assertEquals(stmt.getTableName().getDb(), "test"); Assert.assertEquals(stmt.getTableName().getTbl(), "test_mv_use_different_tbl"); - currentState.getLocalMetastore().createMaterializedView(stmt); + currentState.getStarRocksMetadata().createMaterializedView(stmt); newStarRocksAssert.dropDatabase("test_mv_different_db"); Table mv1 = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(testDb.getFullName(), 
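The persistence round-trip in testCreateMVWithLocationAndPersist also reflects the slimmer LocalMetastore constructor: the two trailing collaborator arguments (previously passed as null) are gone. A sketch of the follower-side replay path under that assumption, reusing the initialImage fixture from the test:

    // Rebuild a follower-side metastore from a saved image and replay it.
    LocalMetastore follower = new LocalMetastore(GlobalStateMgr.getCurrentState());
    follower.load(new SRMetaBlockReaderV2(initialImage.getJsonReader()));
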
"test_mv_use_different_tbl"); Assert.assertTrue(mv1 instanceof MaterializedView); @@ -3034,7 +3034,7 @@ public void testCreateAsyncMVWithDifferentDB2() { Assert.assertEquals(stmt.getTableName().getDb(), "test_mv_different_db"); Assert.assertEquals(stmt.getTableName().getTbl(), "test_mv_use_different_tbl"); - currentState.getLocalMetastore().createMaterializedView(stmt); + currentState.getStarRocksMetadata().createMaterializedView(stmt); Database differentDb = currentState.getLocalMetastore().getDb("test_mv_different_db"); Table mv1 = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(differentDb.getFullName(), "test_mv_use_different_tbl"); @@ -3880,7 +3880,7 @@ MaterializedView getMaterializedViewChecked(String sql) { CreateMaterializedViewStatement createMaterializedViewStatement = (CreateMaterializedViewStatement) statementBase; - currentState.getLocalMetastore().createMaterializedView(createMaterializedViewStatement); + currentState.getStarRocksMetadata().createMaterializedView(createMaterializedViewStatement); ThreadUtil.sleepAtLeastIgnoreInterrupts(4000L); TableName mvName = createMaterializedViewStatement.getTableName(); @@ -3902,7 +3902,7 @@ List getMaterializedViewKeysChecked(String sql) { CreateMaterializedViewStatement createMaterializedViewStatement = (CreateMaterializedViewStatement) statementBase; - currentState.getLocalMetastore().createMaterializedView(createMaterializedViewStatement); + currentState.getStarRocksMetadata().createMaterializedView(createMaterializedViewStatement); ThreadUtil.sleepAtLeastIgnoreInterrupts(4000L); TableName mvTableName = createMaterializedViewStatement.getTableName(); @@ -4737,7 +4737,8 @@ public void testCreateMVWithLocationAndPersist() throws Exception { String result = starRocksAssert.showCreateTable("show create table mv_with_location"); System.out.println(result); Assert.assertTrue(result.contains("rack:*")); - for (Tablet tablet : materializedView.getPartitions().iterator().next().getBaseIndex().getTablets()) { + for (Tablet tablet : materializedView.getPartitions().iterator().next().getDefaultPhysicalPartition() + .getBaseIndex().getTablets()) { Assert.assertEquals(backend.getId(), (long) tablet.getBackendIds().iterator().next()); } @@ -4746,7 +4747,7 @@ public void testCreateMVWithLocationAndPersist() throws Exception { GlobalStateMgr.getCurrentState().getLocalMetastore().save(finalImage.getImageWriter()); // test replay - LocalMetastore localMetastoreFollower = new LocalMetastore(GlobalStateMgr.getCurrentState(), null, null); + LocalMetastore localMetastoreFollower = new LocalMetastore(GlobalStateMgr.getCurrentState()); localMetastoreFollower.load(new SRMetaBlockReaderV2(initialImage.getJsonReader())); CreateTableInfo info = (CreateTableInfo) UtFrameUtils.PseudoJournalReplayer.replayNextJournal(OperationType.OP_CREATE_TABLE_V2); @@ -4758,7 +4759,7 @@ public void testCreateMVWithLocationAndPersist() throws Exception { Assert.assertTrue(mv.getLocation().containsKey("rack")); // test restart - LocalMetastore localMetastoreLeader = new LocalMetastore(GlobalStateMgr.getCurrentState(), null, null); + LocalMetastore localMetastoreLeader = new LocalMetastore(GlobalStateMgr.getCurrentState()); localMetastoreLeader.load(new SRMetaBlockReaderV2(finalImage.getJsonReader())); mv = (MaterializedView) localMetastoreLeader.getDb("test") .getTable("mv_with_location"); diff --git a/fe/fe-core/src/test/java/com/starrocks/analysis/CreateSyncMaterializedViewTest.java 
b/fe/fe-core/src/test/java/com/starrocks/analysis/CreateSyncMaterializedViewTest.java index 280c74477758f7..b627eac444c0af 100644 --- a/fe/fe-core/src/test/java/com/starrocks/analysis/CreateSyncMaterializedViewTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/analysis/CreateSyncMaterializedViewTest.java @@ -221,7 +221,7 @@ public void testSelectFromSyncMV() throws Exception { String sql = "create materialized view sync_mv1 as select k1, sum(v1) from tbl1 group by k1;"; CreateMaterializedViewStmt createTableStmt = (CreateMaterializedViewStmt) UtFrameUtils. parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createTableStmt); waitingRollupJobV2Finish(); sql = "select * from sync_mv1 [_SYNC_MV_];"; @@ -241,7 +241,7 @@ public void testCreateSyncMV1() throws Exception { parseStmtWithNewParser(sql, connectContext); try { // aggregate_table_with_null already existed in the db - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createTableStmt); Assert.fail(); } catch (Exception e) { Assert.assertTrue(e.getMessage().contains("Table [aggregate_table_with_null] already exists in the db test")); @@ -254,7 +254,7 @@ public void testCreateSyncMV2() throws Exception { String sql = "create materialized view sync_mv1 as select k1, sum(v1) from tbl1 group by k1;"; CreateMaterializedViewStmt createTableStmt = (CreateMaterializedViewStmt) UtFrameUtils. parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createTableStmt); waitingRollupJobV2Finish(); OlapTable tbl1 = (OlapTable) (getTable("test", "tbl1")); @@ -266,7 +266,7 @@ public void testCreateSyncMV2() throws Exception { createTableStmt = (CreateMaterializedViewStmt) UtFrameUtils. parseStmtWithNewParser(sql, connectContext); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createTableStmt); Assert.fail(); } catch (Throwable e) { Assert.assertTrue(e.getMessage().contains("Materialized view[sync_mv1] already exists in " + @@ -281,7 +281,7 @@ public void testCreateSyncMV3() throws Exception { String sql = "create materialized view sync_mv1 as select k1, sum(v1) from tbl1 group by k1;"; CreateMaterializedViewStmt createTableStmt = (CreateMaterializedViewStmt) UtFrameUtils. parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createTableStmt); waitingRollupJobV2Finish(); OlapTable tbl1 = (OlapTable) (getTable("test", "tbl1")); @@ -292,7 +292,7 @@ public void testCreateSyncMV3() throws Exception { createTableStmt = (CreateMaterializedViewStmt) UtFrameUtils. 
parseStmtWithNewParser(sql, connectContext); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createTableStmt); Assert.fail(); } catch (Throwable e) { Assert.assertTrue(e.getMessage().contains("Materialized view[sync_mv1] already exists " + @@ -307,7 +307,7 @@ public void testCreateSyncMV_WithUpperColumn() throws Exception { String sql = "create materialized view UPPER_MV1 as select K1, sum(V1) from TBL1 group by K1;"; CreateMaterializedViewStmt createTableStmt = (CreateMaterializedViewStmt) UtFrameUtils. parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createTableStmt); waitingRollupJobV2Finish(); { @@ -343,7 +343,7 @@ public void testCreateSyncMV_WithLowerColumn() throws Exception { String sql = "create materialized view lower_mv1 as select k1, sum(v1) from tbl1 group by K1;"; CreateMaterializedViewStmt createTableStmt = (CreateMaterializedViewStmt) UtFrameUtils. parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createTableStmt); waitingRollupJobV2Finish(); { @@ -382,7 +382,7 @@ public void testCreateSynchronousMVOnLakeTable() throws Exception { // Change table type to cloud native table Deencapsulation.setField(table, "type", Table.TableType.CLOUD_NATIVE); DdlException e = Assert.assertThrows(DdlException.class, () -> { - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createTableStmt); }); Assert.assertTrue(e.getMessage().contains("Creating synchronous materialized view(rollup) is not supported in " + "shared data clusters.\nPlease use asynchronous materialized view instead.\n" + @@ -399,7 +399,7 @@ public void testCreateSynchronousMVOnAnotherMV() throws Exception { // Change table type to materialized view Deencapsulation.setField(table, "type", Table.TableType.MATERIALIZED_VIEW); DdlException e = Assert.assertThrows(DdlException.class, () -> { - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createTableStmt); }); Assert.assertTrue(e.getMessage().contains("Do not support create synchronous materialized view(rollup) on")); } diff --git a/fe/fe-core/src/test/java/com/starrocks/analysis/DropMaterializedViewStmtNewPlannerTest.java b/fe/fe-core/src/test/java/com/starrocks/analysis/DropMaterializedViewStmtNewPlannerTest.java index 40815958f22b35..57604d63752b5f 100644 --- a/fe/fe-core/src/test/java/com/starrocks/analysis/DropMaterializedViewStmtNewPlannerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/analysis/DropMaterializedViewStmtNewPlannerTest.java @@ -55,7 +55,7 @@ public static void beforeClass() throws Exception { String createDbStmtStr = "create database test;"; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + 
GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); connectContext.setDatabase("test"); } diff --git a/fe/fe-core/src/test/java/com/starrocks/analysis/RefreshMaterializedViewStatementTest.java b/fe/fe-core/src/test/java/com/starrocks/analysis/RefreshMaterializedViewStatementTest.java index 4751f3d69f0a8f..2e4580a9c3d34a 100644 --- a/fe/fe-core/src/test/java/com/starrocks/analysis/RefreshMaterializedViewStatementTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/analysis/RefreshMaterializedViewStatementTest.java @@ -106,7 +106,8 @@ public void testRefreshMaterializedView() throws Exception { refreshScheme.getAsyncRefreshContext().getBaseTableVisibleVersionMap().get(table.getId()); if (partitionInfoMap.containsKey("table_name_tmp_1")) { MaterializedView.BasePartitionInfo partitionInfo = partitionInfoMap.get("table_name_tmp_1"); - Assert.assertEquals(table.getPartition("table_name_tmp_1").getVisibleVersion(), partitionInfo.getVersion()); + Assert.assertEquals(table.getPartition("table_name_tmp_1").getDefaultPhysicalPartition() + .getVisibleVersion(), partitionInfo.getVersion()); } } } diff --git a/fe/fe-core/src/test/java/com/starrocks/analysis/RefreshMaterializedViewTest.java b/fe/fe-core/src/test/java/com/starrocks/analysis/RefreshMaterializedViewTest.java index a8e9a75593a5bc..277885f51b5097 100644 --- a/fe/fe-core/src/test/java/com/starrocks/analysis/RefreshMaterializedViewTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/analysis/RefreshMaterializedViewTest.java @@ -143,7 +143,7 @@ public void testRefreshExecution() throws Exception { OlapTable table = (OlapTable) getTable("test", "tbl_with_mv"); Partition p1 = table.getPartition("p1"); Partition p2 = table.getPartition("p2"); - if (p2.getVisibleVersion() == 3) { + if (p2.getDefaultPhysicalPartition().getVisibleVersion() == 3) { MvUpdateInfo mvUpdateInfo = getMvUpdateInfo(mv1); Assert.assertTrue(mvUpdateInfo.getMvToRefreshType() == MvUpdateInfo.MvToRefreshType.FULL); Assert.assertTrue(!mvUpdateInfo.isValidRewrite()); @@ -155,8 +155,8 @@ public void testRefreshExecution() throws Exception { } else { // publish version is async, so version update may be late // for debug - System.out.println("p1 visible version:" + p1.getVisibleVersion()); - System.out.println("p2 visible version:" + p2.getVisibleVersion()); + System.out.println("p1 visible version:" + p1.getDefaultPhysicalPartition().getVisibleVersion()); + System.out.println("p2 visible version:" + p2.getDefaultPhysicalPartition().getVisibleVersion()); System.out.println("mv1 refresh context" + mv1.getRefreshScheme().getAsyncRefreshContext()); System.out.println("mv2 refresh context" + mv2.getRefreshScheme().getAsyncRefreshContext()); } @@ -200,7 +200,7 @@ public void testMaxMVRewriteStaleness1() { "set (\"mv_rewrite_staleness_second\" = \"%s\")", MV_STALENESS); AlterMaterializedViewStmt stmt = (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(alterMvSql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterMaterializedView(stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterMaterializedView(stmt); } // no refresh partitions if mv_rewrite_staleness is set. 
executeInsertSql(connectContext, "insert into tbl_staleness1 values(\"2022-02-20\", 1, 10)"); @@ -264,7 +264,8 @@ public void testMaxMVRewriteStaleness2() { Table tbl1 = getTable("test", "tbl_staleness2"); Optional maxPartitionRefreshTimestamp = - tbl1.getPartitions().stream().map(Partition::getVisibleVersionTime).max(Long::compareTo); + tbl1.getPartitions().stream().map( + p -> p.getDefaultPhysicalPartition().getVisibleVersionTime()).max(Long::compareTo); Assert.assertTrue(maxPartitionRefreshTimestamp.isPresent()); MaterializedView mv1 = getMv("test", "mv_with_mv_rewrite_staleness2"); @@ -282,7 +283,8 @@ public void testMaxMVRewriteStaleness2() { Table tbl1 = getTable("test", "tbl_staleness2"); Optional maxPartitionRefreshTimestamp = - tbl1.getPartitions().stream().map(Partition::getVisibleVersionTime).max(Long::compareTo); + tbl1.getPartitions().stream().map( + p -> p.getDefaultPhysicalPartition().getVisibleVersionTime()).max(Long::compareTo); Assert.assertTrue(maxPartitionRefreshTimestamp.isPresent()); MaterializedView mv1 = getMv("test", "mv_with_mv_rewrite_staleness2"); @@ -347,7 +349,8 @@ public void testMaxMVRewriteStaleness3() { { Table tbl1 = getTable("test", "tbl_staleness3"); Optional maxPartitionRefreshTimestamp = - tbl1.getPartitions().stream().map(Partition::getVisibleVersionTime).max(Long::compareTo); + tbl1.getPartitions().stream().map( + p -> p.getDefaultPhysicalPartition().getVisibleVersionTime()).max(Long::compareTo); Assert.assertTrue(maxPartitionRefreshTimestamp.isPresent()); MaterializedView mv1 = getMv("test", "mv_with_mv_rewrite_staleness21"); @@ -362,7 +365,8 @@ public void testMaxMVRewriteStaleness3() { { Table tbl1 = getTable("test", "mv_with_mv_rewrite_staleness21"); Optional maxPartitionRefreshTimestamp = - tbl1.getPartitions().stream().map(Partition::getVisibleVersionTime).max(Long::compareTo); + tbl1.getPartitions().stream().map( + p -> p.getDefaultPhysicalPartition().getVisibleVersionTime()).max(Long::compareTo); Assert.assertTrue(maxPartitionRefreshTimestamp.isPresent()); MaterializedView mv2 = getMv("test", "mv_with_mv_rewrite_staleness22"); @@ -380,7 +384,8 @@ public void testMaxMVRewriteStaleness3() { { Table tbl1 = getTable("test", "tbl_staleness3"); Optional maxPartitionRefreshTimestamp = - tbl1.getPartitions().stream().map(Partition::getVisibleVersionTime).max(Long::compareTo); + tbl1.getPartitions().stream().map( + p -> p.getDefaultPhysicalPartition().getVisibleVersionTime()).max(Long::compareTo); Assert.assertTrue(maxPartitionRefreshTimestamp.isPresent()); MaterializedView mv1 = getMv("test", "mv_with_mv_rewrite_staleness21"); @@ -401,7 +406,8 @@ public void testMaxMVRewriteStaleness3() { { Table tbl1 = getTable("test", "mv_with_mv_rewrite_staleness21"); Optional maxPartitionRefreshTimestamp = - tbl1.getPartitions().stream().map(Partition::getVisibleVersionTime).max(Long::compareTo); + tbl1.getPartitions().stream().map( + p -> p.getDefaultPhysicalPartition().getVisibleVersionTime()).max(Long::compareTo); Assert.assertTrue(maxPartitionRefreshTimestamp.isPresent()); MaterializedView mv2 = getMv("test", "mv_with_mv_rewrite_staleness22"); @@ -431,7 +437,7 @@ public void testMaxMVRewriteStaleness3() { AlterMaterializedViewStmt stmt = (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(alterMvSql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterMaterializedView(stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterMaterializedView(stmt); } MaterializedView mv1 = getMv("test", 
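The staleness tests can no longer use the Partition::getVisibleVersionTime method reference, since the timestamp now sits on the physical partition; a lambda is the replacement shape used throughout. A sketch, assuming tbl1 is the base table fixture and that the stream still reduces to an Optional<Long> via max:

    Optional<Long> maxPartitionRefreshTimestamp =
            tbl1.getPartitions().stream()
                    .map(p -> p.getDefaultPhysicalPartition().getVisibleVersionTime())
                    .max(Long::compareTo);
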
"mv_with_mv_rewrite_staleness21"); @@ -465,9 +471,9 @@ public void handleDMLStmt(ExecPlan execPlan, DmlStmt stmt) throws Exception { .getTable(testDb.getFullName(), tableName.getTbl())); for (Partition partition : tbl.getPartitions()) { if (insertStmt.getTargetPartitionIds().contains(partition.getId())) { - long version = partition.getVisibleVersion() + 1; - partition.setVisibleVersion(version, System.currentTimeMillis()); - MaterializedIndex baseIndex = partition.getBaseIndex(); + long version = partition.getDefaultPhysicalPartition().getVisibleVersion() + 1; + partition.getDefaultPhysicalPartition().setVisibleVersion(version, System.currentTimeMillis()); + MaterializedIndex baseIndex = partition.getDefaultPhysicalPartition().getBaseIndex(); List tablets = baseIndex.getTablets(); for (Tablet tablet : tablets) { List replicas = ((LocalTablet) tablet).getImmutableReplicas(); @@ -569,9 +575,9 @@ public void handleDMLStmt(ExecPlan execPlan, DmlStmt stmt) throws Exception { .getTable(testDb.getFullName(), tableName.getTbl())); for (Partition partition : tbl.getPartitions()) { if (insertStmt.getTargetPartitionIds().contains(partition.getId())) { - long version = partition.getVisibleVersion() + 1; - partition.setVisibleVersion(version, System.currentTimeMillis()); - MaterializedIndex baseIndex = partition.getBaseIndex(); + long version = partition.getDefaultPhysicalPartition().getVisibleVersion() + 1; + partition.getDefaultPhysicalPartition().setVisibleVersion(version, System.currentTimeMillis()); + MaterializedIndex baseIndex = partition.getDefaultPhysicalPartition().getBaseIndex(); List tablets = baseIndex.getTablets(); for (Tablet tablet : tablets) { List replicas = ((LocalTablet) tablet).getImmutableReplicas(); diff --git a/fe/fe-core/src/test/java/com/starrocks/analysis/ShowColumnStmtTest.java b/fe/fe-core/src/test/java/com/starrocks/analysis/ShowColumnStmtTest.java index 69ae5a4d21b6a1..1da57d2ef806f6 100644 --- a/fe/fe-core/src/test/java/com/starrocks/analysis/ShowColumnStmtTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/analysis/ShowColumnStmtTest.java @@ -50,7 +50,7 @@ public static void tearDown() throws Exception { String dropSQL = "drop table test_default"; try { DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } catch (Exception ex) { } diff --git a/fe/fe-core/src/test/java/com/starrocks/analysis/ShowCreateMaterializedViewStmtTest.java b/fe/fe-core/src/test/java/com/starrocks/analysis/ShowCreateMaterializedViewStmtTest.java index 20f9e51633af96..f5298cf141298a 100644 --- a/fe/fe-core/src/test/java/com/starrocks/analysis/ShowCreateMaterializedViewStmtTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/analysis/ShowCreateMaterializedViewStmtTest.java @@ -96,7 +96,7 @@ public void testShowInternalCatalogConstraints() throws Exception { "as select tbl1.k1, tbl2.k2 from tbl1 join tbl2 on tbl1.k1 = tbl2.k1;"; StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(createMvSql, ctx); GlobalStateMgr currentState = GlobalStateMgr.getCurrentState(); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); Table table = currentState.getLocalMetastore().getDb("test").getTable("mv9"); List createTableStmt = 
Lists.newArrayList(); AstToStringBuilder.getDdlStmt(table, createTableStmt, null, null, false, true); @@ -124,7 +124,7 @@ public void testShowExternalCatalogConstraints() throws Exception { "as select t2.c1, t1.c2 from hive0.partitioned_db.t1 join hive0.partitioned_db2.t2 on t1.c2 = t2.c2;"; StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(createMvSql, ctx); GlobalStateMgr currentState = GlobalStateMgr.getCurrentState(); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); Table table = currentState.getLocalMetastore().getDb("test").getTable("mv10"); List createTableStmt = Lists.newArrayList(); AstToStringBuilder.getDdlStmt(table, createTableStmt, null, null, false, true); @@ -153,7 +153,7 @@ public void testShowExternalTableCreateMvSql() throws Exception { "as select l_orderkey,l_partkey,l_shipdate from hive0.tpch.lineitem;"; StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(createMvSql, ctx); GlobalStateMgr currentState = GlobalStateMgr.getCurrentState(); - currentState.getLocalMetastore().createMaterializedView((CreateMaterializedViewStatement) statementBase); + currentState.getStarRocksMetadata().createMaterializedView((CreateMaterializedViewStatement) statementBase); Table table = currentState.getLocalMetastore().getDb("test").getTable("mv8"); List createTableStmt = Lists.newArrayList(); AstToStringBuilder.getDdlStmt(table, createTableStmt, null, null, false, true); diff --git a/fe/fe-core/src/test/java/com/starrocks/analysis/ShowCreateViewStmtTest.java b/fe/fe-core/src/test/java/com/starrocks/analysis/ShowCreateViewStmtTest.java index dde731d6f8ac85..f877ccf80006a9 100644 --- a/fe/fe-core/src/test/java/com/starrocks/analysis/ShowCreateViewStmtTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/analysis/ShowCreateViewStmtTest.java @@ -140,7 +140,7 @@ public static void tearDown() throws Exception { String dropSQL = "drop table tbl1"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } catch (Exception ex) { } @@ -185,9 +185,9 @@ public void testCreateView() throws Exception { for (String[] testcase : testCases) { String dropViewSql = "drop view if exists " + testcase[0]; DropTableStmt dropViewStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropViewSql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropViewStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropViewStmt); CreateViewStmt createViewStmt = (CreateViewStmt) UtFrameUtils.parseStmtWithNewParser(testcase[1], ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().createView(createViewStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createView(createViewStmt); List
views = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(createViewStmt.getDbName()).getViews(); List res = Lists.newArrayList(); @@ -206,7 +206,7 @@ public void testShowCreateView() throws Exception { String createViewSql = "create view test_view (k1 COMMENT \"dt\", k2, v1) COMMENT \"view comment\" " + "as select * from tbl1"; CreateViewStmt createViewStmt = (CreateViewStmt) UtFrameUtils.parseStmtWithNewParser(createViewSql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().createView(createViewStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createView(createViewStmt); List
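View DDL in ShowCreateViewStmtTest follows the same routing: create and drop go through the StarRocksMetadata facade, while getDb/getViews reads stay on LocalMetastore. A minimal round-trip sketch, assuming the ctx fixture and the test_view name used above:

    CreateViewStmt createViewStmt =
            (CreateViewStmt) UtFrameUtils.parseStmtWithNewParser(createViewSql, ctx);
    GlobalStateMgr.getCurrentState().getStarRocksMetadata().createView(createViewStmt);

    // Clean up the same way the teardown does.
    DropTableStmt dropViewStmt =
            (DropTableStmt) UtFrameUtils.parseStmtWithNewParser("drop view if exists test_view", ctx);
    GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropViewStmt);
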
views = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(createViewStmt.getDbName()).getViews(); List res = Lists.newArrayList(); @@ -243,7 +243,7 @@ public void testViewOfThreeUnionAllWithConstNullOutput() throws Exception { "\tt0.c4 as d\n" + "from t0"; CreateViewStmt createViewStmt = (CreateViewStmt) UtFrameUtils.parseStmtWithNewParser(createViewSql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().createView(createViewStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createView(createViewStmt); String descViewSql = "describe v2"; @@ -271,7 +271,7 @@ public void testViewOfThreeUnionAllWithConstNullOutput() throws Exception { String dropViewSql = "drop view if exists v2"; DropTableStmt dropViewStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropViewSql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropViewStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropViewStmt); } @Test diff --git a/fe/fe-core/src/test/java/com/starrocks/backup/BackupHandlerTest.java b/fe/fe-core/src/test/java/com/starrocks/backup/BackupHandlerTest.java index 8806b92658e2f9..a71a7ed42f03ca 100644 --- a/fe/fe-core/src/test/java/com/starrocks/backup/BackupHandlerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/backup/BackupHandlerTest.java @@ -224,7 +224,8 @@ public Status getSnapshotInfoFile(String label, String backupTimestamp, List snapshotInfos = Maps.newHashMap(); for (Partition part : tbl.getPartitions()) { - for (MaterializedIndex idx : part.getMaterializedIndices(IndexExtState.VISIBLE)) { + for (MaterializedIndex idx : part.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.VISIBLE)) { for (Tablet tablet : idx.getTablets()) { List files = Lists.newArrayList(); SnapshotInfo sinfo = new SnapshotInfo(db.getId(), tbl.getId(), part.getId(), idx.getId(), diff --git a/fe/fe-core/src/test/java/com/starrocks/backup/CatalogMocker.java b/fe/fe-core/src/test/java/com/starrocks/backup/CatalogMocker.java index 997d26e21f9ef3..baa8bf45a546e6 100644 --- a/fe/fe-core/src/test/java/com/starrocks/backup/CatalogMocker.java +++ b/fe/fe-core/src/test/java/com/starrocks/backup/CatalogMocker.java @@ -54,7 +54,6 @@ import com.starrocks.catalog.PartitionInfo; import com.starrocks.catalog.PartitionKey; import com.starrocks.catalog.PhysicalPartition; -import com.starrocks.catalog.PhysicalPartitionImpl; import com.starrocks.catalog.PrimitiveType; import com.starrocks.catalog.RandomDistributionInfo; import com.starrocks.catalog.RangePartitionInfo; @@ -376,7 +375,7 @@ public static Database mockDb() throws AnalysisException { rollupTabletP1.addReplica(replica10); rollupTabletP1.addReplica(replica11); - partition1.createRollupIndex(rollupIndexP1); + partition1.getDefaultPhysicalPartition().createRollupIndex(rollupIndexP1); // rollup index p2 MaterializedIndex rollupIndexP2 = new MaterializedIndex(TEST_ROLLUP_ID, IndexState.NORMAL); @@ -392,7 +391,7 @@ public static Database mockDb() throws AnalysisException { rollupTabletP2.addReplica(replica13); rollupTabletP2.addReplica(replica14); - partition2.createRollupIndex(rollupIndexP2); + partition2.getDefaultPhysicalPartition().createRollupIndex(rollupIndexP2); olapTable2.setIndexMeta(TEST_ROLLUP_ID, TEST_ROLLUP_NAME, TEST_ROLLUP_SCHEMA, 0, ROLLUP_SCHEMA_HASH, (short) 1, TStorageType.COLUMN, KeysType.AGG_KEYS); @@ -475,7 +474,7 @@ public static Database mockDb() throws AnalysisException { partition1 = new Partition(TEST_PARTITION1_ID, TEST_PARTITION1_NAME, baseIndexP1, 
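CatalogMocker shows PhysicalPartitionImpl being folded into PhysicalPartition itself: sub-partitions are constructed directly and rollup indices are registered on the default physical partition. A sketch of the mock wiring, assuming the constructor arguments keep their previous meaning (id, name, parent partition id, shard group id, base index):

    // Attach a second physical partition to the logical partition, as mockDb() does.
    PhysicalPartition physicalPartition2 = new PhysicalPartition(
            TEST_PARTITION2_ID, "", TEST_PARTITION1_ID, 0, baseIndexP2);
    partition1.addSubPartition(physicalPartition2);

    // Rollup indices are likewise registered on the default physical partition.
    partition1.getDefaultPhysicalPartition().createRollupIndex(rollupIndexP1);
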
distributionInfo4); - PhysicalPartition physicalPartition2 = new PhysicalPartitionImpl( + PhysicalPartition physicalPartition2 = new PhysicalPartition( TEST_PARTITION2_ID, "", TEST_PARTITION1_ID, 0, baseIndexP2); partition1.addSubPartition(physicalPartition2); diff --git a/fe/fe-core/src/test/java/com/starrocks/backup/RestoreJobMaterializedViewTest.java b/fe/fe-core/src/test/java/com/starrocks/backup/RestoreJobMaterializedViewTest.java index b5dde67a513e82..861449de7b8bb9 100644 --- a/fe/fe-core/src/test/java/com/starrocks/backup/RestoreJobMaterializedViewTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/backup/RestoreJobMaterializedViewTest.java @@ -203,7 +203,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { //minTimes = 0; //result = systemInfoService; - globalStateMgr.getLocalMetastore().mayGetDb(anyLong); + globalStateMgr.getStarRocksMetadata().mayGetDb(anyLong); minTimes = 0; result = Optional.of(db); @@ -331,7 +331,8 @@ private BackupTableInfo mockBackupTableInfo(OlapTable olapTable) { partInfo.name = partition.getName(); tblInfo.partitions.put(partInfo.name, partInfo); - for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.VISIBLE)) { BackupIndexInfo idxInfo = new BackupIndexInfo(); idxInfo.id = index.getId(); idxInfo.name = olapTable.getIndexNameById(index.getId()); diff --git a/fe/fe-core/src/test/java/com/starrocks/backup/RestoreJobPrimaryKeyTest.java b/fe/fe-core/src/test/java/com/starrocks/backup/RestoreJobPrimaryKeyTest.java index 947e2f8231bbef..822c18c5ddf10c 100644 --- a/fe/fe-core/src/test/java/com/starrocks/backup/RestoreJobPrimaryKeyTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/backup/RestoreJobPrimaryKeyTest.java @@ -238,7 +238,8 @@ boolean await(long timeout, TimeUnit unit) { partInfo.name = partition.getName(); tblInfo.partitions.put(partInfo.name, partInfo); - for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.VISIBLE)) { BackupIndexInfo idxInfo = new BackupIndexInfo(); idxInfo.id = index.getId(); idxInfo.name = expectedRestoreTbl.getIndexNameById(index.getId()); diff --git a/fe/fe-core/src/test/java/com/starrocks/backup/RestoreJobTest.java b/fe/fe-core/src/test/java/com/starrocks/backup/RestoreJobTest.java index 81a6ec5a740597..3b17ad74cd5c25 100644 --- a/fe/fe-core/src/test/java/com/starrocks/backup/RestoreJobTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/backup/RestoreJobTest.java @@ -458,7 +458,8 @@ boolean await(long timeout, TimeUnit unit) { partInfo.name = partition.getName(); tblInfo.partitions.put(partInfo.name, partInfo); - for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.VISIBLE)) { BackupIndexInfo idxInfo = new BackupIndexInfo(); idxInfo.id = index.getId(); idxInfo.name = expectedRestoreTbl.getIndexNameById(index.getId()); diff --git a/fe/fe-core/src/test/java/com/starrocks/binlog/BinlogManagerTest.java b/fe/fe-core/src/test/java/com/starrocks/binlog/BinlogManagerTest.java index 17f09434c32943..c5dd6c71e942dd 100644 --- a/fe/fe-core/src/test/java/com/starrocks/binlog/BinlogManagerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/binlog/BinlogManagerTest.java 
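The hunks above share one pattern: tests that used to read materialized indexes (or create rollup indexes) directly on a Partition now go through partition.getDefaultPhysicalPartition(), and with PhysicalPartitionImpl gone from CatalogMocker's imports, the mock constructs PhysicalPartition itself and attaches it via partition1.addSubPartition(...). A minimal sketch of the read side, using only the accessors visible in these hunks; the class and method names of the sketch itself are invented:

    import java.util.ArrayList;
    import java.util.List;

    import com.starrocks.catalog.MaterializedIndex;
    import com.starrocks.catalog.MaterializedIndex.IndexExtState;
    import com.starrocks.catalog.OlapTable;
    import com.starrocks.catalog.Partition;
    import com.starrocks.catalog.Tablet;

    // Sketch only: collects visible tablet ids the way the updated tests do,
    // by going through the default physical partition of each logical partition.
    public class DefaultPhysicalPartitionSketch {
        static List<Long> visibleTabletIds(OlapTable table) {
            List<Long> tabletIds = new ArrayList<>();
            for (Partition partition : table.getPartitions()) {
                // The index list is no longer read from Partition directly in this branch.
                for (MaterializedIndex index : partition.getDefaultPhysicalPartition()
                        .getMaterializedIndices(IndexExtState.VISIBLE)) {
                    for (Tablet tablet : index.getTablets()) {
                        tabletIds.add(tablet.getId());
                    }
                }
            }
            return tabletIds;
        }
    }

The fourth argument of the new PhysicalPartition(...) call in CatalogMocker is copied from the mock as 0; this patch does not spell out what it represents.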
@@ -44,7 +44,7 @@ public static void beforeClass() throws Exception { // create database String createDbStmtStr = "create database test;"; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); String createTableStmtStr = "CREATE TABLE test.binlog_test(k1 int, v1 int) " + "duplicate key(k1) distributed by hash(k1) buckets 2 properties('replication_num' = '1', " + "'binlog_enable' = 'false', 'binlog_ttl_second' = '100', 'binlog_max_size' = '100');"; diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/AdminStmtTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/AdminStmtTest.java index 06db0098cd87cc..b709f17a11b314 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/AdminStmtTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/AdminStmtTest.java @@ -69,7 +69,7 @@ public static void beforeClass() throws Exception { // create database String createDbStmtStr = "create database test;"; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); String sql = "CREATE TABLE test.tbl1 (\n" + " `id` int(11) NULL COMMENT \"\",\n" + @@ -93,7 +93,8 @@ public void testAdminSetReplicaStatus() throws Exception { // tablet id, backend id List> tabletToBackendList = Lists.newArrayList(); for (Partition partition : tbl.getPartitions()) { - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { for (Tablet tablet : index.getTablets()) { for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { tabletToBackendList.add(Pair.create(tablet.getId(), replica.getBackendId())); @@ -112,7 +113,7 @@ public void testAdminSetReplicaStatus() throws Exception { + backendId + "', 'status' = 'bad');"; AdminSetReplicaStatusStmt stmt = (AdminSetReplicaStatusStmt) UtFrameUtils.parseStmtWithNewParser(adminStmt, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().setReplicaStatus(stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().setReplicaStatus(stmt); replica = GlobalStateMgr.getCurrentState().getTabletInvertedIndex().getReplica(tabletId, backendId); Assert.assertTrue(replica.isBad()); @@ -120,7 +121,7 @@ public void testAdminSetReplicaStatus() throws Exception { adminStmt = "admin set replica status properties ('tablet_id' = '" + tabletId + "', 'backend_id' = '" + backendId + "', 'status' = 'ok');"; stmt = (AdminSetReplicaStatusStmt) UtFrameUtils.parseStmtWithNewParser(adminStmt, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().setReplicaStatus(stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().setReplicaStatus(stmt); replica = GlobalStateMgr.getCurrentState().getTabletInvertedIndex().getReplica(tabletId, backendId); Assert.assertFalse(replica.isBad()); } diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/BrokerMgrTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/BrokerMgrTest.java index 
84cfb137eab6ca..8b471341d5759c 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/BrokerMgrTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/BrokerMgrTest.java @@ -19,7 +19,6 @@ import com.starrocks.common.Pair; import com.starrocks.persist.EditLog; import com.starrocks.persist.gson.GsonUtils; -import com.starrocks.qe.SessionVariable; import com.starrocks.server.GlobalStateMgr; import com.starrocks.utframe.UtFrameUtils; import mockit.Expectations; @@ -45,9 +44,6 @@ public void setUp() throws Exception { globalStateMgr.getEditLog(); minTimes = 0; result = editLog; - - editLog.logGlobalVariable((SessionVariable) any); - minTimes = 0; } }; } diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogRecycleBinLakeTableTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogRecycleBinLakeTableTest.java index 9820fdfb31bcf2..da05258c082194 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogRecycleBinLakeTableTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogRecycleBinLakeTableTest.java @@ -56,14 +56,14 @@ public static void beforeClass() { private static Table createTable(ConnectContext connectContext, String sql) throws Exception { CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(createTableStmt); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(createTableStmt.getDbName()); - return GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), createTableStmt.getTableName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(createTableStmt); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb(createTableStmt.getDbName()); + return GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), createTableStmt.getTableName()); } private static void dropTable(ConnectContext connectContext, String sql) throws Exception { DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } private static void alterTable(ConnectContext connectContext, String sql) throws Exception { @@ -73,12 +73,12 @@ private static void alterTable(ConnectContext connectContext, String sql) throws private static void recoverDatabase(ConnectContext connectContext, String sql) throws Exception { RecoverDbStmt stmt = (RecoverDbStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().recoverDatabase(stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().recoverDatabase(stmt); } private static void recoverPartition(ConnectContext connectContext, String sql) throws Exception { RecoverPartitionStmt stmt = (RecoverPartitionStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().recoverPartition(stmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().recoverPartition(stmt); } private static Future buildDropTableResponse(int errCode, String msg) { @@ -97,7 +97,8 @@ private static void checkTableTablet(Table table, boolean expectExist) { private static void checkPartitionTablet(Partition partition, boolean expectExist) { TabletInvertedIndex tabletIndex = 
GlobalStateMgr.getCurrentState().getTabletInvertedIndex(); - for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + for (MaterializedIndex index : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { for (Tablet tablet : index.getTablets()) { TabletMeta meta = tabletIndex.getTabletMeta(tablet.getId()); if (expectExist) { @@ -120,8 +121,8 @@ public void testRecycleLakeTable(@Mocked LakeService lakeService) throws Excepti // create database String createDbStmtStr = "create database recycle_bin_test;"; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("recycle_bin_test"); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("recycle_bin_test"); Table table1 = createTable(connectContext, "create table recycle_bin_test.t0" + "(key1 int," + @@ -215,7 +216,7 @@ public LakeService getLakeService(TNetworkAddress address) throws RpcException { // Recover table2 Assert.assertTrue(recycleBin.recoverTable(db, "t0")); - Assert.assertSame(table2, GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "t0")); + Assert.assertSame(table2, GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTable(db.getFullName(), "t0")); Assert.assertNull(recycleBin.getTable(db.getId(), table2.getId())); checkTableTablet(table2, true); @@ -234,8 +235,8 @@ public void testReplayRecycleLakeTable(@Mocked LakeService lakeService) throws E // create database String createDbStmtStr = String.format("create database %s;", dbName); CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbName); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb(dbName); Table table1 = createTable(connectContext, String.format("create table %s.t0" + "(key1 int," + @@ -259,8 +260,8 @@ public void testRecycleLakeDatabase(@Mocked LakeService lakeService) throws Exce // create database String createDbStmtStr = String.format("create database %s;", dbName); CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbName); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb(dbName); Table table1 = createTable(connectContext, String.format("create table %s.t1" + "(key1 int," + @@ -274,7 +275,7 @@ public void testRecycleLakeDatabase(@Mocked LakeService lakeService) throws Exce ") distributed by hash(key1) buckets 3 " + "properties('replication_num' = '1');", dbName)); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropDb(dbName, false); + 
GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropDb(dbName, false); Assert.assertNotNull(recycleBin.getTable(db.getId(), table1.getId())); Assert.assertNotNull(recycleBin.getTable(db.getId(), table2.getId())); @@ -285,7 +286,7 @@ public void testRecycleLakeDatabase(@Mocked LakeService lakeService) throws Exce Assert.assertNull(recycleBin.getTable(db.getId(), table2.getId())); // Drop the database again. - GlobalStateMgr.getCurrentState().getLocalMetastore().dropDb(dbName, false); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropDb(dbName, false); Assert.assertNotNull(recycleBin.getTable(db.getId(), table1.getId())); Assert.assertNotNull(recycleBin.getTable(db.getId(), table2.getId())); @@ -321,8 +322,8 @@ public void testRecycleLakePartition(@Mocked LakeService lakeService) throws Exc // create database String createDbStmtStr = String.format("create database %s;", dbName); CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); - Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbName); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb(dbName); Table table1 = createTable(connectContext, String.format( "CREATE TABLE %s.t1" + @@ -343,9 +344,12 @@ public void testRecycleLakePartition(@Mocked LakeService lakeService) throws Exc Partition p2 = table1.getPartition("p2"); Partition p3 = table1.getPartition("p3"); Assert.assertNotNull(p1); - Assert.assertFalse(LakeTableHelper.isSharedPartitionDirectory(p1, WarehouseManager.DEFAULT_WAREHOUSE_ID)); - Assert.assertFalse(LakeTableHelper.isSharedPartitionDirectory(p2, WarehouseManager.DEFAULT_WAREHOUSE_ID)); - Assert.assertFalse(LakeTableHelper.isSharedPartitionDirectory(p3, WarehouseManager.DEFAULT_WAREHOUSE_ID)); + Assert.assertFalse(LakeTableHelper.isSharedPartitionDirectory(p1.getDefaultPhysicalPartition(), + WarehouseManager.DEFAULT_WAREHOUSE_ID)); + Assert.assertFalse(LakeTableHelper.isSharedPartitionDirectory(p2.getDefaultPhysicalPartition(), + WarehouseManager.DEFAULT_WAREHOUSE_ID)); + Assert.assertFalse(LakeTableHelper.isSharedPartitionDirectory(p3.getDefaultPhysicalPartition(), + WarehouseManager.DEFAULT_WAREHOUSE_ID)); // Drop partition "p1" alterTable(connectContext, String.format("ALTER TABLE %s.t1 DROP PARTITION p1", dbName)); @@ -440,7 +444,8 @@ public LakeService getLakeService(TNetworkAddress address) throws RpcException { "PROPERTIES('replication_num' = '1');", dbName)); p1 = table2.getPartition("p1"); - Assert.assertFalse(LakeTableHelper.isSharedPartitionDirectory(p1, WarehouseManager.DEFAULT_WAREHOUSE_ID)); + Assert.assertFalse(LakeTableHelper.isSharedPartitionDirectory(p1.getDefaultPhysicalPartition(), + WarehouseManager.DEFAULT_WAREHOUSE_ID)); // Drop partition "p1" alterTable(connectContext, String.format("ALTER TABLE %s.t2 DROP PARTITION p1", dbName)); Assert.assertNull(table2.getPartition("p1")); @@ -472,8 +477,8 @@ public void testRecycleLakePartitionWithSharedDirectory(@Mocked LakeService lake // create database String createDbStmtStr = String.format("create database %s;", dbName); CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); - Database db = 
GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbName); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); + Database db = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb(dbName); Table table1 = createTable(connectContext, String.format( "CREATE TABLE %s.t1" + diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogRecycleBinTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogRecycleBinTest.java index e60f249f8492e0..f3f711f1728e5d 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogRecycleBinTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogRecycleBinTest.java @@ -44,9 +44,9 @@ public class CatalogRecycleBinTest { public void testGetDb() { CatalogRecycleBin bin = new CatalogRecycleBin(); Database database = new Database(1, "db"); - bin.recycleDatabase(database, Sets.newHashSet()); + bin.recycleDatabase(database, Sets.newHashSet(), false); Database database2 = new Database(2, "db"); - bin.recycleDatabase(database2, Sets.newHashSet()); + bin.recycleDatabase(database2, Sets.newHashSet(), false); Database recycledDb = bin.getDatabase(1); Assert.assertNull(recycledDb); @@ -117,7 +117,7 @@ public void testGetPhysicalPartition() throws Exception { PhysicalPartition recycledPart = bin.getPhysicalPartition(1L); Assert.assertNotNull(recycledPart); - recycledPart = bin.getPartition(2L); + recycledPart = bin.getPartition(2L).getDefaultPhysicalPartition(); Assert.assertEquals(2L, recycledPart.getId()); } @@ -312,7 +312,7 @@ public void testEnsureEraseLater() { Config.catalog_trash_expire_second = 600; // set expire in 10 minutes CatalogRecycleBin recycleBin = new CatalogRecycleBin(); Database db = new Database(111, "uno"); - recycleBin.recycleDatabase(db, new HashSet<>()); + recycleBin.recycleDatabase(db, new HashSet<>(), false); // no need to set enable erase later if there are a lot of time left long now = System.currentTimeMillis(); @@ -342,9 +342,9 @@ public void testRecycleDb(@Mocked GlobalStateMgr globalStateMgr, @Mocked EditLog // 1. 
recycle 2 dbs CatalogRecycleBin recycleBin = new CatalogRecycleBin(); - recycleBin.recycleDatabase(db1, new HashSet<>()); - recycleBin.recycleDatabase(db2SameName, new HashSet<>()); // will remove same name - recycleBin.recycleDatabase(db2, new HashSet<>()); + recycleBin.recycleDatabase(db1, new HashSet<>(), false); + recycleBin.recycleDatabase(db2SameName, new HashSet<>(), false); // will remove same name + recycleBin.recycleDatabase(db2, new HashSet<>(), false); Assert.assertEquals(recycleBin.getDatabase(db1.getId()), db1); Assert.assertEquals(recycleBin.getDatabase(db2.getId()), db2); @@ -367,7 +367,7 @@ public void testRecycleDb(@Mocked GlobalStateMgr globalStateMgr, @Mocked EditLog }; new Expectations() { { - globalStateMgr.getLocalMetastore().onEraseDatabase(anyLong); + globalStateMgr.getRecycleBin().onEraseDatabase(anyLong); minTimes = 0; globalStateMgr.getEditLog(); minTimes = 0; @@ -540,7 +540,7 @@ public void testRecyclePartition(@Mocked GlobalStateMgr globalStateMgr, @Mocked }; new Expectations() { { - globalStateMgr.getLocalMetastore().onErasePartition((Partition) any); + globalStateMgr.getStarRocksMetadata().onErasePartition((Partition) any); minTimes = 0; globalStateMgr.getEditLog(); diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogUtilsTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogUtilsTest.java index 9c56dff9497b10..dcc966e94ab3fb 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogUtilsTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/CatalogUtilsTest.java @@ -61,7 +61,7 @@ public void testCalAvgBucketNumOfRecentPartitions_CalculateByDataSize() { partitions.add(partition); when(olapTable.getPartitions()).thenReturn(partitions); when(olapTable.getRecentPartitions(anyInt())).thenReturn(partitions); - when(partition.getVisibleVersion()).thenReturn(2L); + when(partition.getDefaultPhysicalPartition().getVisibleVersion()).thenReturn(2L); when(partition.getDataSize()).thenReturn(2L * FeConstants.AUTO_DISTRIBUTION_UNIT); int bucketNum = CatalogUtils.calAvgBucketNumOfRecentPartitions(olapTable, 1, true); diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/ColocateTableIndexTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/ColocateTableIndexTest.java index 6015951e5abbb9..52b0c53ace6597 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/ColocateTableIndexTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/ColocateTableIndexTest.java @@ -81,7 +81,7 @@ public void testDropTable() throws Exception { // create db1 String createDbStmtStr = "create database db1;"; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); // create table1_1->group1 String sql = "CREATE TABLE db1.table1_1 (k1 int, k2 int, k3 varchar(32))\n" + @@ -118,7 +118,7 @@ public void testDropTable() throws Exception { // create db2 createDbStmtStr = "create database db2;"; createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); // create table2_1 -> group2 sql = "CREATE TABLE db2.table2_1 (k1 int, k2 int, k3 varchar(32))\n" + "PRIMARY 
KEY(k1)\n" + @@ -140,7 +140,7 @@ public void testDropTable() throws Exception { // drop db1.table1_1 sql = "DROP TABLE db1.table1_1;"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); // group1 -> table1_1*, table1_2 // group2 -> table2_l infos = GlobalStateMgr.getCurrentState().getColocateTableIndex().getInfos(); @@ -153,7 +153,7 @@ public void testDropTable() throws Exception { // drop db1.table1_2 sql = "DROP TABLE db1.table1_2;"; dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); // group1 -> table1_1*, table1_2* // group2 -> table2_l infos = GlobalStateMgr.getCurrentState().getColocateTableIndex().getInfos(); @@ -171,7 +171,7 @@ public void testDropTable() throws Exception { // drop db2 sql = "DROP DATABASE db2;"; DropDbStmt dropDbStmt = (DropDbStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropDb(dropDbStmt.getDbName(), dropDbStmt.isForceDrop()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropDb(dropDbStmt.getDbName(), dropDbStmt.isForceDrop()); // group1 -> table1_1*, table1_2* // group2 -> table2_l* infos = GlobalStateMgr.getCurrentState().getColocateTableIndex().getInfos(); @@ -184,7 +184,7 @@ public void testDropTable() throws Exception { // create & drop db2 again createDbStmtStr = "create database db2;"; createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); // create table2_1 -> group2 sql = "CREATE TABLE db2.table2_3 (k1 int, k2 int, k3 varchar(32))\n" + "PRIMARY KEY(k1)\n" + @@ -196,7 +196,7 @@ public void testDropTable() throws Exception { Table table2To3 = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("db2").getTable("table2_3"); sql = "DROP DATABASE db2;"; dropDbStmt = (DropDbStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropDb(dropDbStmt.getDbName(), dropDbStmt.isForceDrop()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropDb(dropDbStmt.getDbName(), dropDbStmt.isForceDrop()); infos = GlobalStateMgr.getCurrentState().getColocateTableIndex().getInfos(); map = groupByName(infos); LOG.info("after create & drop db2: {}", infos); @@ -218,7 +218,7 @@ public void testCleanUp() throws Exception { // create goodDb CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser("create database goodDb;", connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); Database goodDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("goodDb"); // create goodtable String sql = "CREATE TABLE " + @@ -319,7 +319,7 @@ public void testSaveLoadJsonFormatImage() throws Exception { // create goodDb CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils .parseStmtWithNewParser("create 
database db_image;", connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("db_image"); // create goodtable String sql = "CREATE TABLE " + diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/ColocateTableTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/ColocateTableTest.java index fb6a49bf970bc0..b98c52875b1234 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/ColocateTableTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/ColocateTableTest.java @@ -88,7 +88,7 @@ public void createDb() throws Exception { public void dropDb() throws Exception { String dropDbStmtStr = "drop database " + dbName; DropDbStmt dropDbStmt = (DropDbStmt) UtFrameUtils.parseStmtWithNewParser(dropDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropDb(dropDbStmt.getDbName(), dropDbStmt.isForceDrop()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropDb(dropDbStmt.getDbName(), dropDbStmt.isForceDrop()); } private static void createTable(String sql) throws Exception { diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableLikeTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableLikeTest.java index 91ea85f51b2308..2a4e5f63a0debd 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableLikeTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableLikeTest.java @@ -63,10 +63,10 @@ public static void beforeClass() throws Exception { // create database String createDbStmtStr = "create database test;"; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); String createDbStmtStr2 = "create database test2;"; CreateDbStmt createDbStmt2 = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr2, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt2.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt2.getFullDbName()); } private static void createTable(String sql) throws Exception { @@ -77,7 +77,7 @@ private static void createTable(String sql) throws Exception { private static void createTableLike(String sql) throws Exception { CreateTableLikeStmt createTableLikeStmt = (CreateTableLikeStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(createTableLikeStmt.getCreateTableStmt()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(createTableLikeStmt.getCreateTableStmt()); } private static void checkTableEqual(Table newTable, Table existedTable) { diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableTest.java index 2c0f538051ddfc..629d1309ad6a4f 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableTest.java @@ -90,7 +90,7 @@ public static void beforeClass() throws Exception { // create database String createDbStmtStr = "create database test;"; 
CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); UtFrameUtils.setUpForPersistTest(); } @@ -102,7 +102,7 @@ private static void createTable(String sql) throws Exception { private static void alterTableWithNewParser(String sql) throws Exception { AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(connectContext, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(connectContext, alterTableStmt); } @Test(expected = DdlException.class) @@ -1054,7 +1054,7 @@ public void testCreateTableLocationPropPersist() throws Exception { GlobalStateMgr.getCurrentState().getLocalMetastore().save(finalImage.getImageWriter()); // ** test replay from edit log - LocalMetastore localMetastoreFollower = new LocalMetastore(GlobalStateMgr.getCurrentState(), null, null); + LocalMetastore localMetastoreFollower = new LocalMetastore(GlobalStateMgr.getCurrentState()); localMetastoreFollower.load(initialImage.getMetaBlockReader()); CreateTableInfo info = (CreateTableInfo) UtFrameUtils.PseudoJournalReplayer.replayNextJournal(OperationType.OP_CREATE_TABLE_V2); @@ -1066,7 +1066,7 @@ public void testCreateTableLocationPropPersist() throws Exception { Assert.assertTrue(olapTable.getLocation().containsKey("rack")); // ** test load from image(simulate restart) - LocalMetastore localMetastoreLeader = new LocalMetastore(GlobalStateMgr.getCurrentState(), null, null); + LocalMetastore localMetastoreLeader = new LocalMetastore(GlobalStateMgr.getCurrentState()); localMetastoreLeader.load(finalImage.getMetaBlockReader()); olapTable = (OlapTable) localMetastoreLeader.getDb("test") .getTable("test_location_persist_t1"); diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableWithAggStateTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableWithAggStateTest.java index 911012f2a1f798..375789b00387f2 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableWithAggStateTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableWithAggStateTest.java @@ -43,7 +43,7 @@ public static void beforeClass() throws Exception { // create database String createDbStmtStr = "create database test;"; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); starRocksAssert.useDatabase("test"); UtFrameUtils.setUpForPersistTest(); } diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableWithLocationTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableWithLocationTest.java index d1809b2a25fca5..7dbb2d34476879 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableWithLocationTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/CreateTableWithLocationTest.java @@ -75,7 +75,7 @@ private void clearBackendLocationProp() throws SQLException { PseudoCluster cluster = PseudoCluster.getInstance(); for (PseudoBackend backend : cluster.getBackends()) { String stmtStr = "alter system modify backend '" + 
backend.getHostHeartbeatPort() + "' set ('" + - AlterSystemStmtAnalyzer.PROP_KEY_LOCATION + "' = '')"; + AlterSystemStmtAnalyzer.PROP_KEY_LOCATION + "' = '')"; System.out.println(stmtStr); cluster.runSql(null, stmtStr); } @@ -86,7 +86,7 @@ private void setBackendLocationProp(List locations) throws SQLException int i = 0; for (PseudoBackend backend : cluster.getBackends()) { String stmtStr = "alter system modify backend '" + backend.getHostHeartbeatPort() + "' set ('" + - AlterSystemStmtAnalyzer.PROP_KEY_LOCATION + "' = '" + locations.get(i++) + "')"; + AlterSystemStmtAnalyzer.PROP_KEY_LOCATION + "' = '" + locations.get(i++) + "')"; System.out.println(stmtStr); cluster.runSql(null, stmtStr); } @@ -97,21 +97,21 @@ public void testCreateTableAndBackendNoLocationProp() throws SQLException { clearBackendLocationProp(); PseudoCluster cluster = PseudoCluster.getInstance(); String sql = "CREATE TABLE test.`test_table_backend_no_loc` (\n" + - " k1 int,\n" + - " k2 VARCHAR NOT NULL\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 2\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"3\",\n" + - " \"in_memory\" = \"false\"\n" + - ");"; + " k1 int,\n" + + " k2 VARCHAR NOT NULL\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 2\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"3\",\n" + + " \"in_memory\" = \"false\"\n" + + ");"; cluster.runSql("test", sql); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), "test_table_backend_no_loc"); + .getTable(db.getFullName(), "test_table_backend_no_loc"); Assert.assertNull(table.getLocation()); } @@ -120,7 +120,7 @@ private Set getBackendIdsWithLocProp() { Set backendIds = Sets.newHashSet(); for (PseudoBackend pseudoBackend : cluster.getBackends()) { Backend backend = GlobalStateMgr.getCurrentState().getNodeMgr() - .getClusterInfo().getBackend(pseudoBackend.getId()); + .getClusterInfo().getBackend(pseudoBackend.getId()); if (!backend.getLocation().isEmpty()) { backendIds.add(backend.getId()); } @@ -133,7 +133,7 @@ private Set getBackendIdsWithLocProp(String locationKey) { Set backendIds = Sets.newHashSet(); for (PseudoBackend pseudoBackend : cluster.getBackends()) { Backend backend = GlobalStateMgr.getCurrentState().getNodeMgr() - .getClusterInfo().getBackend(pseudoBackend.getId()); + .getClusterInfo().getBackend(pseudoBackend.getId()); if (backend.getLocation().containsKey(locationKey)) { backendIds.add(backend.getId()); } @@ -146,9 +146,9 @@ private Set getBackendIdsWithLocProp(String locationKey, String locationVa Set backendIds = Sets.newHashSet(); for (PseudoBackend pseudoBackend : cluster.getBackends()) { Backend backend = GlobalStateMgr.getCurrentState().getNodeMgr() - .getClusterInfo().getBackend(pseudoBackend.getId()); + .getClusterInfo().getBackend(pseudoBackend.getId()); if (backend.getLocation().containsKey(locationKey) && - Objects.equals(backend.getLocation().get(locationKey), locationVal)) { + Objects.equals(backend.getLocation().get(locationKey), locationVal)) { backendIds.add(backend.getId()); } } @@ -160,7 +160,7 @@ private Set getBackendIdsWithoutLocProp() { Set backendIds = Sets.newHashSet(); for (PseudoBackend pseudoBackend : cluster.getBackends()) { Backend backend = GlobalStateMgr.getCurrentState().getNodeMgr() - 
.getClusterInfo().getBackend(pseudoBackend.getId()); + .getClusterInfo().getBackend(pseudoBackend.getId()); if (backend.getLocation().isEmpty()) { backendIds.add(backend.getId()); } @@ -172,29 +172,29 @@ private Set getBackendIdsWithoutLocProp() { public void testCreateTableNoLocPropBackendWithLocProp() throws SQLException { // last backend doesn't contain location property List locations = Lists.newArrayList("rack:r1", "rack:rack2", "rack:rack3", - "region:r1", "region:r2", "region:r3", ""); + "region:r1", "region:r2", "region:r3", ""); setBackendLocationProp(locations); PseudoCluster cluster = PseudoCluster.getInstance(); String sql = "CREATE TABLE test.`test_table_no_loc_backend_with_loc` (\n" + - " k1 int,\n" + - " k2 VARCHAR NOT NULL\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"3\",\n" + - " \"in_memory\" = \"false\"\n" + - ");"; + " k1 int,\n" + + " k2 VARCHAR NOT NULL\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"3\",\n" + + " \"in_memory\" = \"false\"\n" + + ");"; cluster.runSql("test", sql); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), "test_table_no_loc_backend_with_loc"); + .getTable(db.getFullName(), "test_table_no_loc_backend_with_loc"); Assert.assertTrue(table.getLocation().keySet().contains("*")); List partitions = new ArrayList<>(table.getAllPartitions()); - List tablets = partitions.get(0).getBaseIndex().getTablets(); + List tablets = partitions.get(0).getDefaultPhysicalPartition().getBaseIndex().getTablets(); Set backendIdsWithLocProp = getBackendIdsWithLocProp(); Set backendIdsWithoutLocProp = getBackendIdsWithoutLocProp(); System.out.println(backendIdsWithLocProp); @@ -216,31 +216,31 @@ public void testCreateTableNoLocPropBackendWithLocProp() throws SQLException { public void testCreateTableWithExplicitLocPropBackendWithLocProp() throws SQLException { // last backend doesn't contain location property List locations = Lists.newArrayList("rack:r1", "rack:rack2", "rack:rack3", - "region:r1", "region:r2", "region:r3", ""); + "region:r1", "region:r2", "region:r3", ""); setBackendLocationProp(locations); PseudoCluster cluster = PseudoCluster.getInstance(); // Test: rack:* String sql = "CREATE TABLE test.`test_table_explicit_loc_backend_with_loc1` (\n" + - " k1 int,\n" + - " k2 VARCHAR NOT NULL\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"3\",\n" + - " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:*\",\n" + - " \"in_memory\" = \"false\"\n" + - ");"; + " k1 int,\n" + + " k2 VARCHAR NOT NULL\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"3\",\n" + + " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:*\",\n" + + " \"in_memory\" = \"false\"\n" + + ");"; System.out.println(sql); cluster.runSql("test", sql); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), 
"test_table_explicit_loc_backend_with_loc1"); + .getTable(db.getFullName(), "test_table_explicit_loc_backend_with_loc1"); Assert.assertTrue(table.getLocation().keySet().contains("rack")); List partitions = new ArrayList<>(table.getAllPartitions()); - List tablets = partitions.get(0).getBaseIndex().getTablets(); + List tablets = partitions.get(0).getDefaultPhysicalPartition().getBaseIndex().getTablets(); Set backendIdsWithLocProp = getBackendIdsWithLocProp("rack"); Set backendIdsWithoutLocProp = getBackendIdsWithoutLocProp(); // test_table_explicit_loc_backend_with_loc1's replicas should only distribute on backends with rack location @@ -261,26 +261,26 @@ public void testCreateTableWithExplicitLocPropBackendWithLocProp() throws SQLExc // Test: rack:*, region:* sql = "CREATE TABLE test.`test_table_explicit_loc_backend_with_loc2` (\n" + - " k1 int,\n" + - " k2 VARCHAR NOT NULL\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"3\",\n" + - " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:*, region:*\",\n" + - " \"in_memory\" = \"false\"\n" + - ");"; + " k1 int,\n" + + " k2 VARCHAR NOT NULL\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"3\",\n" + + " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:*, region:*\",\n" + + " \"in_memory\" = \"false\"\n" + + ");"; cluster.runSql("test", sql); boolean hasReplicaOnRegionBackend = false; boolean hasReplicaOnRackBackend = false; table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), "test_table_explicit_loc_backend_with_loc2"); + .getTable(db.getFullName(), "test_table_explicit_loc_backend_with_loc2"); Assert.assertTrue(table.getLocation().keySet().contains("rack")); Assert.assertTrue(table.getLocation().keySet().contains("region")); partitions = new ArrayList<>(table.getAllPartitions()); - tablets = partitions.get(0).getBaseIndex().getTablets(); + tablets = partitions.get(0).getDefaultPhysicalPartition().getBaseIndex().getTablets(); for (Tablet tablet : tablets) { List replicas = tablet.getAllReplicas(); Set replicaBackendIds = Sets.newHashSet(); @@ -302,24 +302,24 @@ public void testCreateTableWithExplicitLocPropBackendWithLocProp() throws SQLExc // Test: rack:r1, rack:rack2, rack:rack3 sql = "CREATE TABLE test.`test_table_explicit_loc_backend_with_loc3` (\n" + - " k1 int,\n" + - " k2 VARCHAR NOT NULL\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"3\",\n" + - " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:r1, rack:rack2, rack:rack3\",\n" + - " \"in_memory\" = \"false\"\n" + - ");"; + " k1 int,\n" + + " k2 VARCHAR NOT NULL\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"3\",\n" + + " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:r1, rack:rack2, rack:rack3\",\n" + + " \"in_memory\" = \"false\"\n" + + ");"; cluster.runSql("test", sql); table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), "test_table_explicit_loc_backend_with_loc3"); + .getTable(db.getFullName(), 
"test_table_explicit_loc_backend_with_loc3"); Assert.assertTrue(table.getLocation().keySet().contains("rack")); Assert.assertEquals(1, table.getLocation().keySet().size()); partitions = new ArrayList<>(table.getAllPartitions()); - tablets = partitions.get(0).getBaseIndex().getTablets(); + tablets = partitions.get(0).getDefaultPhysicalPartition().getBaseIndex().getTablets(); backendIdsWithLocProp = getBackendIdsWithLocProp("rack", "r1"); backendIdsWithLocProp.addAll(getBackendIdsWithLocProp("rack", "rack2")); backendIdsWithLocProp.addAll(getBackendIdsWithLocProp("rack", "rack3")); @@ -337,24 +337,24 @@ public void testCreateTableWithExplicitLocPropBackendWithLocProp() throws SQLExc // Test: rack:r1, region:* sql = "CREATE TABLE test.`test_table_explicit_loc_backend_with_loc4` (\n" + - " k1 int,\n" + - " k2 VARCHAR NOT NULL\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"3\",\n" + - " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:r1, region:*\",\n" + - " \"in_memory\" = \"false\"\n" + - ");"; + " k1 int,\n" + + " k2 VARCHAR NOT NULL\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"3\",\n" + + " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:r1, region:*\",\n" + + " \"in_memory\" = \"false\"\n" + + ");"; cluster.runSql("test", sql); table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), "test_table_explicit_loc_backend_with_loc4"); + .getTable(db.getFullName(), "test_table_explicit_loc_backend_with_loc4"); Assert.assertTrue(table.getLocation().keySet().contains("rack")); Assert.assertTrue(table.getLocation().keySet().contains("region")); partitions = new ArrayList<>(table.getAllPartitions()); - tablets = partitions.get(0).getBaseIndex().getTablets(); + tablets = partitions.get(0).getDefaultPhysicalPartition().getBaseIndex().getTablets(); Set allReplicasBackendIds = Sets.newHashSet(); for (Tablet tablet : tablets) { List replicas = tablet.getAllReplicas(); @@ -377,23 +377,23 @@ public void testCreateTableWithExplicitLocPropBackendWithLocProp() throws SQLExc // Test: rack:r1, rack:rack2, not enough hosts, fallback to ignore location prop sql = "CREATE TABLE test.`test_table_explicit_loc_backend_with_loc5` (\n" + - " k1 int,\n" + - " k2 VARCHAR NOT NULL\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"3\",\n" + - " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:r1, rack:rack2\",\n" + - " \"in_memory\" = \"false\"\n" + - ");"; + " k1 int,\n" + + " k2 VARCHAR NOT NULL\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"3\",\n" + + " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:r1, rack:rack2\",\n" + + " \"in_memory\" = \"false\"\n" + + ");"; cluster.runSql("test", sql); table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), "test_table_explicit_loc_backend_with_loc5"); + .getTable(db.getFullName(), "test_table_explicit_loc_backend_with_loc5"); Assert.assertTrue(table.getLocation().keySet().contains("rack")); partitions = new 
ArrayList<>(table.getAllPartitions()); - tablets = partitions.get(0).getBaseIndex().getTablets(); + tablets = partitions.get(0).getDefaultPhysicalPartition().getBaseIndex().getTablets(); allReplicasBackendIds = Sets.newHashSet(); for (Tablet tablet : tablets) { List replicas = tablet.getAllReplicas(); @@ -409,27 +409,27 @@ public void testCreateTableWithExplicitLocPropBackendWithLocProp() throws SQLExc backendIdsWithLocProp = getBackendIdsWithLocProp(); backendIdsWithLocProp.addAll(getBackendIdsWithoutLocProp()); Assert.assertEquals(backendIdsWithLocProp.size(), - Sets.intersection(allReplicasBackendIds, backendIdsWithLocProp).size()); + Sets.intersection(allReplicasBackendIds, backendIdsWithLocProp).size()); // Test: rack:r1, rack:rack2, not enough hosts, fallback to ignore location prop sql = "CREATE TABLE test.`test_table_explicit_loc_backend_with_loc6` (\n" + - " k1 int,\n" + - " k2 VARCHAR NOT NULL\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"10\",\n" + - " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:r1, rack:rack2\",\n" + - " \"in_memory\" = \"false\"\n" + - ");"; + " k1 int,\n" + + " k2 VARCHAR NOT NULL\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"10\",\n" + + " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:r1, rack:rack2\",\n" + + " \"in_memory\" = \"false\"\n" + + ");"; try { cluster.runSql("test", sql); } catch (Exception e) { Assert.assertTrue(e.getMessage().contains( - "Table replication num should be less than of equal to the number of available BE nodes")); + "Table replication num should be less than of equal to the number of available BE nodes")); } // clean up @@ -440,31 +440,31 @@ public void testCreateTableWithExplicitLocPropBackendWithLocProp() throws SQLExc public void testBestEffortMatchLocationProp() throws SQLException { // last backend has the same location prop with the first one List locations = Lists.newArrayList("rack:r1", "rack:rack2", "rack:rack3", - "region:r1", "region:r2", "region:r3", "rack:r1"); + "region:r1", "region:r2", "region:r3", "rack:r1"); setBackendLocationProp(locations); PseudoCluster cluster = PseudoCluster.getInstance(); // Test: rack:r1, rack:rack2, 3 hosts, but only 2 racks String sql = "CREATE TABLE test.`test_table_explicit_loc_backend_with_loc_best_effort` (\n" + - " k1 int,\n" + - " k2 VARCHAR NOT NULL\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"3\",\n" + - " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:r1, rack:rack2\",\n" + - " \"in_memory\" = \"false\"\n" + - ");"; + " k1 int,\n" + + " k2 VARCHAR NOT NULL\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 5\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"3\",\n" + + " \"" + PropertyAnalyzer.PROPERTIES_LABELS_LOCATION + "\" = \"rack:r1, rack:rack2\",\n" + + " \"in_memory\" = \"false\"\n" + + ");"; System.out.println(sql); cluster.runSql("test", sql); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), 
"test_table_explicit_loc_backend_with_loc_best_effort"); + .getTable(db.getFullName(), "test_table_explicit_loc_backend_with_loc_best_effort"); Assert.assertTrue(table.getLocation().keySet().contains("rack")); List partitions = new ArrayList<>(table.getAllPartitions()); - List tablets = partitions.get(0).getBaseIndex().getTablets(); + List tablets = partitions.get(0).getDefaultPhysicalPartition().getBaseIndex().getTablets(); Set backendIdsWithLocProp = getBackendIdsWithLocProp("rack", "r1"); backendIdsWithLocProp.addAll(getBackendIdsWithLocProp("rack", "rack2")); for (Tablet tablet : tablets) { diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/CreateViewTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/CreateViewTest.java index b7fc4dba32db0b..e4a4f6ef22ae5d 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/CreateViewTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/CreateViewTest.java @@ -43,7 +43,7 @@ public static void beforeClass() throws Exception { // create database String createDbStmtStr = "create database test;"; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); } @Test diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/DropPartitionTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/DropPartitionTest.java index ce766913c26b90..036790a34404e5 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/DropPartitionTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/DropPartitionTest.java @@ -71,7 +71,7 @@ public static void beforeClass() throws Exception { private static void createDb(String sql) throws Exception { CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); } @Before public void createTable() throws Exception { @@ -100,7 +100,7 @@ public void testNormalDropPartition() throws Exception { Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "tbl1"); Partition partition = table.getPartition("p20210201"); - long tabletId = partition.getBaseIndex().getTablets().get(0).getId(); + long tabletId = partition.getDefaultPhysicalPartition().getBaseIndex().getTablets().get(0).getId(); String dropPartitionSql = " alter table test.tbl1 drop partition p20210201;"; dropPartition(dropPartitionSql); List replicaList = @@ -111,7 +111,7 @@ public void testNormalDropPartition() throws Exception { String recoverPartitionSql = "recover partition p20210201 from test.tbl1"; RecoverPartitionStmt recoverPartitionStmt = (RecoverPartitionStmt) UtFrameUtils.parseStmtWithNewParser(recoverPartitionSql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().recoverPartition(recoverPartitionStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().recoverPartition(recoverPartitionStmt); partition = table.getPartition("p20210201"); Assert.assertNotNull(partition); Assert.assertEquals("p20210201", partition.getName()); @@ -122,7 +122,7 @@ public void testForceDropPartition() throws Exception 
{ Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "tbl1"); Partition partition = table.getPartition("p20210202"); - long tabletId = partition.getBaseIndex().getTablets().get(0).getId(); + long tabletId = partition.getDefaultPhysicalPartition().getBaseIndex().getTablets().get(0).getId(); String dropPartitionSql = " alter table test.tbl1 drop partition p20210202 force;"; dropPartition(dropPartitionSql); List replicaList; @@ -135,7 +135,7 @@ public void testForceDropPartition() throws Exception { (RecoverPartitionStmt) UtFrameUtils.parseStmtWithNewParser(recoverPartitionSql, connectContext); ExceptionChecker.expectThrowsWithMsg(DdlException.class, "No partition named 'p20210202' in recycle bin that belongs to table 'tbl1'", - () -> GlobalStateMgr.getCurrentState().getLocalMetastore().recoverPartition(recoverPartitionStmt)); + () -> GlobalStateMgr.getCurrentState().getStarRocksMetadata().recoverPartition(recoverPartitionStmt)); GlobalStateMgr.getCurrentState().getRecycleBin().erasePartition(System.currentTimeMillis()); replicaList = GlobalStateMgr.getCurrentState().getTabletInvertedIndex().getReplicasByTabletId(tabletId); @@ -147,7 +147,7 @@ public void testDropPartitionAndReserveTablets() throws Exception { Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); OlapTable table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "tbl1"); Partition partition = table.getPartition("p20210203"); - long tabletId = partition.getBaseIndex().getTablets().get(0).getId(); + long tabletId = partition.getDefaultPhysicalPartition().getBaseIndex().getTablets().get(0).getId(); table.dropPartitionAndReserveTablet("p20210203"); List replicaList = GlobalStateMgr.getCurrentState().getTabletInvertedIndex().getReplicasByTabletId(tabletId); @@ -196,7 +196,7 @@ private void checkNormalDropPartitions(String dbName, String tableName, String d for (String partitionName : partitionNames) { Table table = getTable(dbName, tableName); Partition partition = table.getPartition(partitionName); - long tabletId = partition.getBaseIndex().getTablets().get(0).getId(); + long tabletId = partition.getDefaultPhysicalPartition().getBaseIndex().getTablets().get(0).getId(); tabletIds.add(tabletId); } dropPartition(String.format(dropPartitionSql, dbName, tableName)); @@ -218,7 +218,7 @@ private void checkForceDropPartitions(String dropPartitionSql) throws Exception for (String partitionName : partitionNames) { Table table = getTable(dbName, tableName); Partition partition = table.getPartition(partitionName); - long tabletId = partition.getBaseIndex().getTablets().get(0).getId(); + long tabletId = partition.getDefaultPhysicalPartition().getBaseIndex().getTablets().get(0).getId(); tabletIds.add(tabletId); } dropPartition(dropPartitionSql); @@ -237,7 +237,7 @@ private void checkForceDropPartitions(String dropPartitionSql) throws Exception partitionName), connectContext); ExceptionChecker.expectThrowsWithMsg(DdlException.class, String.format("No partition named '%s' in recycle bin that belongs to table '%s'", partitionName, "tbl1"), - () -> GlobalStateMgr.getCurrentState().getLocalMetastore().recoverPartition(recoverPartitionStmt)); + () -> GlobalStateMgr.getCurrentState().getStarRocksMetadata().recoverPartition(recoverPartitionStmt)); } //该方法会立马删除partition 
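The DropPartitionTest and GlobalStateMgrTestUtil hunks apply the same move to tablet and version lookups: getBaseIndex(), getVisibleVersion(), updateVisibleVersion() and setNextVersion() are now reached through the default physical partition. A small illustrative sketch, assuming getDefaultPhysicalPartition() returns a PhysicalPartition as the CatalogMocker change suggests; the helper names are invented:

    import com.starrocks.catalog.OlapTable;
    import com.starrocks.catalog.Partition;
    import com.starrocks.catalog.PhysicalPartition;

    // Sketch only: the per-partition lookups the updated tests perform.
    public class PartitionVersionSketch {
        static long firstBaseTabletId(OlapTable table, String partitionName) {
            Partition partition = table.getPartition(partitionName);
            PhysicalPartition physical = partition.getDefaultPhysicalPartition();
            return physical.getBaseIndex().getTablets().get(0).getId();
        }

        static boolean sameVisibleVersion(Partition a, Partition b) {
            return a.getDefaultPhysicalPartition().getVisibleVersion()
                    == b.getDefaultPhysicalPartition().getVisibleVersion();
        }
    }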
GlobalStateMgr.getCurrentState().getRecycleBin().erasePartition(System.currentTimeMillis()); @@ -268,7 +268,7 @@ private Partition recoverPartition(String db, String table, String partitionName String recoverPartitionSql = String.format("recover partition %s from %s.%s", partitionName, db, table); RecoverPartitionStmt recoverPartitionStmt = (RecoverPartitionStmt) UtFrameUtils.parseStmtWithNewParser(recoverPartitionSql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().recoverPartition(recoverPartitionStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().recoverPartition(recoverPartitionStmt); return getTable(db, table).getPartition(partitionName); } diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/GlobalStateMgrTestUtil.java b/fe/fe-core/src/test/java/com/starrocks/catalog/GlobalStateMgrTestUtil.java index 889f155678620b..121a809f199e1a 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/GlobalStateMgrTestUtil.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/GlobalStateMgrTestUtil.java @@ -131,13 +131,16 @@ public static boolean compareState(GlobalStateMgr masterGlobalStateMgr, GlobalSt if (masterPartition.getId() != slavePartition.getId()) { return false; } - if (masterPartition.getVisibleVersion() != slavePartition.getVisibleVersion() - || masterPartition.getNextVersion() != slavePartition.getNextVersion()) { + if (masterPartition.getDefaultPhysicalPartition().getVisibleVersion() + != slavePartition.getDefaultPhysicalPartition().getVisibleVersion() + || masterPartition.getDefaultPhysicalPartition().getNextVersion() + != slavePartition.getDefaultPhysicalPartition().getNextVersion()) { return false; } - List allMaterializedIndices = masterPartition.getMaterializedIndices(IndexExtState.ALL); + List allMaterializedIndices = masterPartition.getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.ALL); for (MaterializedIndex masterIndex : allMaterializedIndices) { - MaterializedIndex slaveIndex = slavePartition.getIndex(masterIndex.getId()); + MaterializedIndex slaveIndex = slavePartition.getDefaultPhysicalPartition().getIndex(masterIndex.getId()); if (slaveIndex == null) { return false; } @@ -191,8 +194,8 @@ public static Database createSimpleDb(long dbId, long tableId, long partitionId, // partition RandomDistributionInfo distributionInfo = new RandomDistributionInfo(10); Partition partition = new Partition(partitionId, testPartition1, index, distributionInfo); - partition.updateVisibleVersion(testStartVersion); - partition.setNextVersion(testStartVersion + 1); + partition.getDefaultPhysicalPartition().updateVisibleVersion(testStartVersion); + partition.getDefaultPhysicalPartition().setNextVersion(testStartVersion + 1); // columns List columns = new ArrayList(); diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/ListPartitionInfoTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/ListPartitionInfoTest.java index aec1b3c59422ad..0e145bbe36ac0e 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/ListPartitionInfoTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/ListPartitionInfoTest.java @@ -96,7 +96,7 @@ public void testTruncateWithPartition() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String truncateSql = "truncate table t_recharge_detail partition(p1)"; TruncateTableStmt truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseStmtWithNewParser(truncateSql, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().truncateTable(truncateTableStmt, ctx); + 
GlobalStateMgr.getCurrentState().getStarRocksMetadata().truncateTable(truncateTableStmt, ctx); String showSql = "show partitions from t_recharge_detail;"; StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(showSql, ctx); StmtExecutor executor = new StmtExecutor(ctx, statementBase); diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/LocalTabletTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/LocalTabletTest.java index 8813f55d16fff4..d3e14224b7a53f 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/LocalTabletTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/LocalTabletTest.java @@ -50,11 +50,6 @@ import org.junit.Before; import org.junit.Test; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.util.List; public class LocalTabletTest { @@ -155,25 +150,6 @@ public void deleteReplicaTest() { @Test public void testSerialization() throws Exception { - File file = new File("./olapTabletTest"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - tablet.write(dos); - dos.flush(); - dos.close(); - - // Read an object from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - LocalTablet rTablet1 = LocalTablet.read(dis); - Assert.assertEquals(1, rTablet1.getId()); - Assert.assertEquals(3, rTablet1.getImmutableReplicas().size()); - Assert.assertEquals(rTablet1.getImmutableReplicas().get(0).getVersion(), - rTablet1.getImmutableReplicas().get(1).getVersion()); - - Assert.assertTrue(rTablet1.equals(tablet)); - Assert.assertTrue(rTablet1.equals(rTablet1)); - Assert.assertFalse(rTablet1.equals(this)); - LocalTablet tablet2 = new LocalTablet(1); Replica replica1 = new Replica(1L, 1L, 100L, 0, 200000L, 3000L, ReplicaState.NORMAL, 0, 0); Replica replica2 = new Replica(2L, 2L, 100L, 0, 200001L, 3001L, ReplicaState.NORMAL, 0, 0); @@ -190,9 +166,6 @@ public void testSerialization() throws Exception { tablet3.addReplica(new Replica(4L, 4L, 100L, 0, 200002L, 3002L, ReplicaState.NORMAL, 0, 0)); Assert.assertFalse(tablet3.equals(tablet)); - dis.close(); - file.delete(); - // Read an object from json String jsonStr = GsonUtils.GSON.toJson(tablet); LocalTablet jTablet = GsonUtils.GSON.fromJson(jsonStr, LocalTablet.class); diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/PhysicalPartitionImplTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/PhysicalPartitionTest.java similarity index 92% rename from fe/fe-core/src/test/java/com/starrocks/catalog/PhysicalPartitionImplTest.java rename to fe/fe-core/src/test/java/com/starrocks/catalog/PhysicalPartitionTest.java index 48738cafbb8ad3..55400218ff73e3 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/PhysicalPartitionImplTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/PhysicalPartitionTest.java @@ -24,7 +24,7 @@ import org.junit.Before; import org.junit.Test; -public class PhysicalPartitionImplTest { +public class PhysicalPartitionTest { private FakeGlobalStateMgr fakeGlobalStateMgr; private GlobalStateMgr globalStateMgr; @@ -40,7 +40,7 @@ public void setUp() { @Test public void testPhysicalPartition() throws Exception { - PhysicalPartitionImpl p = new PhysicalPartitionImpl(1, "", 1, 0, new MaterializedIndex()); + PhysicalPartition p = new PhysicalPartition(1, "", 1, 0, new MaterializedIndex()); Assert.assertEquals(1, p.getId()); Assert.assertEquals(1, p.getParentId()); 
Assert.assertEquals(0, p.getShardGroupId()); @@ -107,7 +107,7 @@ public void testPhysicalPartition() throws Exception { Assert.assertTrue(p.equals(p)); Assert.assertFalse(p.equals(new Partition(0, "", null, null))); - PhysicalPartitionImpl p2 = new PhysicalPartitionImpl(1, "", 1, 0, new MaterializedIndex()); + PhysicalPartition p2 = new PhysicalPartition(1, "", 1, 0, new MaterializedIndex()); Assert.assertFalse(p.equals(p2)); p2.setBaseIndex(new MaterializedIndex(1)); @@ -144,8 +144,8 @@ public void testPhysicalPartition() throws Exception { @Test public void testPhysicalPartitionEqual() throws Exception { - PhysicalPartitionImpl p1 = new PhysicalPartitionImpl(1, "", 1, 0, new MaterializedIndex()); - PhysicalPartitionImpl p2 = new PhysicalPartitionImpl(1, "", 1, 0, new MaterializedIndex()); + PhysicalPartition p1 = new PhysicalPartition(1, "", 1, 0, new MaterializedIndex()); + PhysicalPartition p2 = new PhysicalPartition(1, "", 1, 0, new MaterializedIndex()); Assert.assertTrue(p1.equals(p2)); p1.createRollupIndex(new MaterializedIndex()); diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/StorageCoolDownTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/StorageCoolDownTest.java index 0afeeb9ba98b3e..424d1527a299ce 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/StorageCoolDownTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/StorageCoolDownTest.java @@ -159,7 +159,7 @@ public void testDateWithTTLLessThan() throws Exception { String dropSQL = "drop table site_access_datetime_with_1_day_ttl_less_than"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @@ -217,7 +217,7 @@ public void testDateWithTTLUpperLower() throws Exception { String dropSQL = "drop table site_access_date_upper_lower_ttl"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @@ -267,7 +267,7 @@ public void testDateWithTTLStartEnd() throws Exception { String dropSQL = "drop table site_access_date_with_1_day_ttl_start_end"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @@ -320,7 +320,7 @@ public void testDateTimeWithTTLLessThan() throws Exception { String dropSQL = "drop table site_access_date_with_1_day_ttl_less_than"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @@ -373,7 +373,7 @@ public void testDateTimeWithMaxPartition() throws Exception { String dropSQL = "drop table site_access_with_max_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } } diff --git 
a/fe/fe-core/src/test/java/com/starrocks/catalog/StorageMediumInferTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/StorageMediumInferTest.java index d683b9ba8abc6a..24334d9f905f44 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/StorageMediumInferTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/StorageMediumInferTest.java @@ -70,7 +70,7 @@ public static void init() throws Exception { // create database String createDbStmtStr = "create database if not exists test;"; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); } private static void createTable(String sql) throws Exception { @@ -95,7 +95,7 @@ public void testCreateTable() throws Exception { OlapTable tbl1 = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "tbl1"); List partitionList1 = Lists.newArrayList(tbl1.getPartitions()); DataProperty dataProperty1 = - globalStateMgr.getLocalMetastore().getDataPropertyIncludeRecycleBin(tbl1.getPartitionInfo(), + globalStateMgr.getStarRocksMetadata().getDataPropertyIncludeRecycleBin(tbl1.getPartitionInfo(), partitionList1.get(0).getId()); Assert.assertEquals(TStorageMedium.HDD, dataProperty1.getStorageMedium()); @@ -108,7 +108,7 @@ public void testCreateTable() throws Exception { OlapTable tbl2 = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "tbl2"); List partitionList2 = Lists.newArrayList(tbl2.getPartitions()); DataProperty dataProperty2 = - globalStateMgr.getLocalMetastore().getDataPropertyIncludeRecycleBin(tbl2.getPartitionInfo(), + globalStateMgr.getStarRocksMetadata().getDataPropertyIncludeRecycleBin(tbl2.getPartitionInfo(), partitionList2.get(0).getId()); Assert.assertEquals(TStorageMedium.SSD, dataProperty2.getStorageMedium()); @@ -120,7 +120,7 @@ public void testCreateTable() throws Exception { OlapTable tbl3 = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "tbl3"); List partitionList3 = Lists.newArrayList(tbl3.getPartitions()); DataProperty dataProperty3 = - globalStateMgr.getLocalMetastore().getDataPropertyIncludeRecycleBin(tbl3.getPartitionInfo(), + globalStateMgr.getStarRocksMetadata().getDataPropertyIncludeRecycleBin(tbl3.getPartitionInfo(), partitionList3.get(0).getId()); Assert.assertEquals(TStorageMedium.SSD, dataProperty3.getStorageMedium()); @@ -130,7 +130,7 @@ public void testCreateTable() throws Exception { OlapTable tbl4 = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "tbl4"); List partitionList4 = Lists.newArrayList(tbl4.getPartitions()); DataProperty dataProperty4 = - globalStateMgr.getLocalMetastore().getDataPropertyIncludeRecycleBin(tbl4.getPartitionInfo(), + globalStateMgr.getStarRocksMetadata().getDataPropertyIncludeRecycleBin(tbl4.getPartitionInfo(), partitionList4.get(0).getId()); Assert.assertEquals(TStorageMedium.HDD, dataProperty4.getStorageMedium()); } @@ -151,7 +151,7 @@ public void testAlterTableAddPartition() throws Exception { Assert.assertEquals(2, partitionList2.size()); for (Partition partition : partitionList2) { DataProperty dataProperty2 = - globalStateMgr.getLocalMetastore().getDataPropertyIncludeRecycleBin(tbl2.getPartitionInfo(), + 
globalStateMgr.getStarRocksMetadata().getDataPropertyIncludeRecycleBin(tbl2.getPartitionInfo(), partition.getId()); Assert.assertEquals(TStorageMedium.SSD, dataProperty2.getStorageMedium()); } diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/TabletStatMgrTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/TabletStatMgrTest.java index 6720d574f36363..6c39ad23f9ce0e 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/TabletStatMgrTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/TabletStatMgrTest.java @@ -202,7 +202,7 @@ private LakeTable createLakeTableForTest() { PartitionInfo partitionInfo = new SinglePartitionInfo(); partitionInfo.setReplicationNum(PARTITION_ID, (short) 3); Partition partition = new Partition(PARTITION_ID, "p1", index, distributionInfo); - partition.setVisibleVersion(2L, visibleVersionTime); + partition.getDefaultPhysicalPartition().setVisibleVersion(2L, visibleVersionTime); // Lake table LakeTable table = new LakeTable(TABLE_ID, "t1", columns, KeysType.AGG_KEYS, partitionInfo, distributionInfo); @@ -219,8 +219,10 @@ public void testUpdateLakeTabletStat(@Mocked SystemInfoService systemInfoService LakeTable table = createLakeTableForTest(); - long tablet1Id = table.getPartition(PARTITION_ID).getBaseIndex().getTablets().get(0).getId(); - long tablet2Id = table.getPartition(PARTITION_ID).getBaseIndex().getTablets().get(1).getId(); + long tablet1Id = table.getPartition(PARTITION_ID) + .getDefaultPhysicalPartition().getBaseIndex().getTablets().get(0).getId(); + long tablet2Id = table.getPartition(PARTITION_ID) + .getDefaultPhysicalPartition().getBaseIndex().getTablets().get(1).getId(); // db Database db = new Database(DB_ID, "db"); @@ -316,8 +318,10 @@ public TabletStatResponse get(long timeout, @NotNull TimeUnit unit) { Deencapsulation.invoke(tabletStatMgr, "updateLakeTableTabletStat", db, table); long t2 = System.currentTimeMillis(); - LakeTablet tablet1 = (LakeTablet) table.getPartition(PARTITION_ID).getBaseIndex().getTablets().get(0); - LakeTablet tablet2 = (LakeTablet) table.getPartition(PARTITION_ID).getBaseIndex().getTablets().get(1); + LakeTablet tablet1 = (LakeTablet) table.getPartition(PARTITION_ID) + .getDefaultPhysicalPartition().getBaseIndex().getTablets().get(0); + LakeTablet tablet2 = (LakeTablet) table.getPartition(PARTITION_ID) + .getDefaultPhysicalPartition().getBaseIndex().getTablets().get(1); Assert.assertEquals(tablet1.getRowCount(-1), tablet1NumRows); Assert.assertEquals(tablet1.getDataSize(true), tablet1DataSize); @@ -332,8 +336,10 @@ public void testUpdateLakeTabletStat2(@Mocked SystemInfoService systemInfoServic @Mocked LakeService lakeService) { LakeTable table = createLakeTableForTest(); - long tablet1Id = table.getPartition(PARTITION_ID).getBaseIndex().getTablets().get(0).getId(); - long tablet2Id = table.getPartition(PARTITION_ID).getBaseIndex().getTablets().get(1).getId(); + long tablet1Id = table.getPartition(PARTITION_ID).getDefaultPhysicalPartition() + .getBaseIndex().getTablets().get(0).getId(); + long tablet2Id = table.getPartition(PARTITION_ID).getDefaultPhysicalPartition() + .getBaseIndex().getTablets().get(1).getId(); // db Database db = new Database(DB_ID, "db"); @@ -365,8 +371,10 @@ public ComputeNode chooseNode(LakeTablet tablet) { TabletStatMgr tabletStatMgr = new TabletStatMgr(); Deencapsulation.invoke(tabletStatMgr, "updateLakeTableTabletStat", db, table); - LakeTablet tablet1 = (LakeTablet) table.getPartition(PARTITION_ID).getBaseIndex().getTablets().get(0); - LakeTablet tablet2 = (LakeTablet) 
table.getPartition(PARTITION_ID).getBaseIndex().getTablets().get(1); + LakeTablet tablet1 = (LakeTablet) table.getPartition(PARTITION_ID) + .getDefaultPhysicalPartition().getBaseIndex().getTablets().get(0); + LakeTablet tablet2 = (LakeTablet) table.getPartition(PARTITION_ID) + .getDefaultPhysicalPartition().getBaseIndex().getTablets().get(1); Assert.assertEquals(0, tablet1.getRowCount(-1)); Assert.assertEquals(0, tablet1.getDataSize(true)); @@ -381,8 +389,10 @@ public void testUpdateLakeTabletStat3(@Mocked SystemInfoService systemInfoServic @Mocked LakeService lakeService) { LakeTable table = createLakeTableForTest(); - long tablet1Id = table.getPartition(PARTITION_ID).getBaseIndex().getTablets().get(0).getId(); - long tablet2Id = table.getPartition(PARTITION_ID).getBaseIndex().getTablets().get(1).getId(); + long tablet1Id = table.getPartition(PARTITION_ID).getDefaultPhysicalPartition() + .getBaseIndex().getTablets().get(0).getId(); + long tablet2Id = table.getPartition(PARTITION_ID).getDefaultPhysicalPartition() + .getBaseIndex().getTablets().get(1).getId(); // db Database db = new Database(DB_ID, "db"); @@ -457,8 +467,10 @@ public TabletStatResponse get(long timeout, @NotNull TimeUnit unit) { TabletStatMgr tabletStatMgr = new TabletStatMgr(); Deencapsulation.invoke(tabletStatMgr, "updateLakeTableTabletStat", db, table); - LakeTablet tablet1 = (LakeTablet) table.getPartition(PARTITION_ID).getBaseIndex().getTablets().get(0); - LakeTablet tablet2 = (LakeTablet) table.getPartition(PARTITION_ID).getBaseIndex().getTablets().get(1); + LakeTablet tablet1 = (LakeTablet) table.getPartition(PARTITION_ID).getDefaultPhysicalPartition() + .getBaseIndex().getTablets().get(0); + LakeTablet tablet2 = (LakeTablet) table.getPartition(PARTITION_ID).getDefaultPhysicalPartition() + .getBaseIndex().getTablets().get(1); Assert.assertEquals(0, tablet1.getRowCount(-1)); Assert.assertEquals(0, tablet1.getDataSize(true)); diff --git a/fe/fe-core/src/test/java/com/starrocks/catalog/TempPartitionTest.java b/fe/fe-core/src/test/java/com/starrocks/catalog/TempPartitionTest.java index a25ca833f465d7..b7345d0c110fa8 100644 --- a/fe/fe-core/src/test/java/com/starrocks/catalog/TempPartitionTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/catalog/TempPartitionTest.java @@ -379,7 +379,7 @@ public void testForMultiPartitionTable() throws Exception { "distributed by hash(k2) buckets 1\n" + "properties('replication_num' = '1');"); - Database db2 = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("db2"); + Database db2 = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("db2"); OlapTable tbl2 = (OlapTable) db2.getTable("tbl2"); Map originPartitionTabletIds = Maps.newHashMap(); @@ -458,7 +458,7 @@ public void testForMultiPartitionTable() throws Exception { String recoverStr = "recover partition p1 from db2.tbl2;"; RecoverPartitionStmt recoverStmt = (RecoverPartitionStmt) UtFrameUtils.parseStmtWithNewParser(recoverStr, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().recoverPartition(recoverStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().recoverPartition(recoverStmt); checkShowPartitionsResultNum("db2.tbl2", true, 3); checkShowPartitionsResultNum("db2.tbl2", false, 3); @@ -501,7 +501,7 @@ public void testForMultiPartitionTable() throws Exception { String truncateStr = "truncate table db2.tbl2 partition (p3);"; TruncateTableStmt truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseStmtWithNewParser(truncateStr, ctx); - 
GlobalStateMgr.getCurrentState().getLocalMetastore().truncateTable(truncateTableStmt, ctx); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().truncateTable(truncateTableStmt, ctx); checkShowPartitionsResultNum("db2.tbl2", true, 1); checkShowPartitionsResultNum("db2.tbl2", false, 3); checkPartitionExist(tbl2, "tp1", false, true); @@ -585,7 +585,7 @@ public void testForMultiPartitionTable() throws Exception { truncateStr = "truncate table db2.tbl2"; truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseStmtWithNewParser(truncateStr, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().truncateTable(truncateTableStmt, ctx); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().truncateTable(truncateTableStmt, ctx); checkShowPartitionsResultNum("db2.tbl2", false, 3); checkShowPartitionsResultNum("db2.tbl2", true, 0); @@ -608,7 +608,7 @@ public void testForMultiPartitionTable() throws Exception { } OlapTable olapTable = - (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("db2").getTable("tbl2"); + (OlapTable) GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("db2").getTable("tbl2"); // waiting table state to normal int retryTimes = 5; @@ -675,7 +675,7 @@ public void testForStrictRangeCheck() throws Exception { "distributed by hash(k2) buckets 1\n" + "properties('replication_num' = '1');"); - Database db3 = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("db3"); + Database db3 = GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDb("db3"); OlapTable tbl3 = (OlapTable) db3.getTable("tbl3"); // base range is [min, 10), [10, 20), [20, 30) @@ -693,7 +693,7 @@ public void testForStrictRangeCheck() throws Exception { // now base range is [min, 10), [10, 15), [15, 25), [25, 30) -> p1,tp1,tp2,tp3 stmtStr = "truncate table db3.tbl3"; TruncateTableStmt truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseStmtWithNewParser(stmtStr, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().truncateTable(truncateTableStmt, ctx); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().truncateTable(truncateTableStmt, ctx); // 2. add temp ranges: [10, 31), and replace the [10, 15), [15, 25), [25, 30) stmtStr = "alter table db3.tbl3 add temporary partition tp4 values [('10'), ('31'))"; alterTableWithNewAnalyzer(stmtStr, false); @@ -710,7 +710,7 @@ public void testForStrictRangeCheck() throws Exception { // now base range is [min, 10), [10, 30) -> p1,tp4 stmtStr = "truncate table db3.tbl3"; truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseStmtWithNewParser(stmtStr, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().truncateTable(truncateTableStmt, ctx); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().truncateTable(truncateTableStmt, ctx); // 3. 
add temp partition tp5 [50, 60) and replace partition tp4 stmtStr = "alter table db3.tbl3 add temporary partition tp5 values [('50'), ('60'))"; alterTableWithNewAnalyzer(stmtStr, false); @@ -780,6 +780,7 @@ private void testSerializeTempPartitions(TempPartitions tempPartitionsInstance) TempPartitions readTempPartition = TempPartitions.read(in); List partitions = readTempPartition.getAllPartitions(); Assert.assertEquals(1, partitions.size()); - Assert.assertEquals(2, partitions.get(0).getMaterializedIndices(IndexExtState.VISIBLE).size()); + Assert.assertEquals(2, partitions.get(0).getDefaultPhysicalPartition() + .getMaterializedIndices(IndexExtState.VISIBLE).size()); } } diff --git a/fe/fe-core/src/test/java/com/starrocks/clone/ColocateTableBalancerTest.java b/fe/fe-core/src/test/java/com/starrocks/clone/ColocateTableBalancerTest.java index 5687eb8d18e61d..9e0048853afcba 100644 --- a/fe/fe-core/src/test/java/com/starrocks/clone/ColocateTableBalancerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/clone/ColocateTableBalancerTest.java @@ -145,23 +145,23 @@ private ColocateTableIndex createColocateIndex(GroupId groupId, List flatL private void addTabletsToScheduler(String dbName, String tableName, boolean setGroupId) { Database database = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(dbName); OlapTable table = - (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(database.getFullName(), tableName); + (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(database.getFullName(), tableName); // add its tablet to TabletScheduler TabletScheduler tabletScheduler = GlobalStateMgr.getCurrentState().getTabletScheduler(); for (Partition partition : table.getPartitions()) { - MaterializedIndex materializedIndex = partition.getBaseIndex(); + MaterializedIndex materializedIndex = partition.getDefaultPhysicalPartition().getBaseIndex(); for (Tablet tablet : materializedIndex.getTablets()) { TabletSchedCtx ctx = new TabletSchedCtx(TabletSchedCtx.Type.REPAIR, - database.getId(), - table.getId(), - partition.getId(), - materializedIndex.getId(), - tablet.getId(), - System.currentTimeMillis()); + database.getId(), + table.getId(), + partition.getId(), + materializedIndex.getId(), + tablet.getId(), + System.currentTimeMillis()); ctx.setOrigPriority(TabletSchedCtx.Priority.LOW); if (setGroupId) { ctx.setColocateGroupId( - GlobalStateMgr.getCurrentState().getColocateTableIndex().getGroup(table.getId())); + GlobalStateMgr.getCurrentState().getColocateTableIndex().getGroup(table.getId())); } tabletScheduler.addTablet(ctx, false); } @@ -171,13 +171,13 @@ private void addTabletsToScheduler(String dbName, String tableName, boolean setG @Test public void test1MatchGroup() throws Exception { starRocksAssert.withDatabase("db1").useDatabase("db1") - .withTable("CREATE TABLE db1.tbl(id INT NOT NULL) " + - "distributed by hash(`id`) buckets 3 " + - "properties('replication_num' = '1', 'colocate_with' = 'group1');"); + .withTable("CREATE TABLE db1.tbl(id INT NOT NULL) " + + "distributed by hash(`id`) buckets 3 " + + "properties('replication_num' = '1', 'colocate_with' = 'group1');"); Database database = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("db1"); OlapTable table = - (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(database.getFullName(), "tbl"); + (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(database.getFullName(), "tbl"); addTabletsToScheduler("db1", "tbl", false); ColocateTableIndex 
colocateIndex = GlobalStateMgr.getCurrentState().getColocateTableIndex(); @@ -205,17 +205,17 @@ public void test3RepairWithBadReplica() throws Exception { UtFrameUtils.addMockBackend(10003); UtFrameUtils.addMockBackend(10004); starRocksAssert.withDatabase("db3").useDatabase("db3") - .withTable("CREATE TABLE db3.tbl3(id INT NOT NULL) " + - "distributed by hash(`id`) buckets 1 " + - "properties('replication_num' = '1', 'colocate_with' = 'group3');"); + .withTable("CREATE TABLE db3.tbl3(id INT NOT NULL) " + + "distributed by hash(`id`) buckets 1 " + + "properties('replication_num' = '1', 'colocate_with' = 'group3');"); Database database = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("db3"); OlapTable table = - (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(database.getFullName(), "tbl3"); + (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(database.getFullName(), "tbl3"); ColocateTableIndex colocateTableIndex = GlobalStateMgr.getCurrentState().getColocateTableIndex(); List partitions = Lists.newArrayList(table.getPartitions()); - LocalTablet tablet = (LocalTablet) partitions.get(0).getBaseIndex().getTablets().get(0); + LocalTablet tablet = (LocalTablet) partitions.get(0).getDefaultPhysicalPartition().getBaseIndex().getTablets().get(0); tablet.getImmutableReplicas().get(0).setBad(true); ColocateTableBalancer colocateTableBalancer = ColocateTableBalancer.getInstance(); long oldVal = Config.tablet_sched_repair_delay_factor_second; @@ -312,7 +312,7 @@ public void testRepairPrecedeBalance(@Mocked SystemInfoService infoService, GroupId groupId = new GroupId(10005, 10006); short replicationNUm = 3; ColocateTableIndex colocateTableIndex = createColocateIndex(groupId, - Lists.newArrayList(1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 4L, 1L, 2L, 5L), replicationNUm); + Lists.newArrayList(1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 4L, 1L, 2L, 5L), replicationNUm); setGroup2Schema(groupId, colocateTableIndex, 4, replicationNUm); Set unavailableBeIds = Sets.newHashSet(5L); @@ -321,23 +321,23 @@ public void testRepairPrecedeBalance(@Mocked SystemInfoService infoService, boolean changed = false; ColocateTableBalancer.disableRepairPrecedence = true; changed = (Boolean) Deencapsulation - .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, availBackendIds, - colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); + .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, availBackendIds, + colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); Assert.assertTrue(changed); System.out.println(balancedBackendsPerBucketSeq); List> expected = Lists.partition( - Lists.newArrayList(4L, 2L, 3L, 1L, 2L, 3L, 1L, 3L, 4L, 1L, 2L, 4L), 3); + Lists.newArrayList(4L, 2L, 3L, 1L, 2L, 3L, 1L, 3L, 4L, 1L, 2L, 4L), 3); Assert.assertEquals(expected, balancedBackendsPerBucketSeq); ColocateTableBalancer.disableRepairPrecedence = false; balancedBackendsPerBucketSeq.clear(); changed = (Boolean) Deencapsulation - .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, availBackendIds, - colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); + .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, availBackendIds, + colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); Assert.assertTrue(changed); System.out.println(balancedBackendsPerBucketSeq); expected = Lists.partition( - Lists.newArrayList(1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 4L, 1L, 2L, 4L), 3); + 
Lists.newArrayList(1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 4L, 1L, 2L, 4L), 3); Assert.assertEquals(expected, balancedBackendsPerBucketSeq); } @@ -403,27 +403,27 @@ public void testPerGroupBalance(@Mocked SystemInfoService infoService, // [[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4], [1, 2, 3]] FeConstants.runningUnitTest = true; ColocateTableIndex colocateTableIndex = createColocateIndex(groupId, - Lists.newArrayList(1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L), 3); + Lists.newArrayList(1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L), 3); Deencapsulation.setField(colocateTableIndex, "group2Schema", group2Schema); List> balancedBackendsPerBucketSeq = Lists.newArrayList(); List allAvailBackendIds = Lists.newArrayList(1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L); boolean changed = (Boolean) Deencapsulation - .invoke(balancer, "doRelocateAndBalance", groupId, new HashSet(), allAvailBackendIds, - colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); + .invoke(balancer, "doRelocateAndBalance", groupId, new HashSet(), allAvailBackendIds, + colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); List> expected = Lists.partition( - Lists.newArrayList(9L, 5L, 3L, 4L, 6L, 8L, 7L, 6L, 1L, 2L, 9L, 4L, 1L, 2L, 3L), 3); + Lists.newArrayList(9L, 5L, 3L, 4L, 6L, 8L, 7L, 6L, 1L, 2L, 9L, 4L, 1L, 2L, 3L), 3); Assert.assertTrue(changed); Assert.assertEquals(expected, balancedBackendsPerBucketSeq); // 2. balance an already balanced group colocateTableIndex = createColocateIndex(groupId, - Lists.newArrayList(9L, 8L, 7L, 8L, 6L, 5L, 9L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L), 3); + Lists.newArrayList(9L, 8L, 7L, 8L, 6L, 5L, 9L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L), 3); Deencapsulation.setField(colocateTableIndex, "group2Schema", group2Schema); balancedBackendsPerBucketSeq.clear(); changed = (Boolean) Deencapsulation - .invoke(balancer, "doRelocateAndBalance", groupId, new HashSet(), allAvailBackendIds, - colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); + .invoke(balancer, "doRelocateAndBalance", groupId, new HashSet(), allAvailBackendIds, + colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); System.out.println(balancedBackendsPerBucketSeq); Assert.assertFalse(changed); Assert.assertTrue(balancedBackendsPerBucketSeq.isEmpty()); @@ -518,10 +518,10 @@ public synchronized Map getTabletsNumInScheduleForEachCG() { ColocateTableIndex colocateTableIndex = GlobalStateMgr.getCurrentState().getColocateTableIndex(); // For group 1, bucket: 3, replication_num: 1, backend list per bucket seq: [[1], [2], [3]] colocateTableIndex.addBackendsPerBucketSeq(groupId1, - Lists.partition(Lists.newArrayList(1L, 2L, 3L), 1)); + Lists.partition(Lists.newArrayList(1L, 2L, 3L), 1)); // For group 2, bucket: 3, replication_num: 1, backend list per bucket seq: [[1], [2], [3]] colocateTableIndex.addBackendsPerBucketSeq(groupId2, - Lists.partition(Lists.newArrayList(1L, 2L, 3L), 1)); + Lists.partition(Lists.newArrayList(1L, 2L, 3L), 1)); Deencapsulation.setField(colocateTableIndex, "group2Schema", group2Schema); Multimap group2Tables = ArrayListMultimap.create(); group2Tables.put(groupId1, 20001L); @@ -548,7 +548,7 @@ public synchronized Map getTabletsNumInScheduleForEachCG() { // totally 6 replicas, after adding 2 backends and overall balance, // every backend should have 1 replica, except one Assert.assertEquals(Lists.newArrayList(1, 1, 1, 1, 2), - result.values().stream().sorted().collect(Collectors.toList())); + 
result.values().stream().sorted().collect(Collectors.toList())); } } @@ -557,7 +557,7 @@ private void setGroup2Schema(GroupId groupId, ColocateTableIndex colocateTableIn List distributionCols = Lists.newArrayList(); distributionCols.add(new Column("k1", Type.INT)); ColocateGroupSchema groupSchema = - new ColocateGroupSchema(groupId, distributionCols, bucketNum, replicationNum); + new ColocateGroupSchema(groupId, distributionCols, bucketNum, replicationNum); Map group2Schema = Maps.newHashMap(); group2Schema.put(groupId, groupSchema); Deencapsulation.setField(colocateTableIndex, "group2Schema", group2Schema); @@ -613,7 +613,7 @@ public void testBalanceWithOnlyOneAvailableBackend(@Mocked SystemInfoService inf GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getIdToBackend(); GroupId groupId = new GroupId(10000, 10001); ColocateTableIndex colocateTableIndex = createColocateIndex(groupId, - Lists.newArrayList(4L, 2L, 3L, 4L, 2L, 3L, 4L, 2L, 3L), 3); + Lists.newArrayList(4L, 2L, 3L, 4L, 2L, 3L, 4L, 2L, 3L), 3); setGroup2Schema(groupId, colocateTableIndex, 3, (short) 3); Set unavailableBeIds = Sets.newHashSet(3L, 4L); @@ -621,8 +621,8 @@ public void testBalanceWithOnlyOneAvailableBackend(@Mocked SystemInfoService inf List availBackendIds = Lists.newArrayList(2L); boolean changed = (Boolean) Deencapsulation - .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, availBackendIds, - colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); + .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, availBackendIds, + colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); // in this case, there is only on available backend, no need to make balancing decision. Assert.assertFalse(changed); } @@ -679,7 +679,7 @@ public void testBalanceWithSingleReplica(@Mocked SystemInfoService infoService, GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getIdToBackend(); GroupId groupId = new GroupId(10000, 10001); ColocateTableIndex colocateTableIndex = createColocateIndex(groupId, - Lists.newArrayList(4L, 2L, 3L, 4L, 2L, 3L, 4L, 2L, 3L), 1); + Lists.newArrayList(4L, 2L, 3L, 4L, 2L, 3L, 4L, 2L, 3L), 1); setGroup2Schema(groupId, colocateTableIndex, 9, (short) 1); Set unavailableBeIds = Sets.newHashSet(4L); @@ -688,22 +688,22 @@ public void testBalanceWithSingleReplica(@Mocked SystemInfoService infoService, boolean changed = false; changed = (Boolean) Deencapsulation - .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, availBackendIds, - colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); + .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, availBackendIds, + colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); // there is unavailable backend, but the replication number is 1 and replicas on available backends are // already balanced, so the bucket sequence will remain unchanged. 
Assert.assertFalse(changed); colocateTableIndex = createColocateIndex(groupId, - Lists.newArrayList(2L, 2L, 4L, 2L, 2L, 4L, 3L, 2L, 4L), 1); + Lists.newArrayList(2L, 2L, 4L, 2L, 2L, 4L, 3L, 2L, 4L), 1); setGroup2Schema(groupId, colocateTableIndex, 9, (short) 1); changed = (Boolean) Deencapsulation - .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, availBackendIds, - colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); + .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, availBackendIds, + colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); Assert.assertTrue(changed); System.out.println(balancedBackendsPerBucketSeq); List> expected = Lists.partition( - Lists.newArrayList(3L, 3L, 4L, 2L, 2L, 4L, 3L, 2L, 4L), 1); + Lists.newArrayList(3L, 3L, 4L, 2L, 2L, 4L, 3L, 2L, 4L), 1); // there is unavailable backend, but the replication number is 1 and replicas on available backends are // not balanced, check the balancer actually working. Assert.assertEquals(expected, balancedBackendsPerBucketSeq); @@ -717,15 +717,15 @@ public void testBalanceWithSingleReplica(@Mocked SystemInfoService infoService, }; balancedBackendsPerBucketSeq = Lists.newArrayList(); colocateTableIndex = createColocateIndex(groupId, - Lists.newArrayList(2L, 2L, 4L, 2L, 2L, 4L, 3L, 2L, 4L), 1); + Lists.newArrayList(2L, 2L, 4L, 2L, 2L, 4L, 3L, 2L, 4L), 1); setGroup2Schema(groupId, colocateTableIndex, 9, (short) 1); changed = (Boolean) Deencapsulation - .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, availBackendIds, - colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); + .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, availBackendIds, + colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); Assert.assertTrue(changed); System.out.println(balancedBackendsPerBucketSeq); List> expected3 = Lists.partition( - Lists.newArrayList(2L, 2L, 3L, 2L, 2L, 3L, 3L, 2L, 3L), 1); + Lists.newArrayList(2L, 2L, 3L, 2L, 2L, 3L, 3L, 2L, 3L), 1); // there is unavailable backend, but the replication number is 1 and there is decommissioned backend, // so we need to do relocation first. Assert.assertEquals(expected3, balancedBackendsPerBucketSeq); @@ -779,42 +779,42 @@ public void testFixBalanceEndlessLoop(@Mocked SystemInfoService infoService, // 1. only one available backend // [[7], [7], [7], [7], [7]] ColocateTableIndex colocateTableIndex = createColocateIndex(groupId, - Lists.newArrayList(7L, 7L, 7L, 7L, 7L), 3); + Lists.newArrayList(7L, 7L, 7L, 7L, 7L), 3); Deencapsulation.setField(colocateTableIndex, "group2Schema", group2Schema); List> balancedBackendsPerBucketSeq = Lists.newArrayList(); List allAvailBackendIds = Lists.newArrayList(7L); boolean changed = - Deencapsulation.invoke(balancer, "doRelocateAndBalance", - groupId, new HashSet(), allAvailBackendIds, - colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); + Deencapsulation.invoke(balancer, "doRelocateAndBalance", + groupId, new HashSet(), allAvailBackendIds, + colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); Assert.assertFalse(changed); // 2. 
all backends are checked but this round is not changed // [[7], [7], [7], [7], [7]] // and add new backends 8, 9 that are on the same host with 7 colocateTableIndex = createColocateIndex(groupId, - Lists.newArrayList(7L, 7L, 7L, 7L, 7L), 3); + Lists.newArrayList(7L, 7L, 7L, 7L, 7L), 3); Deencapsulation.setField(colocateTableIndex, "group2Schema", group2Schema); balancedBackendsPerBucketSeq = Lists.newArrayList(); allAvailBackendIds = Lists.newArrayList(7L, 8L, 9L); changed = - Deencapsulation.invoke(balancer, "doRelocateAndBalance", - groupId, new HashSet(), allAvailBackendIds, - colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); + Deencapsulation.invoke(balancer, "doRelocateAndBalance", + groupId, new HashSet(), allAvailBackendIds, + colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); Assert.assertFalse(changed); // 3. all backends are not available colocateTableIndex = createColocateIndex(groupId, - Lists.newArrayList(7L, 7L, 7L, 7L, 7L), 3); + Lists.newArrayList(7L, 7L, 7L, 7L, 7L), 3); Deencapsulation.setField(colocateTableIndex, "group2Schema", group2Schema); balancedBackendsPerBucketSeq = Lists.newArrayList(); allAvailBackendIds = Lists.newArrayList(); Set unAvailableBackendIds = Sets.newHashSet(7L); changed = Deencapsulation - .invoke(balancer, "doRelocateAndBalance", groupId, unAvailableBackendIds, allAvailBackendIds, - colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); + .invoke(balancer, "doRelocateAndBalance", groupId, unAvailableBackendIds, allAvailBackendIds, + colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); Assert.assertFalse(changed); } @@ -838,15 +838,15 @@ BackendLoadStatistic delegate(Long beId) { Set unavailBackendIds = Sets.newHashSet(9L); List flatBackendsPerBucketSeq = Lists.newArrayList(1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L); List> backends = Deencapsulation.invoke(balancer, "getSortedBackendReplicaNumPairs", - allAvailBackendIds, unavailBackendIds, statistic, flatBackendsPerBucketSeq); + allAvailBackendIds, unavailBackendIds, statistic, flatBackendsPerBucketSeq); long[] backendIds = backends.stream().mapToLong(Map.Entry::getKey).toArray(); Assert.assertArrayEquals(new long[] {7L, 8L, 6L, 2L, 3L, 5L, 4L, 1L}, backendIds); // 0,1 bucket on same be and 5, 6 on same be flatBackendsPerBucketSeq = Lists.newArrayList(1L, 1L, 3L, 4L, 5L, 6L, 7L, 7L, 9L); backends = Deencapsulation - .invoke(balancer, "getSortedBackendReplicaNumPairs", allAvailBackendIds, unavailBackendIds, - statistic, flatBackendsPerBucketSeq); + .invoke(balancer, "getSortedBackendReplicaNumPairs", allAvailBackendIds, unavailBackendIds, + statistic, flatBackendsPerBucketSeq); backendIds = backends.stream().mapToLong(Map.Entry::getKey).toArray(); Assert.assertArrayEquals(new long[] {7L, 1L, 6L, 3L, 5L, 4L, 8L, 2L}, backendIds); } @@ -867,7 +867,7 @@ public double getMixLoadScore() { public void testGetBeSeqIndexes() { List flatBackendsPerBucketSeq = Lists.newArrayList(1L, 2L, 2L, 3L, 4L, 2L); List indexes = Deencapsulation.invoke(balancer, - "getBeSeqIndexes", flatBackendsPerBucketSeq, 2L); + "getBeSeqIndexes", flatBackendsPerBucketSeq, 2L); Assert.assertArrayEquals(new int[] {1, 2, 5}, indexes.stream().mapToInt(i -> i).toArray()); System.out.println("backend1 id is " + backend1.getId()); } @@ -903,9 +903,9 @@ public void testGetUnavailableBeIdsInGroup() { GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getIdToBackend(); Set unavailableBeIds = Deencapsulation - .invoke(balancer, 
"getUnavailableBeIdsInGroup", infoService, colocateTableIndex, groupId); + .invoke(balancer, "getUnavailableBeIdsInGroup", infoService, colocateTableIndex, groupId); Assert.assertArrayEquals(new long[] {1L, 3L, 5L}, - unavailableBeIds.stream().mapToLong(i -> i).sorted().toArray()); + unavailableBeIds.stream().mapToLong(i -> i).sorted().toArray()); } @Test @@ -1034,18 +1034,18 @@ BackendLoadStatistic delegate(Long beId) { // group is balanced before backend 9 is dropped ColocateTableIndex colocateTableIndex = createColocateIndex(groupId, - Lists.newArrayList(9L, 8L, 7L, 8L, 6L, 5L, 9L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L), 3); + Lists.newArrayList(9L, 8L, 7L, 8L, 6L, 5L, 9L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L), 3); Deencapsulation.setField(colocateTableIndex, "group2Schema", group2Schema); List> balancedBackendsPerBucketSeq = Lists.newArrayList(); Set unavailableBeIds = Sets.newHashSet(9L); List allAvailBackendIds = Lists.newArrayList(1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L); boolean changed = (Boolean) Deencapsulation - .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, allAvailBackendIds, - colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); + .invoke(balancer, "doRelocateAndBalance", groupId, unavailableBeIds, allAvailBackendIds, + colocateTableIndex, infoService, statistic, balancedBackendsPerBucketSeq); System.out.println(balancedBackendsPerBucketSeq); Assert.assertTrue(changed); List> expected = Lists.partition( - Lists.newArrayList(5L, 8L, 7L, 8L, 6L, 5L, 6L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L), 3); + Lists.newArrayList(5L, 8L, 7L, 8L, 6L, 5L, 6L, 4L, 1L, 2L, 3L, 4L, 1L, 2L, 3L), 3); Assert.assertEquals(expected, balancedBackendsPerBucketSeq); } @@ -1065,10 +1065,10 @@ public void testSystemStable() throws Exception { // set stable last time to 1s, and sleep 1s, the system becomes to stable Config.tablet_sched_colocate_balance_wait_system_stable_time_s = 1; System.out.println("before sleep, time: " + System.currentTimeMillis() - + "alive backend is: " + infoService.getBackendIds(true)); + + "alive backend is: " + infoService.getBackendIds(true)); Thread.sleep(2000L); System.out.println("after sleep, time: " + System.currentTimeMillis() - + "alive backend is: " + infoService.getBackendIds(true)); + + "alive backend is: " + infoService.getBackendIds(true)); Assert.assertTrue(balancer.isSystemStable(infoService)); Assert.assertTrue(balancer.isSystemStable(infoService)); @@ -1077,10 +1077,10 @@ public void testSystemStable() throws Exception { Assert.assertFalse(balancer.isSystemStable(infoService)); Assert.assertFalse(balancer.isSystemStable(infoService)); System.out.println("before sleep, time: " + System.currentTimeMillis() - + "alive backend is: " + infoService.getBackendIds(true)); + + "alive backend is: " + infoService.getBackendIds(true)); Thread.sleep(2000L); System.out.println("after sleep, time: " + System.currentTimeMillis() - + "alive backend is: " + infoService.getBackendIds(true)); + + "alive backend is: " + infoService.getBackendIds(true)); Assert.assertTrue(balancer.isSystemStable(infoService)); } } diff --git a/fe/fe-core/src/test/java/com/starrocks/clone/DiskAndTabletLoadReBalancerTest.java b/fe/fe-core/src/test/java/com/starrocks/clone/DiskAndTabletLoadReBalancerTest.java index b488978a879bdb..a3ce5e1d44fa13 100644 --- a/fe/fe-core/src/test/java/com/starrocks/clone/DiskAndTabletLoadReBalancerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/clone/DiskAndTabletLoadReBalancerTest.java @@ -134,36 +134,36 @@ public void testBalance(@Mocked 
GlobalStateMgr globalStateMgr) { result = globalStateMgr; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIdsIncludeRecycleBin(); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIdsIncludeRecycleBin(); result = Lists.newArrayList(dbId); minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(dbId); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(dbId); result = database; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getTableIncludeRecycleBin((Database) any, anyLong); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTableIncludeRecycleBin((Database) any, anyLong); result = table; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getTablesIncludeRecycleBin((Database) any); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTablesIncludeRecycleBin((Database) any); result = Lists.newArrayList(table); minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getPartitionIncludeRecycleBin((OlapTable) any, anyLong); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getPartitionIncludeRecycleBin((OlapTable) any, anyLong); result = partition; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getAllPartitionsIncludeRecycleBin((OlapTable) any); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getAllPartitionsIncludeRecycleBin((OlapTable) any); result = Lists.newArrayList(partition); minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getReplicationNumIncludeRecycleBin((PartitionInfo) any, anyLong); result = (short) 1; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getDataPropertyIncludeRecycleBin((PartitionInfo) any, anyLong); result = dataProperty; minTimes = 0; @@ -296,36 +296,36 @@ public void testBalanceWithSameHost(@Mocked GlobalStateMgr globalStateMgr) { result = globalStateMgr; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIdsIncludeRecycleBin(); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIdsIncludeRecycleBin(); result = Lists.newArrayList(dbId); minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(dbId); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(dbId); result = database; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getTableIncludeRecycleBin((Database) any, anyLong); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTableIncludeRecycleBin((Database) any, anyLong); result = table; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getTablesIncludeRecycleBin((Database) any); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTablesIncludeRecycleBin((Database) any); result = Lists.newArrayList(table); minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getPartitionIncludeRecycleBin((OlapTable) any, anyLong); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getPartitionIncludeRecycleBin((OlapTable) any, anyLong); result = partition; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getPartitionsIncludeRecycleBin((OlapTable) any); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getPartitionsIncludeRecycleBin((OlapTable) any); result = Lists.newArrayList(partition); minTimes = 0; - 
GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getReplicationNumIncludeRecycleBin((PartitionInfo) any, anyLong); result = (short) 1; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getDataPropertyIncludeRecycleBin((PartitionInfo) any, anyLong); result = dataProperty; minTimes = 0; @@ -478,45 +478,47 @@ public void testBalanceBackendTablet(@Mocked GlobalStateMgr globalStateMgr) { result = globalStateMgr; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIdsIncludeRecycleBin(); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIdsIncludeRecycleBin(); result = Lists.newArrayList(dbId); minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(dbId); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(dbId); result = database; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getTableIncludeRecycleBin((Database) any, anyLong); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTableIncludeRecycleBin((Database) any, anyLong); result = table; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getTablesIncludeRecycleBin((Database) any); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTablesIncludeRecycleBin((Database) any); result = Lists.newArrayList(table); minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getPartitionIncludeRecycleBin((OlapTable) any, partitionId1); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .getPartitionIncludeRecycleBin((OlapTable) any, partitionId1); result = partition1; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getPartitionIncludeRecycleBin((OlapTable) any, partitionId2); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .getPartitionIncludeRecycleBin((OlapTable) any, partitionId2); result = partition2; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getAllPartitionsIncludeRecycleBin((OlapTable) any); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getAllPartitionsIncludeRecycleBin((OlapTable) any); result = Lists.newArrayList(partition1, partition2); minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getReplicationNumIncludeRecycleBin((PartitionInfo) any, anyLong); result = (short) 1; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getDataPropertyIncludeRecycleBin((PartitionInfo) any, partitionId1); result = dataProperty1; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getDataPropertyIncludeRecycleBin((PartitionInfo) any, partitionId2); result = dataProperty2; minTimes = 0; @@ -669,36 +671,36 @@ public void testBalanceParallel(@Mocked GlobalStateMgr globalStateMgr) { result = globalStateMgr; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIdsIncludeRecycleBin(); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIdsIncludeRecycleBin(); result = Lists.newArrayList(dbId); minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getDbIncludeRecycleBin(dbId); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getDbIncludeRecycleBin(dbId); result = database; minTimes = 0; - 
GlobalStateMgr.getCurrentState().getLocalMetastore().getTableIncludeRecycleBin((Database) any, anyLong); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTableIncludeRecycleBin((Database) any, anyLong); result = table; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getTablesIncludeRecycleBin((Database) any); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getTablesIncludeRecycleBin((Database) any); result = Lists.newArrayList(table); minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getPartitionIncludeRecycleBin((OlapTable) any, anyLong); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getPartitionIncludeRecycleBin((OlapTable) any, anyLong); result = partition; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore().getAllPartitionsIncludeRecycleBin((OlapTable) any); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().getAllPartitionsIncludeRecycleBin((OlapTable) any); result = Lists.newArrayList(partition); minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getReplicationNumIncludeRecycleBin((PartitionInfo) any, anyLong); result = (short) 1; minTimes = 0; - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .getDataPropertyIncludeRecycleBin((PartitionInfo) any, anyLong); result = dataProperty; minTimes = 0; diff --git a/fe/fe-core/src/test/java/com/starrocks/clone/DynamicPartitionSchedulerTest.java b/fe/fe-core/src/test/java/com/starrocks/clone/DynamicPartitionSchedulerTest.java index e5e63ae28a7609..406ff26ae7b96b 100644 --- a/fe/fe-core/src/test/java/com/starrocks/clone/DynamicPartitionSchedulerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/clone/DynamicPartitionSchedulerTest.java @@ -119,7 +119,7 @@ public void testPartitionTTLPropertiesZero() throws Exception { CreateMaterializedViewStatement createMaterializedViewStatement = (CreateMaterializedViewStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createMaterializedViewStatement); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createMaterializedViewStatement); Assert.fail(); } catch (Exception ex) { Assert.assertTrue(ex.getMessage().contains("Illegal Partition TTL Number")); diff --git a/fe/fe-core/src/test/java/com/starrocks/clone/TabletSchedulerTest.java b/fe/fe-core/src/test/java/com/starrocks/clone/TabletSchedulerTest.java index 6962a63ef8fea6..50bf50f52f1155 100644 --- a/fe/fe-core/src/test/java/com/starrocks/clone/TabletSchedulerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/clone/TabletSchedulerTest.java @@ -154,7 +154,7 @@ public void testSubmitBatchTaskIfNotExpired() { long now = System.currentTimeMillis(); CatalogRecycleBin recycleBin = new CatalogRecycleBin(); - recycleBin.recycleDatabase(badDb, new HashSet<>()); + recycleBin.recycleDatabase(badDb, new HashSet<>(), false); recycleBin.recycleTable(goodDB.getId(), badTable, true); RecyclePartitionInfo recyclePartitionInfo = new RecycleRangePartitionInfo(goodDB.getId(), goodTable.getId(), badPartition, null, new DataProperty(TStorageMedium.HDD), (short) 2, false, null); diff --git a/fe/fe-core/src/test/java/com/starrocks/cluster/SystemInfoServiceTest.java b/fe/fe-core/src/test/java/com/starrocks/cluster/SystemInfoServiceTest.java index 22168d1ad5ae0d..f48d6480caa6af 100644 --- 
a/fe/fe-core/src/test/java/com/starrocks/cluster/SystemInfoServiceTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/cluster/SystemInfoServiceTest.java @@ -123,7 +123,7 @@ public void setUp() throws IOException { minTimes = 0; result = globalStateMgr; - localMetastore = new LocalMetastore(globalStateMgr, null, null); + localMetastore = new LocalMetastore(globalStateMgr); globalStateMgr.getLocalMetastore(); minTimes = 0; result = localMetastore; diff --git a/fe/fe-core/src/test/java/com/starrocks/connector/MockedMetadataMgr.java b/fe/fe-core/src/test/java/com/starrocks/connector/MockedMetadataMgr.java index 2d8397e3c04a41..5a5a708170ffdd 100644 --- a/fe/fe-core/src/test/java/com/starrocks/connector/MockedMetadataMgr.java +++ b/fe/fe-core/src/test/java/com/starrocks/connector/MockedMetadataMgr.java @@ -16,8 +16,8 @@ package com.starrocks.connector; import com.google.common.base.Strings; +import com.starrocks.meta.StarRocksMetadata; import com.starrocks.server.CatalogMgr; -import com.starrocks.server.LocalMetastore; import com.starrocks.server.MetadataMgr; import com.starrocks.server.TemporaryTableMgr; @@ -27,9 +27,9 @@ public class MockedMetadataMgr extends MetadataMgr { private final Map metadatas = new HashMap<>(); - private final LocalMetastore localMetastore; + private final StarRocksMetadata localMetastore; - public MockedMetadataMgr(LocalMetastore localMetastore, ConnectorMgr connectorMgr) { + public MockedMetadataMgr(StarRocksMetadata localMetastore, ConnectorMgr connectorMgr) { super(localMetastore, new TemporaryTableMgr(), connectorMgr, new ConnectorTblMetaInfoMgr()); this.localMetastore = localMetastore; } diff --git a/fe/fe-core/src/test/java/com/starrocks/connector/hive/ReplayMetadataMgr.java b/fe/fe-core/src/test/java/com/starrocks/connector/hive/ReplayMetadataMgr.java index 0621d9d90358e3..4d827f724a0a1d 100644 --- a/fe/fe-core/src/test/java/com/starrocks/connector/hive/ReplayMetadataMgr.java +++ b/fe/fe-core/src/test/java/com/starrocks/connector/hive/ReplayMetadataMgr.java @@ -30,8 +30,8 @@ import com.starrocks.connector.ConnectorTblMetaInfoMgr; import com.starrocks.connector.GetRemoteFilesParams; import com.starrocks.connector.RemoteFileInfo; +import com.starrocks.meta.StarRocksMetadata; import com.starrocks.server.CatalogMgr; -import com.starrocks.server.LocalMetastore; import com.starrocks.server.MetadataMgr; import com.starrocks.server.TemporaryTableMgr; import com.starrocks.sql.optimizer.OptimizerContext; @@ -54,7 +54,7 @@ public class ReplayMetadataMgr extends MetadataMgr { private Map>> replayTableMap; private long idGen = 0; - public ReplayMetadataMgr(LocalMetastore localMetastore, + public ReplayMetadataMgr(StarRocksMetadata localMetastore, ConnectorMgr connectorMgr, ResourceMgr resourceMgr, Map>> externalTableInfoMap, diff --git a/fe/fe-core/src/test/java/com/starrocks/connector/iceberg/IcebergMetadataTest.java b/fe/fe-core/src/test/java/com/starrocks/connector/iceberg/IcebergMetadataTest.java index 3c0bcd2affc681..caed255fa8317d 100644 --- a/fe/fe-core/src/test/java/com/starrocks/connector/iceberg/IcebergMetadataTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/connector/iceberg/IcebergMetadataTest.java @@ -55,10 +55,10 @@ import com.starrocks.connector.metadata.MetadataCollectJob; import com.starrocks.connector.metadata.MetadataTableType; import com.starrocks.connector.metadata.iceberg.IcebergMetadataCollectJob; +import com.starrocks.meta.StarRocksMetadata; import com.starrocks.persist.EditLog; import com.starrocks.qe.ConnectContext; import 
com.starrocks.server.GlobalStateMgr; -import com.starrocks.server.LocalMetastore; import com.starrocks.server.MetadataMgr; import com.starrocks.server.TemporaryTableMgr; import com.starrocks.sql.analyzer.AnalyzeTestUtil; @@ -1260,7 +1260,7 @@ public void testPlanMode() { } @Test - public void testGetMetaSpec(@Mocked LocalMetastore localMetastore, @Mocked TemporaryTableMgr temporaryTableMgr) { + public void testGetMetaSpec(@Mocked StarRocksMetadata localMetastore, @Mocked TemporaryTableMgr temporaryTableMgr) { mockedNativeTableG.newAppend().appendFile(FILE_B_5).commit(); new MockUp() { @Mock @@ -1295,7 +1295,7 @@ public Optional getOptionalMetadata(String catalogName) { } @Test - public void testGetMetaSpecWithDeleteFile(@Mocked LocalMetastore localMetastore, + public void testGetMetaSpecWithDeleteFile(@Mocked StarRocksMetadata localMetastore, @Mocked TemporaryTableMgr temporaryTableMgr) { mockedNativeTableA.newAppend().appendFile(FILE_A).commit(); // FILE_A_DELETES = positionalDelete / FILE_A2_DELETES = equalityDelete diff --git a/fe/fe-core/src/test/java/com/starrocks/consistency/ConsistencyCheckerTest.java b/fe/fe-core/src/test/java/com/starrocks/consistency/ConsistencyCheckerTest.java index b7de28181515ca..0cc4030ad1ec9c 100644 --- a/fe/fe-core/src/test/java/com/starrocks/consistency/ConsistencyCheckerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/consistency/ConsistencyCheckerTest.java @@ -60,7 +60,7 @@ public void testChooseTablets(@Mocked GlobalStateMgr globalStateMgr) { partitionInfo.addPartition(partitionId, dataProperty, (short) 3, false); DistributionInfo distributionInfo = new HashDistributionInfo(1, Lists.newArrayList()); Partition partition = new Partition(partitionId, "partition", materializedIndex, distributionInfo); - partition.setVisibleVersion(2L, System.currentTimeMillis()); + partition.getDefaultPhysicalPartition().setVisibleVersion(2L, System.currentTimeMillis()); OlapTable table = new OlapTable(tableId, "table", Lists.newArrayList(), KeysType.AGG_KEYS, partitionInfo, distributionInfo); table.addPartition(partition); diff --git a/fe/fe-core/src/test/java/com/starrocks/consistency/MetaRecoveryDaemonTest.java b/fe/fe-core/src/test/java/com/starrocks/consistency/MetaRecoveryDaemonTest.java index fa07ad4e1430b1..cfeee6472b2ccf 100644 --- a/fe/fe-core/src/test/java/com/starrocks/consistency/MetaRecoveryDaemonTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/consistency/MetaRecoveryDaemonTest.java @@ -53,16 +53,16 @@ public static void tearDown() throws Exception { public void testRecover() throws Exception { PseudoCluster cluster = PseudoCluster.getInstance(); String sql = "CREATE TABLE test.`tbl_recover` (\n" + - " k1 int,\n" + - " k2 VARCHAR NOT NULL\n" + - ") ENGINE=OLAP\n" + - "DUPLICATE KEY(`k1`)\n" + - "COMMENT \"OLAP\"\n" + - "DISTRIBUTED BY HASH(`k1`) BUCKETS 8\n" + - "PROPERTIES (\n" + - " \"replication_num\" = \"3\",\n" + - " \"in_memory\" = \"false\"\n" + - ");"; + " k1 int,\n" + + " k2 VARCHAR NOT NULL\n" + + ") ENGINE=OLAP\n" + + "DUPLICATE KEY(`k1`)\n" + + "COMMENT \"OLAP\"\n" + + "DISTRIBUTED BY HASH(`k1`) BUCKETS 8\n" + + "PROPERTIES (\n" + + " \"replication_num\" = \"3\",\n" + + " \"in_memory\" = \"false\"\n" + + ");"; cluster.runSql("test", sql); cluster.runSql("test", "insert into test.tbl_recover values (1, 'a'), (2, 'b')"); @@ -71,42 +71,43 @@ public void testRecover() throws Exception { Database database = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); OlapTable table = (OlapTable) 
GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(database.getFullName(), "tbl_recover"); + .getTable(database.getFullName(), "tbl_recover"); Partition partition = table.getPartition("tbl_recover"); - MaterializedIndex index = partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL).get(0); + MaterializedIndex index = partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL).get(0); for (Tablet tablet : index.getTablets()) { for (Replica replica : tablet.getAllReplicas()) { Assert.assertEquals(2L, replica.getVersion()); } } - Assert.assertEquals(2L, partition.getVisibleVersion()); - Assert.assertEquals(3L, partition.getNextVersion()); + Assert.assertEquals(2L, partition.getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(3L, partition.getDefaultPhysicalPartition().getNextVersion()); // set partition version to a lower value - partition.setVisibleVersion(1L, System.currentTimeMillis()); - partition.setNextVersion(2L); + partition.getDefaultPhysicalPartition().setVisibleVersion(1L, System.currentTimeMillis()); + partition.getDefaultPhysicalPartition().setNextVersion(2L); for (Backend backend : GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackends()) { backend.getBackendStatus().lastSuccessReportTabletsTime = TimeUtils - .longToTimeString(System.currentTimeMillis()); + .longToTimeString(System.currentTimeMillis()); } // add a committed txn TransactionState transactionState = new TransactionState(database.getId(), Lists.newArrayList(table.getId()), - 11111, "xxxx", null, TransactionState.LoadJobSourceType.FRONTEND, null, 2222, 100000); + 11111, "xxxx", null, TransactionState.LoadJobSourceType.FRONTEND, null, 2222, 100000); TableCommitInfo tableCommitInfo = new TableCommitInfo(table.getId()); PartitionCommitInfo partitionCommitInfo = new PartitionCommitInfo(partition.getId(), 4, -1L); tableCommitInfo.addPartitionCommitInfo(partitionCommitInfo); transactionState.putIdToTableCommitInfo(table.getId(), tableCommitInfo); transactionState.setTransactionStatus(TransactionStatus.COMMITTED); GlobalStateMgr.getCurrentState().getGlobalTransactionMgr() - .getDatabaseTransactionMgr(database.getId()).replayUpsertTransactionState(transactionState); + .getDatabaseTransactionMgr(database.getId()).replayUpsertTransactionState(transactionState); // recover will fail, because there is a committed txn on that partition MetaRecoveryDaemon recovery = new MetaRecoveryDaemon(); recovery.recover(); - Assert.assertEquals(1L, partition.getVisibleVersion()); + Assert.assertEquals(1L, partition.getDefaultPhysicalPartition().getVisibleVersion()); BaseProcResult baseProcResult = new BaseProcResult(); recovery.fetchProcNodeResult(baseProcResult); Assert.assertEquals(1, baseProcResult.getRows().size()); @@ -114,33 +115,35 @@ public void testRecover() throws Exception { // change the txn state to visible, recover will succeed transactionState.setTransactionStatus(TransactionStatus.VISIBLE); recovery.recover(); - Assert.assertEquals(2L, partition.getVisibleVersion()); - Assert.assertEquals(3L, partition.getNextVersion()); + Assert.assertEquals(2L, partition.getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(3L, partition.getDefaultPhysicalPartition().getNextVersion()); baseProcResult = new BaseProcResult(); recovery.fetchProcNodeResult(baseProcResult); Assert.assertEquals(0, baseProcResult.getRows().size()); // change replica version - LocalTablet localTablet = (LocalTablet) 
partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL) - .get(0).getTablets().get(0); + LocalTablet localTablet = (LocalTablet) partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL) + .get(0).getTablets().get(0); long version = 3; for (Replica replica : localTablet.getAllReplicas()) { replica.updateForRestore(++version, 10, 10); } - LocalTablet localTablet2 = (LocalTablet) partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL) - .get(0).getTablets().get(0); + LocalTablet localTablet2 = (LocalTablet) partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL) + .get(0).getTablets().get(0); for (Replica replica : localTablet2.getAllReplicas()) { replica.updateForRestore(4, 10, 10); } // set partition version to a lower value - partition.setVisibleVersion(1L, System.currentTimeMillis()); - partition.setNextVersion(2L); + partition.getDefaultPhysicalPartition().setVisibleVersion(1L, System.currentTimeMillis()); + partition.getDefaultPhysicalPartition().setNextVersion(2L); // recover will fail, because there is no common version on tablets. recovery.recover(); - Assert.assertEquals(1L, partition.getVisibleVersion()); - Assert.assertEquals(2L, partition.getNextVersion()); + Assert.assertEquals(1L, partition.getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(2L, partition.getDefaultPhysicalPartition().getNextVersion()); baseProcResult = new BaseProcResult(); recovery.fetchProcNodeResult(baseProcResult); Assert.assertEquals(1, baseProcResult.getRows().size()); @@ -158,8 +161,8 @@ public void testRecover() throws Exception { // recover will succeed recovery.recover(); - Assert.assertEquals(2L, partition.getVisibleVersion()); - Assert.assertEquals(3L, partition.getNextVersion()); + Assert.assertEquals(2L, partition.getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(3L, partition.getDefaultPhysicalPartition().getNextVersion()); baseProcResult = new BaseProcResult(); recovery.fetchProcNodeResult(baseProcResult); Assert.assertEquals(0, baseProcResult.getRows().size()); @@ -173,7 +176,7 @@ public void testCheckTabletReportCacheUp() { MetaRecoveryDaemon metaRecoveryDaemon = new MetaRecoveryDaemon(); for (Backend backend : GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackends()) { backend.getBackendStatus().lastSuccessReportTabletsTime = TimeUtils - .longToTimeString(timeMs); + .longToTimeString(timeMs); } Assert.assertFalse(metaRecoveryDaemon.checkTabletReportCacheUp(timeMs + 1000L)); Assert.assertTrue(metaRecoveryDaemon.checkTabletReportCacheUp(timeMs - 1000L)); diff --git a/fe/fe-core/src/test/java/com/starrocks/http/StarRocksHttpTestCase.java b/fe/fe-core/src/test/java/com/starrocks/http/StarRocksHttpTestCase.java index 1bcc2c1ffe10e6..566b4c74b4af07 100644 --- a/fe/fe-core/src/test/java/com/starrocks/http/StarRocksHttpTestCase.java +++ b/fe/fe-core/src/test/java/com/starrocks/http/StarRocksHttpTestCase.java @@ -61,9 +61,9 @@ import com.starrocks.common.jmockit.Deencapsulation; import com.starrocks.common.util.PropertyAnalyzer; import com.starrocks.load.Load; +import com.starrocks.meta.StarRocksMetadata; import com.starrocks.persist.EditLog; import com.starrocks.server.GlobalStateMgr; -import com.starrocks.server.LocalMetastore; import com.starrocks.server.MetadataMgr; import com.starrocks.server.NodeMgr; import com.starrocks.server.TemporaryTableMgr; @@ -172,8 +172,8 @@ public static OlapTable newEmptyTable(String 
name) { // partition HashDistributionInfo distributionInfo = new HashDistributionInfo(10, Lists.newArrayList(k1)); Partition partition = new Partition(testPartitionId, "testPartition", baseIndex, distributionInfo); - partition.updateVisibleVersion(testStartVersion); - partition.setNextVersion(testStartVersion + 1); + partition.getDefaultPhysicalPartition().updateVisibleVersion(testStartVersion); + partition.getDefaultPhysicalPartition().setNextVersion(testStartVersion + 1); // table PartitionInfo partitionInfo = new SinglePartitionInfo(); @@ -225,8 +225,8 @@ public static OlapTable newTable(String name) { // partition HashDistributionInfo distributionInfo = new HashDistributionInfo(10, Lists.newArrayList(k1)); Partition partition = new Partition(testPartitionId, "testPartition", baseIndex, distributionInfo); - partition.updateVisibleVersion(testStartVersion); - partition.setNextVersion(testStartVersion + 1); + partition.getDefaultPhysicalPartition().updateVisibleVersion(testStartVersion); + partition.getDefaultPhysicalPartition().setNextVersion(testStartVersion + 1); // table PartitionInfo partitionInfo = new SinglePartitionInfo(); @@ -280,7 +280,7 @@ private static GlobalStateMgr newDelegateCatalog() { db.registerTableUnlocked(newEmptyTable); ConcurrentHashMap nameToDb = new ConcurrentHashMap<>(); nameToDb.put(db.getFullName(), db); - LocalMetastore localMetastore = new LocalMetastore(globalStateMgr, null, null); + StarRocksMetadata starRocksMetadata = new StarRocksMetadata(); new Expectations(globalStateMgr) { { @@ -304,19 +304,19 @@ private static GlobalStateMgr newDelegateCatalog() { minTimes = 0; result = editLog; - globalStateMgr.getLocalMetastore(); + globalStateMgr.getStarRocksMetadata(); minTimes = 0; - result = localMetastore; + result = starRocksMetadata; } }; - new Expectations(localMetastore) { + new Expectations(starRocksMetadata) { { - localMetastore.getDb("testDb"); + starRocksMetadata.getDb("testDb"); minTimes = 0; result = db; - localMetastore.getDb(testDbId); + starRocksMetadata.getDb(testDbId); minTimes = 0; result = db; } @@ -338,9 +338,8 @@ private static GlobalStateMgr newDelegateGlobalStateMgr() { db.registerTableUnlocked(esTable); OlapTable newEmptyTable = newEmptyTable("test_empty_table"); db.registerTableUnlocked(newEmptyTable); - - LocalMetastore localMetastore = new LocalMetastore(globalStateMgr, null, null); - MetadataMgr metadataMgr = new MetadataMgr(localMetastore, new TemporaryTableMgr(), null, null); + StarRocksMetadata starRocksMetadata = new StarRocksMetadata(); + MetadataMgr metadataMgr = new MetadataMgr(starRocksMetadata, new TemporaryTableMgr(), null, null); new Expectations(globalStateMgr) { { @@ -360,9 +359,9 @@ private static GlobalStateMgr newDelegateGlobalStateMgr() { minTimes = 0; result = metadataMgr; - globalStateMgr.getLocalMetastore(); + globalStateMgr.getStarRocksMetadata(); minTimes = 0; - result = localMetastore; + result = starRocksMetadata; } }; @@ -382,13 +381,13 @@ private static GlobalStateMgr newDelegateGlobalStateMgr() { } }; - new Expectations(localMetastore) { + new Expectations(starRocksMetadata) { { - localMetastore.getDb("testDb"); + starRocksMetadata.getDb("testDb"); minTimes = 0; result = db; - localMetastore.getDb(testDbId); + starRocksMetadata.getDb(testDbId); minTimes = 0; result = db; } diff --git a/fe/fe-core/src/test/java/com/starrocks/http/rest/v2/TablePartitionActionTest.java b/fe/fe-core/src/test/java/com/starrocks/http/rest/v2/TablePartitionActionTest.java index bace9e50e5ff42..f7344f5669c93a 100644 --- 
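// Illustrative sketch (hypothetical helper, not taken from the patch): partition version state is
// now reached through the default physical partition, so the tests above call
// partition.getDefaultPhysicalPartition().setVisibleVersion(...)/setNextVersion(...) instead of the
// removed Partition-level setters. Only methods already used in these hunks are assumed.
import com.starrocks.catalog.Partition;

final class PartitionVersionFixtures {
    static void resetVersions(Partition partition, long visibleVersion) {
        // previously: partition.setVisibleVersion(...) and partition.setNextVersion(...)
        partition.getDefaultPhysicalPartition().setVisibleVersion(visibleVersion, System.currentTimeMillis());
        partition.getDefaultPhysicalPartition().setNextVersion(visibleVersion + 1);
    }
}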
a/fe/fe-core/src/test/java/com/starrocks/http/rest/v2/TablePartitionActionTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/http/rest/v2/TablePartitionActionTest.java @@ -369,8 +369,8 @@ private static OlapTable newOlapTable(Long tableId, String tableName, int partit long partitionId = BASE_PARTITION_ID + i; Partition partition = new Partition(partitionId, "testPartition_" + i, baseIndex, distributionInfo); - partition.setVisibleVersion(testStartVersion, System.currentTimeMillis()); - partition.setNextVersion(testStartVersion + 1); + partition.getDefaultPhysicalPartition().setVisibleVersion(testStartVersion, System.currentTimeMillis()); + partition.getDefaultPhysicalPartition().setNextVersion(testStartVersion + 1); PartitionKey rangeLower = PartitionKey.createPartitionKey( Lists.newArrayList(new PartitionValue(String.valueOf(i * 10))), Lists.newArrayList(c1)); @@ -459,8 +459,8 @@ private static LakeTable newLakeTable(Long tableId, String tableName, int partit long partitionId = BASE_PARTITION_ID + i; Partition partition = new Partition(partitionId, "testPartition_" + i, baseIndex, distributionInfo); - partition.setVisibleVersion(testStartVersion, System.currentTimeMillis()); - partition.setNextVersion(testStartVersion + 1); + partition.getDefaultPhysicalPartition().setVisibleVersion(testStartVersion, System.currentTimeMillis()); + partition.getDefaultPhysicalPartition().setNextVersion(testStartVersion + 1); PartitionKey rangeLower = PartitionKey.createPartitionKey( Lists.newArrayList(new PartitionValue(String.valueOf(i * 10))), Lists.newArrayList(c1)); diff --git a/fe/fe-core/src/test/java/com/starrocks/lake/AlterTest.java b/fe/fe-core/src/test/java/com/starrocks/lake/AlterTest.java index 16ee0473f61c42..0e512845ab2e0e 100644 --- a/fe/fe-core/src/test/java/com/starrocks/lake/AlterTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/lake/AlterTest.java @@ -61,7 +61,7 @@ public void testAddPartitionForLakeTable() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String dropSQL = "drop table if exists test_lake_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); String createSQL = "CREATE TABLE test.test_lake_partition (\n" + " k1 DATE,\n" + " k2 INT,\n" + @@ -87,7 +87,7 @@ public void testAddPartitionForLakeTable() throws Exception { " PARTITION p3 VALUES LESS THAN (\"2014-01-01\")"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .addPartitions(Util.getOrCreateConnectContext(), db, "test_lake_partition", addPartitionClause); Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") @@ -99,7 +99,7 @@ public void testAddPartitionForLakeTable() throws Exception { dropSQL = "drop table test_lake_partition"; dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test @@ -107,7 +107,7 @@ public void testMultiRangePartitionForLakeTable() throws Exception { ConnectContext ctx = 
starRocksAssert.getCtx(); String dropSQL = "drop table if exists site_access"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); String createSQL = "CREATE TABLE site_access (\n" + " datekey INT,\n" + " site_id INT,\n" + @@ -133,7 +133,7 @@ public void testMultiRangePartitionForLakeTable() throws Exception { AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(alterSQL, ctx); AddPartitionClause addPartitionClause = (AddPartitionClause) alterTableStmt.getAlterClauseList().get(0); - GlobalStateMgr.getCurrentState().getLocalMetastore() + GlobalStateMgr.getCurrentState().getStarRocksMetadata() .addPartitions(Util.getOrCreateConnectContext(), db, "site_access", addPartitionClause); Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test") @@ -148,7 +148,7 @@ public void testMultiRangePartitionForLakeTable() throws Exception { dropSQL = "drop table site_access"; dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } @Test @@ -219,7 +219,7 @@ public void testSingleRangePartitionPersistInfo() throws Exception { String dropSQL = "drop table new_table"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); file.delete(); } @@ -228,7 +228,7 @@ public void testAlterTableCompactionForLakeTable() throws Exception { ConnectContext ctx = starRocksAssert.getCtx(); String dropSQL = "drop table if exists test_lake_partition"; DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); String createSQL = "CREATE TABLE test.t1 (\n" + " k1 DATE,\n" + " k2 INT,\n" + @@ -252,7 +252,7 @@ public void testAlterTableCompactionForLakeTable() throws Exception { String sql = "ALTER TABLE t1 COMPACT p1"; AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(connectContext, alterTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(connectContext, alterTableStmt); } catch (Exception e) { e.printStackTrace(); Assert.fail(); diff --git a/fe/fe-core/src/test/java/com/starrocks/lake/CreateLakeTableTest.java b/fe/fe-core/src/test/java/com/starrocks/lake/CreateLakeTableTest.java index e97c57c371faf0..0ada09f4e38ca5 100644 --- a/fe/fe-core/src/test/java/com/starrocks/lake/CreateLakeTableTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/lake/CreateLakeTableTest.java @@ -54,7 +54,7 @@ public static void beforeClass() throws Exception { // create database String createDbStmtStr = "create database lake_test;"; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + 
GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); } @AfterClass @@ -63,7 +63,7 @@ public static void afterClass() { private static void createTable(String sql) throws Exception { CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(createTableStmt); } private void checkLakeTable(String dbName, String tableName) { diff --git a/fe/fe-core/src/test/java/com/starrocks/lake/LakeTableHelperTest.java b/fe/fe-core/src/test/java/com/starrocks/lake/LakeTableHelperTest.java index 9883c77d5c1cf0..c99af611af3554 100644 --- a/fe/fe-core/src/test/java/com/starrocks/lake/LakeTableHelperTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/lake/LakeTableHelperTest.java @@ -41,7 +41,7 @@ public static void beforeClass() throws Exception { // create database String createDbStmtStr = "create database " + DB_NAME; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); } @AfterClass @@ -50,7 +50,7 @@ public static void afterClass() { private static LakeTable createTable(String sql) throws Exception { CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(createTableStmt); Table table = testDb().getTable(createTableStmt.getTableName()); return (LakeTable) table; } diff --git a/fe/fe-core/src/test/java/com/starrocks/lake/compaction/CompactionJobTest.java b/fe/fe-core/src/test/java/com/starrocks/lake/compaction/CompactionJobTest.java index ba64b99792b4ec..5a1ca0cfc7052d 100644 --- a/fe/fe-core/src/test/java/com/starrocks/lake/compaction/CompactionJobTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/lake/compaction/CompactionJobTest.java @@ -16,7 +16,7 @@ import com.starrocks.catalog.Database; import com.starrocks.catalog.PhysicalPartition; -import com.starrocks.catalog.PhysicalPartitionImpl; +import com.starrocks.catalog.PhysicalPartition; import com.starrocks.catalog.Table; import mockit.Mock; import mockit.MockUp; @@ -33,7 +33,7 @@ public class CompactionJobTest { public void testGetResult() { Database db = new Database(); Table table = new Table(Table.TableType.CLOUD_NATIVE); - PhysicalPartition partition = new PhysicalPartitionImpl(0, "", 1, 2, null); + PhysicalPartition partition = new PhysicalPartition(0, "", 1, 2, null); CompactionJob job = new CompactionJob(db, table, partition, 10010, true); Assert.assertTrue(job.getAllowPartialSuccess()); @@ -78,7 +78,7 @@ public CompactionTask.TaskResult getResult() { public void testBuildTabletCommitInfo() { Database db = new Database(); Table table = new Table(Table.TableType.CLOUD_NATIVE); - PhysicalPartition partition = new PhysicalPartitionImpl(0, "", 1, 2, null); + PhysicalPartition partition = new PhysicalPartition(0, "", 1, 2, null); CompactionJob job = new CompactionJob(db, table, partition, 10010, false); assertDoesNotThrow(() -> { job.buildTabletCommitInfo(); diff --git a/fe/fe-core/src/test/java/com/starrocks/lake/compaction/CompactionMgrTest.java 
b/fe/fe-core/src/test/java/com/starrocks/lake/compaction/CompactionMgrTest.java index 357a1653774ba1..3d59a41d3a17e5 100644 --- a/fe/fe-core/src/test/java/com/starrocks/lake/compaction/CompactionMgrTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/lake/compaction/CompactionMgrTest.java @@ -16,7 +16,6 @@ import com.google.common.collect.Lists; import com.starrocks.catalog.Database; -import com.starrocks.catalog.Partition; import com.starrocks.catalog.PhysicalPartition; import com.starrocks.catalog.Table; import com.starrocks.common.Config; @@ -143,7 +142,7 @@ public ConcurrentHashMap getRunningCompactio PartitionIdentifier partitionIdentifier = new PartitionIdentifier(1, 2, 3); Database db = new Database(); Table table = new LakeTable(); - PhysicalPartition partition = new Partition(123, "aaa", null, null); + PhysicalPartition partition = new PhysicalPartition(123, "aaa", 123, 0, null); CompactionJob job = new CompactionJob(db, table, partition, txnId, false); r.put(partitionIdentifier, job); return r; diff --git a/fe/fe-core/src/test/java/com/starrocks/lake/compaction/CompactionSchedulerTest.java b/fe/fe-core/src/test/java/com/starrocks/lake/compaction/CompactionSchedulerTest.java index 06c8e2e2615266..486111cc8a8199 100644 --- a/fe/fe-core/src/test/java/com/starrocks/lake/compaction/CompactionSchedulerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/lake/compaction/CompactionSchedulerTest.java @@ -17,7 +17,6 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.starrocks.catalog.Database; -import com.starrocks.catalog.Partition; import com.starrocks.catalog.PhysicalPartition; import com.starrocks.catalog.Table; import com.starrocks.common.Config; @@ -169,8 +168,8 @@ public ConcurrentHashMap getRunningCompactio Table table = new LakeTable(); PartitionIdentifier partitionIdentifier1 = new PartitionIdentifier(1, 2, 3); PartitionIdentifier partitionIdentifier2 = new PartitionIdentifier(1, 2, 4); - PhysicalPartition partition1 = new Partition(123, "aaa", null, null); - PhysicalPartition partition2 = new Partition(124, "bbb", null, null); + PhysicalPartition partition1 = new PhysicalPartition(123, "aaa", 123, 0, null); + PhysicalPartition partition2 = new PhysicalPartition(124, "bbb", 124, 0, null); CompactionJob job1 = new CompactionJob(db, table, partition1, 100, false); try { Thread.sleep(10); diff --git a/fe/fe-core/src/test/java/com/starrocks/leader/LeaderImplTest.java b/fe/fe-core/src/test/java/com/starrocks/leader/LeaderImplTest.java index 78767f2455c656..0a28a075afe64c 100644 --- a/fe/fe-core/src/test/java/com/starrocks/leader/LeaderImplTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/leader/LeaderImplTest.java @@ -71,7 +71,7 @@ public void testFindRelatedReplica(@Mocked OlapTable olapTable, @Mocked LakeTabl // olap table new Expectations() { { - partition.getIndex(indexId); + partition.getDefaultPhysicalPartition().getIndex(indexId); result = index; index.getTablet(tabletId); result = new LocalTablet(tabletId); @@ -90,7 +90,7 @@ public Set getBackendIds() { new Expectations() { { - partition.getIndex(indexId); + partition.getDefaultPhysicalPartition().getIndex(indexId); result = index; index.getTablet(tabletId); result = new LakeTablet(tabletId); diff --git a/fe/fe-core/src/test/java/com/starrocks/leader/ReportHandlerTest.java b/fe/fe-core/src/test/java/com/starrocks/leader/ReportHandlerTest.java index 51cb4efb1edd67..1f2c60cd275608 100644 --- a/fe/fe-core/src/test/java/com/starrocks/leader/ReportHandlerTest.java +++ 
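// Illustrative sketch (hypothetical factory, not taken from the patch): PhysicalPartitionImpl is
// replaced by direct construction of PhysicalPartition, using the same five-argument constructor
// as the compaction tests above. The argument names (id, name, parentId, shardGroupId, baseIndex)
// are inferred from those call sites and are an assumption.
import com.starrocks.catalog.MaterializedIndex;
import com.starrocks.catalog.PhysicalPartition;

final class PhysicalPartitionFixtures {
    static PhysicalPartition newTestPhysicalPartition(long id, String name, long parentId,
                                                      MaterializedIndex baseIndex) {
        // previously: new PhysicalPartitionImpl(id, name, parentId, 0, baseIndex)
        return new PhysicalPartition(id, name, parentId, /* shardGroupId, assumed */ 0, baseIndex);
    }
}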
b/fe/fe-core/src/test/java/com/starrocks/leader/ReportHandlerTest.java @@ -393,7 +393,7 @@ public void testHandleMigration() throws TException { } Partition partition = table.getPartition(tabletMeta.getPartitionId()); - MaterializedIndex idx = partition.getIndex(tabletMeta.getIndexId()); + MaterializedIndex idx = partition.getDefaultPhysicalPartition().getIndex(tabletMeta.getIndexId()); LocalTablet tablet = (LocalTablet) idx.getTablet(tabletId); for (Replica replica : tablet.getImmutableReplicas()) { @@ -421,7 +421,7 @@ public void submit(AgentBatchTask task) { ListMultimap tabletMetaMigrationMap = ArrayListMultimap.create(); List allTablets = new ArrayList<>(); for (MaterializedIndex index : olapTable.getPartition("binlog_report_handler_test") - .getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { + .getDefaultPhysicalPartition().getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) { for (Tablet tablet : index.getTablets()) { tabletMetaMigrationMap.put(TStorageMedium.HDD, tablet.getId()); allTablets.add(tablet.getId()); diff --git a/fe/fe-core/src/test/java/com/starrocks/load/loadv2/SparkLoadJobTest.java b/fe/fe-core/src/test/java/com/starrocks/load/loadv2/SparkLoadJobTest.java index c0c5c5ce412850..5ddc6a75c79f0c 100644 --- a/fe/fe-core/src/test/java/com/starrocks/load/loadv2/SparkLoadJobTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/load/loadv2/SparkLoadJobTest.java @@ -382,7 +382,7 @@ public void testUpdateEtlStatusFinishedAndCommitTransaction( result = partitionInfo; table.getSchemaByIndexId(Long.valueOf(12)); result = Lists.newArrayList(new Column("k1", Type.VARCHAR)); - partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL); + partition.getDefaultPhysicalPartition().getMaterializedIndices(MaterializedIndex.IndexExtState.ALL); result = Lists.newArrayList(index); index.getId(); result = indexId; @@ -480,7 +480,7 @@ public void testUpdateEtlStatusFinishedAndCommitTransactionForLake( result = partitionInfo; table.getSchemaByIndexId(Long.valueOf(12)); result = Lists.newArrayList(new Column("k1", Type.VARCHAR)); - partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL); + partition.getDefaultPhysicalPartition().getMaterializedIndices(MaterializedIndex.IndexExtState.ALL); result = Lists.newArrayList(index); index.getId(); result = indexId; diff --git a/fe/fe-core/src/test/java/com/starrocks/load/streamload/ShowStreamLoadTest.java b/fe/fe-core/src/test/java/com/starrocks/load/streamload/ShowStreamLoadTest.java index e5d4713ea9ed1a..7d01ee65beac3d 100644 --- a/fe/fe-core/src/test/java/com/starrocks/load/streamload/ShowStreamLoadTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/load/streamload/ShowStreamLoadTest.java @@ -57,7 +57,7 @@ public static void beforeClass() throws Exception { // create database String createDbStmtStr = "create database test_db;"; CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser(createDbStmtStr, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName()); // create table String createTableStmtStr = "CREATE TABLE test_db.test_tbl (c0 int, c1 string, c2 int, c3 bigint) " + "DUPLICATE KEY (c0) DISTRIBUTED BY HASH (c0) BUCKETS 3 properties(\"replication_num\"=\"1\") ;;"; diff --git a/fe/fe-core/src/test/java/com/starrocks/persist/ChangeMaterializedViewRefreshSchemeLogTest.java 
b/fe/fe-core/src/test/java/com/starrocks/persist/ChangeMaterializedViewRefreshSchemeLogTest.java index 58e39d188da0ef..fb68264dbc71e9 100644 --- a/fe/fe-core/src/test/java/com/starrocks/persist/ChangeMaterializedViewRefreshSchemeLogTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/persist/ChangeMaterializedViewRefreshSchemeLogTest.java @@ -15,10 +15,6 @@ package com.starrocks.persist; -import com.starrocks.alter.AlterJobMgr; -import com.starrocks.alter.MaterializedViewHandler; -import com.starrocks.alter.SchemaChangeHandler; -import com.starrocks.alter.SystemHandler; import com.starrocks.catalog.AggregateType; import com.starrocks.catalog.Column; import com.starrocks.catalog.DataProperty; @@ -31,6 +27,7 @@ import com.starrocks.catalog.SinglePartitionInfo; import com.starrocks.common.Config; import com.starrocks.common.io.Text; +import com.starrocks.server.LocalMetastore; import com.starrocks.thrift.TTabletType; import org.junit.After; import org.junit.Assert; @@ -113,11 +110,7 @@ public void testFallBack() throws IOException { @Test public void testReplayWhenDbIsEmpty() { - AlterJobMgr alterJobMgr = new AlterJobMgr( - new SchemaChangeHandler(), - new MaterializedViewHandler(), - new SystemHandler()); - alterJobMgr.replayChangeMaterializedViewRefreshScheme(new ChangeMaterializedViewRefreshSchemeLog()); + LocalMetastore localMetastore = new LocalMetastore(null); + localMetastore.replayChangeMaterializedViewRefreshScheme(new ChangeMaterializedViewRefreshSchemeLog()); } - } \ No newline at end of file diff --git a/fe/fe-core/src/test/java/com/starrocks/persist/EditLogTest.java b/fe/fe-core/src/test/java/com/starrocks/persist/EditLogTest.java index 868bf8dd06bbbc..dbf78ede14e266 100644 --- a/fe/fe-core/src/test/java/com/starrocks/persist/EditLogTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/persist/EditLogTest.java @@ -165,23 +165,6 @@ private GlobalStateMgr mockGlobalStateMgr() throws Exception { return globalStateMgr; } - @Test - public void testOpUpdateFrontend() throws Exception { - GlobalStateMgr mgr = mockGlobalStateMgr(); - List frontends = mgr.getNodeMgr().getFrontends(null); - Frontend fe = frontends.get(0); - fe.updateHostAndEditLogPort("testHost", 1000); - JournalEntity journal = new JournalEntity(); - journal.setData(fe); - journal.setOpCode(OperationType.OP_UPDATE_FRONTEND); - EditLog editLog = new EditLog(null); - editLog.loadJournal(mgr, journal); - List updatedFrontends = mgr.getNodeMgr().getFrontends(null); - Frontend updatedfFe = updatedFrontends.get(0); - Assert.assertEquals("testHost", updatedfFe.getHost()); - Assert.assertTrue(updatedfFe.getEditLogPort() == 1000); - } - @Test public void testOpAddKeyJournalEntity() throws Exception { EncryptionKeyPB pb = new EncryptionKeyPB(); diff --git a/fe/fe-core/src/test/java/com/starrocks/persist/OperationTypeTest.java b/fe/fe-core/src/test/java/com/starrocks/persist/OperationTypeTest.java index 541d9aab41b531..8708512f30c286 100644 --- a/fe/fe-core/src/test/java/com/starrocks/persist/OperationTypeTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/persist/OperationTypeTest.java @@ -21,70 +21,17 @@ public class OperationTypeTest { @Test public void testRecoverableOperations() { - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ALTER_DB)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ERASE_DB)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RENAME_DB)); - 
Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_TABLE)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_PARTITION)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_MODIFY_PARTITION)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ERASE_TABLE)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ERASE_PARTITION)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RENAME_TABLE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RENAME_PARTITION)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_BACKUP_JOB)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_MODIFY_VIEW_DEF)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_BATCH_MODIFY_PARTITION)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_ROLLUP)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CLEAR_ROLLUP_INFO)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_FINISH_CONSISTENCY_CHECK)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RENAME_ROLLUP)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_MODIFY_DISTRIBUTION_TYPE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_BATCH_ADD_ROLLUP)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_BATCH_DROP_ROLLUP)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_REMOVE_ALTER_JOB_V2)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_EXPORT_CREATE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_EXPORT_UPDATE_STATE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_EXPORT_UPDATE_INFO)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DELETE_REPLICA)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_BACKEND)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_BACKEND_STATE_CHANGE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_REMOVE_FRONTEND)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_SET_LOAD_ERROR_HUB)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_HEARTBEAT)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_NEW_DROP_USER)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_GRANT_PRIV)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_REVOKE_PRIV)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_SET_PASSWORD)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_ROLE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_ROLE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPDATE_USER_PROPERTY)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_TIMESTAMP)); - 
Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_GLOBAL_VARIABLE)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_GLOBAL_VARIABLE_V2)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_BROKER)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_BROKER)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_ALL_BROKER)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_REPOSITORY)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_REPOSITORY)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_COLOCATE_ADD_TABLE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_COLOCATE_REMOVE_TABLE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_COLOCATE_BACKENDS_PER_BUCKETSEQ)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_COLOCATE_MARK_UNSTABLE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_COLOCATE_MARK_STABLE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_MODIFY_TABLE_COLOCATE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DELETE_TRANSACTION_STATE)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DELETE_AUTO_INCREMENT_ID)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ALTER_ROUTINE_LOAD_JOB)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_FUNCTION)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_FUNCTION)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_ROUTINE_LOAD_JOB)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CHANGE_ROUTINE_LOAD_JOB)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_REMOVE_ROUTINE_LOAD_JOB)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_LOAD_JOB)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_END_LOAD_JOB)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPDATE_LOAD_JOB)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_SMALL_FILE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_SMALL_FILE)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DYNAMIC_PARTITION)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_MODIFY_REPLICATION_NUM)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_MODIFY_IN_MEMORY)); @@ -114,22 +61,16 @@ public void testRecoverableOperations() { Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_HEARTBEAT_V2)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_CATALOG)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_CATALOG)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_GRANT_IMPERSONATE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_REVOKE_IMPERSONATE)); - 
Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_GRANT_ROLE)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_REVOKE_ROLE)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_TASK)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_TASKS)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_TASK_RUN)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPDATE_TASK_RUN)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_TASK_RUNS)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPDATE_TASK_RUN_STATE)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ALTER_TASK)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RENAME_MATERIALIZED_VIEW)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains( OperationType.OP_CHANGE_MATERIALIZED_VIEW_REFRESH_SCHEME)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ALTER_MATERIALIZED_VIEW_PROPERTIES)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_MATERIALIZED_VIEW)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ALTER_MATERIALIZED_VIEW_STATUS)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_COMPUTE_NODE)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_COMPUTE_NODE)); @@ -138,8 +79,6 @@ public void testRecoverableOperations() { Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ALTER_USER_V2)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPDATE_ROLE_PRIVILEGE_V2)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_ROLE_V2)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPDATE_USER_PROP_V2)); - Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_STREAM_LOAD_TASK)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_MV_EPOCH_UPDATE)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_MV_JOB_STATE)); Assert.assertTrue(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ALTER_LOAD_JOB)); @@ -212,78 +151,40 @@ public void testRecoverableOperations() { @Test public void testUnRecoverableOperations() { - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_BACKEND)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_BACKEND_V2)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_FIRST_FRONTEND)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_FIRST_FRONTEND_V2)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_FRONTEND)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_FRONTEND_V2)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_PARTITION)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_PARTITIONS)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_PARTITIONS_V2)); 
Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_PARTITION_V2)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_REPLICA)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_REPLICA_V2)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_SUB_PARTITIONS_V2)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ADD_UNUSED_SHARD)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_ALTER_JOB_V2)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_BACKEND_TABLETS_INFO)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_BACKEND_TABLETS_INFO_V2)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_BATCH_DELETE_REPLICA)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CANCEL_ROLLUP)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CANCEL_SCHEMA_CHANGE)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_CLUSTER)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_DB)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_DB_V2)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_INSERT_OVERWRITE)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_TABLE)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_TABLE_V2)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_USER)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_CREATE_USER_V2)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DELETE_UNUSED_SHARD)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_USER_V2)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_DROP_USER_V3)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_FINISH_ASYNC_DELETE)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_FINISH_DECOMMISSION_BACKEND)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_FINISH_DELETE)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_FINISHING_ROLLUP)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_FINISHING_SCHEMA_CHANGE)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_FINISH_ROLLUP)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_FINISH_SCHEMA_CHANGE)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_FINISH_SYNC_DELETE)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_INSERT_OVERWRITE_STATE_CHANGE)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_INVALID)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_LEADER_INFO_CHANGE)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_LEADER_INFO_CHANGE_V2)); - 
Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_META_VERSION)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_META_VERSION_V2)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_MODIFY_TABLE_ADD_OR_DROP_COLUMNS)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RECOVER_DB)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RECOVER_DB_V2)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RECOVER_PARTITION)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RECOVER_PARTITION_V2)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RECOVER_TABLE)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RECOVER_TABLE_V2)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_REPLACE_TEMP_PARTITION)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RESTORE_JOB)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_RESTORE_JOB_V2)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_SAVE_AUTO_INCREMENT_ID)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_SAVE_NEXTID)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_SAVE_TRANSACTION_ID)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_SAVE_TRANSACTION_ID_V2)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_SET_REPLICA_STATUS)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_STARMGR)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_START_DECOMMISSION_BACKEND)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_START_ROLLUP)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_START_SCHEMA_CHANGE)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_TRUNCATE_TABLE)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_TYPE_EOF)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPDATE_CLUSTER_AND_BACKENDS)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPDATE_FRONTEND)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPDATE_FRONTEND_V2)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPDATE_REPLICA)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPDATE_REPLICA_V2)); - Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPSERT_TRANSACTION_STATE)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPSERT_TRANSACTION_STATE_BATCH)); Assert.assertFalse(OperationType.IGNORABLE_OPERATIONS.contains(OperationType.OP_UPSERT_TRANSACTION_STATE_V2)); } diff --git a/fe/fe-core/src/test/java/com/starrocks/persist/RenameMaterializedViewLogTest.java b/fe/fe-core/src/test/java/com/starrocks/persist/RenameMaterializedViewLogTest.java index d7a7b75e9fabb1..5a66e2856b672f 100644 --- a/fe/fe-core/src/test/java/com/starrocks/persist/RenameMaterializedViewLogTest.java +++ 
b/fe/fe-core/src/test/java/com/starrocks/persist/RenameMaterializedViewLogTest.java @@ -14,10 +14,10 @@ package com.starrocks.persist; -import com.starrocks.alter.AlterJobMgr; import com.starrocks.catalog.Database; import com.starrocks.catalog.MaterializedView; import com.starrocks.server.GlobalStateMgr; +import com.starrocks.server.LocalMetastore; import mockit.Expectations; import mockit.Injectable; import mockit.Mocked; @@ -73,7 +73,7 @@ public void testNormal(@Mocked GlobalStateMgr globalStateMgr, } }; - new AlterJobMgr(null, null, null) + new LocalMetastore(null) .replayRenameMaterializedView(renameMaterializedViewLog); } diff --git a/fe/fe-core/src/test/java/com/starrocks/planner/OlapTableSinkTest.java b/fe/fe-core/src/test/java/com/starrocks/planner/OlapTableSinkTest.java index 32385a3539cc7b..6f70c479643bf5 100644 --- a/fe/fe-core/src/test/java/com/starrocks/planner/OlapTableSinkTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/planner/OlapTableSinkTest.java @@ -39,7 +39,7 @@ import com.starrocks.catalog.PartitionKey; import com.starrocks.catalog.PartitionType; import com.starrocks.catalog.ColumnId; -import com.starrocks.catalog.PhysicalPartitionImpl; +import com.starrocks.catalog.PhysicalPartition; import com.starrocks.catalog.RandomDistributionInfo; import com.starrocks.catalog.RangePartitionInfo; import com.starrocks.catalog.Replica; @@ -465,10 +465,10 @@ public void testImmutablePartition() throws UserException { RandomDistributionInfo distInfo = new RandomDistributionInfo(3); Partition partition = new Partition(2, "p1", index, distInfo); - PhysicalPartitionImpl physicalPartition = new PhysicalPartitionImpl(3, "", 2, 0, index); + PhysicalPartition physicalPartition = new PhysicalPartition(3, "", 2, 0, index); partition.addSubPartition(physicalPartition); - physicalPartition = new PhysicalPartitionImpl(4, "", 2, 0, index); + physicalPartition = new PhysicalPartition(4, "", 2, 0, index); physicalPartition.setImmutable(true); partition.addSubPartition(physicalPartition); @@ -505,10 +505,10 @@ public void testInitialOpenPartition() throws UserException { RandomDistributionInfo distInfo = new RandomDistributionInfo(3); Partition partition = new Partition(2, "p1", index, distInfo); - PhysicalPartitionImpl physicalPartition = new PhysicalPartitionImpl(3, "", 2, 0, index); + PhysicalPartition physicalPartition = new PhysicalPartition(3, "", 2, 0, index); partition.addSubPartition(physicalPartition); - physicalPartition = new PhysicalPartitionImpl(4, "", 2, 0, index); + physicalPartition = new PhysicalPartition(4, "", 2, 0, index); physicalPartition.setImmutable(true); partition.addSubPartition(physicalPartition); diff --git a/fe/fe-core/src/test/java/com/starrocks/planner/mv/MVMetaVersionRepairerTest.java b/fe/fe-core/src/test/java/com/starrocks/planner/mv/MVMetaVersionRepairerTest.java index 5287a5dbea28e8..9d617dc257cd29 100644 --- a/fe/fe-core/src/test/java/com/starrocks/planner/mv/MVMetaVersionRepairerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/planner/mv/MVMetaVersionRepairerTest.java @@ -55,7 +55,7 @@ public static void beforeClass() throws Exception { private MVRepairHandler.PartitionRepairInfo toPartitionInfo(Partition partition, long version, long versionTime) { return new MVRepairHandler.PartitionRepairInfo(partition.getId(), partition.getName(), - partition.getVisibleVersion(), version, versionTime); + partition.getDefaultPhysicalPartition().getVisibleVersion(), version, versionTime); } @Test @@ -205,8 +205,9 @@ public void 
testRepairBaseTableVersionChanges3() { String baseTablePartitionName = value.keySet().iterator().next(); MaterializedView.BasePartitionInfo basePartitionInfo = value.get(baseTablePartitionName); Partition p1 = m1.getPartition("p1"); - Assert.assertEquals(basePartitionInfo.getVersion(), p1.getVisibleVersion()); - Assert.assertEquals(basePartitionInfo.getLastRefreshTime(), p1.getVisibleVersionTime()); + Assert.assertEquals(basePartitionInfo.getVersion(), p1.getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(basePartitionInfo.getLastRefreshTime(), + p1.getDefaultPhysicalPartition().getVisibleVersionTime()); Partition p2 = m1.getPartition("p2"); long currentTs = System.currentTimeMillis(); @@ -269,8 +270,8 @@ public void testRepairBaseTableVersionChanges4() { String baseTablePartitionName = value.keySet().iterator().next(); MaterializedView.BasePartitionInfo basePartitionInfo = value.get(baseTablePartitionName); Partition p1 = m1.getPartition("p1"); - long lastRefreshVersion = p1.getVisibleVersion(); - long lastRefreshVersionTime = p1.getVisibleVersionTime(); + long lastRefreshVersion = p1.getDefaultPhysicalPartition().getVisibleVersion(); + long lastRefreshVersionTime = p1.getDefaultPhysicalPartition().getVisibleVersionTime(); Assert.assertEquals(basePartitionInfo.getVersion(), lastRefreshVersion); Assert.assertEquals(basePartitionInfo.getLastRefreshTime(), lastRefreshVersionTime); @@ -278,7 +279,8 @@ public void testRepairBaseTableVersionChanges4() { // p1 has been refreshed, but p2 has been compaction or fast schema changed, use curPartition as its // partition // p1 has been updated, so the version of p1 should be updated - p1.setVisibleVersion(lastRefreshVersion + 1, lastRefreshVersionTime + 1); + p1.getDefaultPhysicalPartition() + .setVisibleVersion(lastRefreshVersion + 1, lastRefreshVersionTime + 1); MVRepairHandler.PartitionRepairInfo partitionRepairInfo = toPartitionInfo(p1, 100L, currentTs); diff --git a/fe/fe-core/src/test/java/com/starrocks/privilege/AuthorizationMgrTest.java b/fe/fe-core/src/test/java/com/starrocks/privilege/AuthorizationMgrTest.java index 85b9eac6d9c290..a8972e108de4f2 100644 --- a/fe/fe-core/src/test/java/com/starrocks/privilege/AuthorizationMgrTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/privilege/AuthorizationMgrTest.java @@ -110,7 +110,7 @@ public void setUp() throws Exception { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); RBACMockedMetadataMgr metadataMgr = - new RBACMockedMetadataMgr(globalStateMgr.getLocalMetastore(), globalStateMgr.getConnectorMgr()); + new RBACMockedMetadataMgr(globalStateMgr.getStarRocksMetadata(), globalStateMgr.getConnectorMgr()); metadataMgr.init(); globalStateMgr.setMetadataMgr(metadataMgr); @@ -128,7 +128,7 @@ public void setUp() throws Exception { private static void createMaterializedView(String sql, ConnectContext connectContext) throws Exception { CreateMaterializedViewStatement createMaterializedViewStatement = (CreateMaterializedViewStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createMaterializedViewStatement); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createMaterializedViewStatement); } @After diff --git a/fe/fe-core/src/test/java/com/starrocks/privilege/InvalidateObjectTest.java b/fe/fe-core/src/test/java/com/starrocks/privilege/InvalidateObjectTest.java index fb018862dd8c12..82a9087699893a 100644 --- 
a/fe/fe-core/src/test/java/com/starrocks/privilege/InvalidateObjectTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/privilege/InvalidateObjectTest.java @@ -91,7 +91,7 @@ private void setCurrentUserAndRoles(ConnectContext ctx, UserIdentity userIdentit private static void createMaterializedView(String sql, ConnectContext connectContext) throws Exception { CreateMaterializedViewStatement createMaterializedViewStatement = (CreateMaterializedViewStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createMaterializedViewStatement); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createMaterializedViewStatement); } private static void createMvForTest(StarRocksAssert starRocksAssert, diff --git a/fe/fe-core/src/test/java/com/starrocks/privilege/RBACMockedMetadataMgr.java b/fe/fe-core/src/test/java/com/starrocks/privilege/RBACMockedMetadataMgr.java index e9d2a338d4c567..b24ea9e9490a32 100644 --- a/fe/fe-core/src/test/java/com/starrocks/privilege/RBACMockedMetadataMgr.java +++ b/fe/fe-core/src/test/java/com/starrocks/privilege/RBACMockedMetadataMgr.java @@ -21,7 +21,7 @@ import com.starrocks.catalog.View; import com.starrocks.connector.ConnectorMgr; import com.starrocks.connector.ConnectorTblMetaInfoMgr; -import com.starrocks.server.LocalMetastore; +import com.starrocks.meta.StarRocksMetadata; import com.starrocks.server.MetadataMgr; import com.starrocks.server.TemporaryTableMgr; @@ -32,12 +32,12 @@ import java.util.Optional; public class RBACMockedMetadataMgr extends MetadataMgr { - private final LocalMetastore localMetastore; + private final StarRocksMetadata localMetastore; private final IdGenerator idGenerator; private final Map databaseSet; private final Map tableMap; - public RBACMockedMetadataMgr(LocalMetastore localMetastore, ConnectorMgr connectorMgr) { + public RBACMockedMetadataMgr(StarRocksMetadata localMetastore, ConnectorMgr connectorMgr) { super(localMetastore, new TemporaryTableMgr(), connectorMgr, new ConnectorTblMetaInfoMgr()); this.localMetastore = localMetastore; idGenerator = new IdGenerator(); diff --git a/fe/fe-core/src/test/java/com/starrocks/pseudocluster/LocationLabeledTableBalanceTest.java b/fe/fe-core/src/test/java/com/starrocks/pseudocluster/LocationLabeledTableBalanceTest.java index eeecf69483ac40..ad28ac13ca933f 100644 --- a/fe/fe-core/src/test/java/com/starrocks/pseudocluster/LocationLabeledTableBalanceTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/pseudocluster/LocationLabeledTableBalanceTest.java @@ -160,7 +160,7 @@ public void test1BestEffortBalance() throws SQLException, InterruptedException { private void printTabletReplicaInfo(OlapTable table) { table.getPartitions().forEach(partition -> { - partition.getBaseIndex().getTablets().forEach(tablet -> { + partition.getDefaultPhysicalPartition().getBaseIndex().getTablets().forEach(tablet -> { StringBuffer stringBuffer = new StringBuffer(); stringBuffer.append("tablet ").append(tablet.getId()).append(": "); for (Replica replica : tablet.getAllReplicas()) { diff --git a/fe/fe-core/src/test/java/com/starrocks/pseudocluster/LocationMismatchRepairTest.java b/fe/fe-core/src/test/java/com/starrocks/pseudocluster/LocationMismatchRepairTest.java index 77652e2b487a7c..aa1c3b99f14fb6 100644 --- a/fe/fe-core/src/test/java/com/starrocks/pseudocluster/LocationMismatchRepairTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/pseudocluster/LocationMismatchRepairTest.java @@ -107,7 +107,7 @@ 
private Set getBackendIdsWithLocProp(String locationKey, String locationVa private void printTabletReplicaInfo(OlapTable table) { table.getPartitions().forEach(partition -> { - partition.getBaseIndex().getTablets().forEach(tablet -> { + partition.getDefaultPhysicalPartition().getBaseIndex().getTablets().forEach(tablet -> { StringBuffer stringBuffer = new StringBuffer(); stringBuffer.append("tablet ").append(tablet.getId()).append(": "); for (Replica replica : tablet.getAllReplicas()) { diff --git a/fe/fe-core/src/test/java/com/starrocks/qe/CoordinatorTest.java b/fe/fe-core/src/test/java/com/starrocks/qe/CoordinatorTest.java index 3929ec49600225..88ffb5333d4f08 100644 --- a/fe/fe-core/src/test/java/com/starrocks/qe/CoordinatorTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/qe/CoordinatorTest.java @@ -138,7 +138,8 @@ public void testBinlogScan() throws Exception { OlapTable olapTable = getOlapTable("t0"); List olapTableTabletIds = - olapTable.getAllPartitions().stream().flatMap(x -> x.getBaseIndex().getTabletIdsInOrder().stream()) + olapTable.getAllPartitions().stream().flatMap(x -> x.getDefaultPhysicalPartition().getBaseIndex() + .getTabletIdsInOrder().stream()) .collect(Collectors.toList()); Assert.assertFalse(olapTableTabletIds.isEmpty()); tupleDesc.setTable(olapTable); diff --git a/fe/fe-core/src/test/java/com/starrocks/qe/ShowExecutorTest.java b/fe/fe-core/src/test/java/com/starrocks/qe/ShowExecutorTest.java index b842143bc71611..f49e9399782066 100644 --- a/fe/fe-core/src/test/java/com/starrocks/qe/ShowExecutorTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/qe/ShowExecutorTest.java @@ -192,7 +192,7 @@ public void setUp() throws Exception { Partition partition = Deencapsulation.newInstance(Partition.class); new Expectations(partition) { { - partition.getBaseIndex(); + partition.getDefaultPhysicalPartition().getBaseIndex(); minTimes = 0; result = index1; } @@ -367,7 +367,7 @@ public void setUp() throws Exception { // mock globalStateMgr. 
globalStateMgr = Deencapsulation.newInstance(GlobalStateMgr.class); - LocalMetastore localMetastore = new LocalMetastore(globalStateMgr, null, null); + LocalMetastore localMetastore = new LocalMetastore(globalStateMgr); new Expectations(globalStateMgr) { { /* diff --git a/fe/fe-core/src/test/java/com/starrocks/qe/ShowTableMockMeta.java b/fe/fe-core/src/test/java/com/starrocks/qe/ShowTableMockMeta.java index 9cb52e455cdcc1..baed74eaca3b0b 100644 --- a/fe/fe-core/src/test/java/com/starrocks/qe/ShowTableMockMeta.java +++ b/fe/fe-core/src/test/java/com/starrocks/qe/ShowTableMockMeta.java @@ -23,9 +23,9 @@ import com.starrocks.common.DdlException; import com.starrocks.connector.ConnectorMgr; import com.starrocks.connector.ConnectorTblMetaInfoMgr; +import com.starrocks.meta.StarRocksMetadata; import com.starrocks.privilege.IdGenerator; import com.starrocks.server.GlobalStateMgr; -import com.starrocks.server.LocalMetastore; import com.starrocks.server.MetadataMgr; import com.starrocks.server.TemporaryTableMgr; @@ -36,7 +36,7 @@ import java.util.Optional; public class ShowTableMockMeta extends MetadataMgr { - private final LocalMetastore localMetastore; + private final StarRocksMetadata localMetastore; private final IdGenerator idGenerator; private final Map databaseSet; @@ -45,7 +45,7 @@ public class ShowTableMockMeta extends MetadataMgr { private final Map tableMap; private final Map externalTbSet; - public ShowTableMockMeta(LocalMetastore localMetastore, ConnectorMgr connectorMgr) { + public ShowTableMockMeta(StarRocksMetadata localMetastore, ConnectorMgr connectorMgr) { super(localMetastore, new TemporaryTableMgr(), connectorMgr, new ConnectorTblMetaInfoMgr()); this.localMetastore = localMetastore; idGenerator = new IdGenerator(); diff --git a/fe/fe-core/src/test/java/com/starrocks/qe/ShowTablesTest.java b/fe/fe-core/src/test/java/com/starrocks/qe/ShowTablesTest.java index efb011e0f23828..6795a976a94e39 100644 --- a/fe/fe-core/src/test/java/com/starrocks/qe/ShowTablesTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/qe/ShowTablesTest.java @@ -39,7 +39,7 @@ public static void setUp() throws Exception { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); ShowTableMockMeta metadataMgr = - new ShowTableMockMeta(globalStateMgr.getLocalMetastore(), globalStateMgr.getConnectorMgr()); + new ShowTableMockMeta(globalStateMgr.getStarRocksMetadata(), globalStateMgr.getConnectorMgr()); metadataMgr.init(); globalStateMgr.setMetadataMgr(metadataMgr); diff --git a/fe/fe-core/src/test/java/com/starrocks/qe/VariableMgrTest.java b/fe/fe-core/src/test/java/com/starrocks/qe/VariableMgrTest.java index 2a5f2bc02b9a14..2e195ad81dba21 100644 --- a/fe/fe-core/src/test/java/com/starrocks/qe/VariableMgrTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/qe/VariableMgrTest.java @@ -72,9 +72,6 @@ public void setUp() { globalStateMgr.getEditLog(); minTimes = 0; result = editLog; - - editLog.logGlobalVariable((SessionVariable) any); - minTimes = 0; } }; diff --git a/fe/fe-core/src/test/java/com/starrocks/qe/scheduler/SchedulerConnectorTestBase.java b/fe/fe-core/src/test/java/com/starrocks/qe/scheduler/SchedulerConnectorTestBase.java index 8b8968c45e97b3..5d461b7d4193b6 100644 --- a/fe/fe-core/src/test/java/com/starrocks/qe/scheduler/SchedulerConnectorTestBase.java +++ b/fe/fe-core/src/test/java/com/starrocks/qe/scheduler/SchedulerConnectorTestBase.java @@ -29,7 +29,7 @@ public static void beforeClass() throws Exception { SchedulerTestBase.beforeClass(); GlobalStateMgr gsmMgr = 
connectContext.getGlobalStateMgr(); - MockedMetadataMgr metadataMgr = new MockedMetadataMgr(gsmMgr.getLocalMetastore(), gsmMgr.getConnectorMgr()); + MockedMetadataMgr metadataMgr = new MockedMetadataMgr(gsmMgr.getStarRocksMetadata(), gsmMgr.getConnectorMgr()); gsmMgr.setMetadataMgr(metadataMgr); mockHiveCatalogImpl(metadataMgr); diff --git a/fe/fe-core/src/test/java/com/starrocks/replication/ReplicationJobTest.java b/fe/fe-core/src/test/java/com/starrocks/replication/ReplicationJobTest.java index 7d31dda4894c53..57f32465843929 100644 --- a/fe/fe-core/src/test/java/com/starrocks/replication/ReplicationJobTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/replication/ReplicationJobTest.java @@ -76,13 +76,13 @@ public static void beforeClass() throws Exception { db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); String sql = "create table single_partition_duplicate_key (key1 int, key2 varchar(10))\n" + - "distributed by hash(key1) buckets 1\n" + - "properties('replication_num' = '1'); "; + "distributed by hash(key1) buckets 1\n" + + "properties('replication_num' = '1'); "; CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, - AnalyzeTestUtil.getConnectContext()); + AnalyzeTestUtil.getConnectContext()); StarRocksAssert.utCreateTableWithRetry(createTableStmt); table = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), "single_partition_duplicate_key"); + .getTable(db.getFullName(), "single_partition_duplicate_key"); srcTable = DeepCopy.copyWithGson(table, OlapTable.class); partition = table.getPartitions().iterator().next(); @@ -98,24 +98,24 @@ public void submit(AgentBatchTask task) { @Before public void setUp() throws Exception { - partition.updateVersionForRestore(10); - srcPartition.updateVersionForRestore(100); - partition.setDataVersion(8); - partition.setNextDataVersion(9); - srcPartition.setDataVersion(98); - srcPartition.setNextDataVersion(99); + partition.getDefaultPhysicalPartition().updateVersionForRestore(10); + srcPartition.getDefaultPhysicalPartition().updateVersionForRestore(100); + partition.getDefaultPhysicalPartition().setDataVersion(8); + partition.getDefaultPhysicalPartition().setNextDataVersion(9); + srcPartition.getDefaultPhysicalPartition().setDataVersion(98); + srcPartition.getDefaultPhysicalPartition().setNextDataVersion(99); job = new ReplicationJob(null, "test_token", db.getId(), table, srcTable, - GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo()); + GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo()); } @Test public void testJobId() { ReplicationJob jobWithoutId = new ReplicationJob(null, "test_token", db.getId(), table, srcTable, - GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo()); + GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo()); Assert.assertFalse(jobWithoutId.getJobId().isEmpty()); ReplicationJob jobWithId = new ReplicationJob("fake_id", "test_token", db.getId(), table, srcTable, - GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo()); + GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo()); Assert.assertEquals("fake_id", jobWithId.getJobId()); } @@ -141,7 +141,7 @@ public void testNormal() throws Exception { job.finishRemoteSnapshotTask((RemoteSnapshotTask) task, request); Deencapsulation.invoke(new LeaderImpl(), "finishRemoteSnapshotTask", - (RemoteSnapshotTask) task, request); + (RemoteSnapshotTask) task, request); ((RemoteSnapshotTask) task).toThrift(); task.toString(); } 
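The recurring pattern in these test hunks, here in ReplicationJobTest and throughout the rest of the patch, is that version bookkeeping (visible version, committed version, data version, version epoch) is no longer read or written on Partition directly; every call site is rewritten to go through partition.getDefaultPhysicalPartition() first. The following is a minimal, self-contained sketch of that delegation, written only to illustrate the call-site migration: the names Partition, PhysicalPartition, getDefaultPhysicalPartition, getVisibleVersion and setDataVersion are taken from the diff, while the fields, constructor and main method are assumptions, not the real StarRocks classes.

// Toy model of the accessor migration; not the actual fe-core classes.
class PhysicalPartition {
    private long visibleVersion = 1;
    private long dataVersion = 1;

    long getVisibleVersion() {
        return visibleVersion;
    }

    long getDataVersion() {
        return dataVersion;
    }

    void setDataVersion(long version) {
        this.dataVersion = version;
    }
}

class Partition {
    // In this sketch, per-version state lives on the physical partition;
    // the logical Partition only hands out its default physical partition.
    private final PhysicalPartition defaultPhysicalPartition = new PhysicalPartition();

    PhysicalPartition getDefaultPhysicalPartition() {
        return defaultPhysicalPartition;
    }
}

public class PartitionAccessorMigrationSketch {
    public static void main(String[] args) {
        Partition partition = new Partition();
        // Old style (removed by this patch): partition.setDataVersion(8); partition.getVisibleVersion();
        // New style, as in the setUp() and assertions above:
        partition.getDefaultPhysicalPartition().setDataVersion(8);
        System.out.println(partition.getDefaultPhysicalPartition().getVisibleVersion());
        System.out.println(partition.getDefaultPhysicalPartition().getDataVersion());
    }
}

Under that split, the committed/visible comparisons in testNormal() naturally become comparisons between the two tables' default physical partitions, which is exactly what the modified assertions read.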
@@ -155,7 +155,7 @@ public void testNormal() throws Exception { job.finishReplicateSnapshotTask((ReplicateSnapshotTask) task, request); Deencapsulation.invoke(new LeaderImpl(), "finishReplicateSnapshotTask", - (ReplicateSnapshotTask) task, request); + (ReplicateSnapshotTask) task, request); ((ReplicateSnapshotTask) task).toThrift(); task.toString(); } @@ -163,8 +163,10 @@ public void testNormal() throws Exception { job.run(); Assert.assertEquals(ReplicationJobState.COMMITTED, job.getState()); - Assert.assertEquals(partition.getCommittedVersion(), srcPartition.getVisibleVersion()); - Assert.assertEquals(partition.getCommittedDataVersion(), srcPartition.getDataVersion()); + Assert.assertEquals(partition.getDefaultPhysicalPartition().getCommittedVersion(), + srcPartition.getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(partition.getDefaultPhysicalPartition().getCommittedDataVersion(), + srcPartition.getDefaultPhysicalPartition().getDataVersion()); } @Test @@ -321,14 +323,14 @@ public void testInitializedByThrift() { Partition partition = table.getPartitions().iterator().next(); Partition srcPartition = srcTable.getPartitions().iterator().next(); partitionInfo.partition_id = partition.getId(); - partitionInfo.src_version = srcPartition.getVisibleVersion(); - partitionInfo.src_version_epoch = srcPartition.getVersionEpoch(); + partitionInfo.src_version = srcPartition.getDefaultPhysicalPartition().getVisibleVersion(); + partitionInfo.src_version_epoch = srcPartition.getDefaultPhysicalPartition().getVersionEpoch(); request.partition_replication_infos.put(partitionInfo.partition_id, partitionInfo); partitionInfo.index_replication_infos = new HashMap(); TIndexReplicationInfo indexInfo = new TIndexReplicationInfo(); - MaterializedIndex index = partition.getBaseIndex(); - MaterializedIndex srcIndex = srcPartition.getBaseIndex(); + MaterializedIndex index = partition.getDefaultPhysicalPartition().getBaseIndex(); + MaterializedIndex srcIndex = srcPartition.getDefaultPhysicalPartition().getBaseIndex(); indexInfo.index_id = index.getId(); indexInfo.src_schema_hash = srcTable.getSchemaHashByIndexId(srcIndex.getId()); partitionInfo.index_replication_infos.put(indexInfo.index_id, indexInfo); @@ -347,7 +349,7 @@ public void testInitializedByThrift() { tabletInfo.replica_replication_infos = new ArrayList(); TReplicaReplicationInfo replicaInfo = new TReplicaReplicationInfo(); Backend backend = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackends().iterator() - .next(); + .next(); replicaInfo.src_backend = new TBackend(backend.getHost(), backend.getBePort(), backend.getHttpPort()); tabletInfo.replica_replication_infos.add(replicaInfo); } diff --git a/fe/fe-core/src/test/java/com/starrocks/replication/ReplicationMgrTest.java b/fe/fe-core/src/test/java/com/starrocks/replication/ReplicationMgrTest.java index 5b359868a5b91f..3b406bb61b7707 100644 --- a/fe/fe-core/src/test/java/com/starrocks/replication/ReplicationMgrTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/replication/ReplicationMgrTest.java @@ -102,12 +102,12 @@ public void submit(AgentBatchTask task) { @Before public void setUp() throws Exception { - partition.updateVersionForRestore(10); - srcPartition.updateVersionForRestore(100); - partition.setDataVersion(8); - partition.setNextDataVersion(9); - srcPartition.setDataVersion(98); - srcPartition.setNextDataVersion(99); + partition.getDefaultPhysicalPartition().updateVersionForRestore(10); + 
srcPartition.getDefaultPhysicalPartition().updateVersionForRestore(100); + partition.getDefaultPhysicalPartition().setDataVersion(8); + partition.getDefaultPhysicalPartition().setNextDataVersion(9); + srcPartition.getDefaultPhysicalPartition().setDataVersion(98); + srcPartition.getDefaultPhysicalPartition().setNextDataVersion(99); job = new ReplicationJob(null, "test_token", db.getId(), table, srcTable, GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo()); @@ -163,8 +163,10 @@ public void testNormal() throws Exception { replicationMgr.runAfterCatalogReady(); Assert.assertEquals(ReplicationJobState.COMMITTED, job.getState()); - Assert.assertEquals(partition.getCommittedVersion(), srcPartition.getVisibleVersion()); - Assert.assertEquals(partition.getCommittedDataVersion(), srcPartition.getDataVersion()); + Assert.assertEquals(partition.getDefaultPhysicalPartition().getCommittedVersion(), + srcPartition.getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(partition.getDefaultPhysicalPartition().getCommittedDataVersion(), + srcPartition.getDefaultPhysicalPartition().getDataVersion()); replicationMgr.replayReplicationJob(job); @@ -356,14 +358,14 @@ public void testInitializedByThrift() { Partition partition = table.getPartitions().iterator().next(); Partition srcPartition = srcTable.getPartitions().iterator().next(); partitionInfo.partition_id = partition.getId(); - partitionInfo.src_version = srcPartition.getVisibleVersion(); - partitionInfo.src_version_epoch = srcPartition.getVersionEpoch(); + partitionInfo.src_version = srcPartition.getDefaultPhysicalPartition().getVisibleVersion(); + partitionInfo.src_version_epoch = srcPartition.getDefaultPhysicalPartition().getVersionEpoch(); request.partition_replication_infos.put(partitionInfo.partition_id, partitionInfo); partitionInfo.index_replication_infos = new HashMap(); TIndexReplicationInfo indexInfo = new TIndexReplicationInfo(); - MaterializedIndex index = partition.getBaseIndex(); - MaterializedIndex srcIndex = srcPartition.getBaseIndex(); + MaterializedIndex index = partition.getDefaultPhysicalPartition().getBaseIndex(); + MaterializedIndex srcIndex = srcPartition.getDefaultPhysicalPartition().getBaseIndex(); indexInfo.index_id = index.getId(); indexInfo.src_schema_hash = srcTable.getSchemaHashByIndexId(srcIndex.getId()); partitionInfo.index_replication_infos.put(indexInfo.index_id, indexInfo); diff --git a/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorHiveTest.java b/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorHiveTest.java index 7bbfd83abf96a7..afe911ac1cf72b 100644 --- a/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorHiveTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorHiveTest.java @@ -161,11 +161,16 @@ public void testAutoRefreshPartitionLimitWithHiveTable() throws Exception { Collection partitions = materializedView.getPartitions(); Assert.assertEquals(6, partitions.size()); - Assert.assertEquals(1, materializedView.getPartition("p19980101").getVisibleVersion()); - Assert.assertEquals(1, materializedView.getPartition("p19980102").getVisibleVersion()); - Assert.assertEquals(1, materializedView.getPartition("p19980103").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980104").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980105").getVisibleVersion()); + Assert.assertEquals(1, 
materializedView.getPartition("p19980101").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p19980102").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p19980103").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980104").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980105").getDefaultPhysicalPartition() + .getVisibleVersion()); MockedHiveMetadata mockedHiveMetadata = (MockedHiveMetadata) connectContext.getGlobalStateMgr().getMetadataMgr(). @@ -173,22 +178,32 @@ public void testAutoRefreshPartitionLimitWithHiveTable() throws Exception { mockedHiveMetadata.updatePartitions("partitioned_db", "lineitem_par", ImmutableList.of("l_shipdate=1998-01-02", "l_shipdate=1998-01-03")); initAndExecuteTaskRun(taskRun); - Assert.assertEquals(1, materializedView.getPartition("p19980101").getVisibleVersion()); - Assert.assertEquals(1, materializedView.getPartition("p19980102").getVisibleVersion()); - Assert.assertEquals(1, materializedView.getPartition("p19980103").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980104").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980105").getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p19980101").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p19980102").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p19980103").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980104").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980105").getDefaultPhysicalPartition() + .getVisibleVersion()); task.setType(Constants.TaskType.MANUAL); taskRun = TaskRunBuilder.newBuilder(task).build(); initAndExecuteTaskRun(taskRun); Assert.assertEquals(6, partitions.size()); - Assert.assertEquals(2, materializedView.getPartition("p19980101").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980102").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980103").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980104").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980105").getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980101").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980102").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980103").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980104").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980105").getDefaultPhysicalPartition() + .getVisibleVersion()); starRocksAssert.useDatabase("test").dropMaterializedView("hive_parttbl_mv1"); } @@ -403,7 +418,8 @@ public void testAutoPartitionRefreshWithUnPartitionedHiveTable() throws Exceptio Collection partitions = materializedView.getPartitions(); Assert.assertEquals(1, partitions.size()); - 
Assert.assertEquals(3, materializedView.getPartition("hive_tbl_mv1").getVisibleVersion()); + Assert.assertEquals(3, materializedView.getPartition("hive_tbl_mv1").getDefaultPhysicalPartition() + .getVisibleVersion()); starRocksAssert.useDatabase("test").dropMaterializedView("hive_tbl_mv1"); } @@ -451,11 +467,16 @@ public void testAutoPartitionRefreshWithPartitionedHiveTable1() throws Exception Collection partitions = materializedView.getPartitions(); Assert.assertEquals(6, partitions.size()); - Assert.assertEquals(2, materializedView.getPartition("p19980101").getVisibleVersion()); - Assert.assertEquals(3, materializedView.getPartition("p19980102").getVisibleVersion()); - Assert.assertEquals(3, materializedView.getPartition("p19980103").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980104").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980105").getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980101").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(3, materializedView.getPartition("p19980102").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(3, materializedView.getPartition("p19980103").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980104").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980105").getDefaultPhysicalPartition() + .getVisibleVersion()); starRocksAssert.useDatabase("test").dropMaterializedView("hive_parttbl_mv1"); } @@ -499,7 +520,8 @@ public void testAutoPartitionRefreshWithPartitionedHiveTable2() throws Exception Collection partitions = materializedView.getPartitions(); Assert.assertEquals(1, partitions.size()); - Assert.assertEquals(3, materializedView.getPartition("hive_tbl_mv2").getVisibleVersion()); + Assert.assertEquals(3, materializedView.getPartition("hive_tbl_mv2").getDefaultPhysicalPartition() + .getVisibleVersion()); starRocksAssert.useDatabase("test").dropMaterializedView("hive_tbl_mv2"); } @@ -545,7 +567,8 @@ public void testAutoPartitionRefreshWithPartitionedHiveTableJoinInternalTable() Collection partitions = materializedView.getPartitions(); Assert.assertEquals(1, partitions.size()); - Assert.assertEquals(3, materializedView.getPartition("hive_join_internal_mv").getVisibleVersion()); + Assert.assertEquals(3, materializedView.getPartition("hive_join_internal_mv").getDefaultPhysicalPartition() + .getVisibleVersion()); starRocksAssert.useDatabase("test").dropMaterializedView("hive_join_internal_mv"); } @@ -593,11 +616,16 @@ public void testPartitionRefreshWithUpperCaseTable() throws Exception { Collection partitions = materializedView.getPartitions(); Assert.assertEquals(6, partitions.size()); - Assert.assertEquals(2, materializedView.getPartition("p19980101").getVisibleVersion()); - Assert.assertEquals(3, materializedView.getPartition("p19980102").getVisibleVersion()); - Assert.assertEquals(3, materializedView.getPartition("p19980103").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980104").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980105").getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980101").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(3, materializedView.getPartition("p19980102").getDefaultPhysicalPartition() + .getVisibleVersion()); + 
Assert.assertEquals(3, materializedView.getPartition("p19980103").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980104").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980105").getDefaultPhysicalPartition() + .getVisibleVersion()); starRocksAssert.useDatabase("test").dropMaterializedView("hive_parttbl_mv1"); } @@ -644,11 +672,16 @@ public void testPartitionRefreshWithUpperCaseDb() throws Exception { Collection partitions = materializedView.getPartitions(); Assert.assertEquals(6, partitions.size()); - Assert.assertEquals(2, materializedView.getPartition("p19980101").getVisibleVersion()); - Assert.assertEquals(3, materializedView.getPartition("p19980102").getVisibleVersion()); - Assert.assertEquals(3, materializedView.getPartition("p19980103").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980104").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980105").getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980101").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(3, materializedView.getPartition("p19980102").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(3, materializedView.getPartition("p19980103").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980104").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980105").getDefaultPhysicalPartition() + .getVisibleVersion()); starRocksAssert.useDatabase("test").dropMaterializedView("hive_parttbl_mv1"); } @@ -694,9 +727,12 @@ public void testPartitionRefreshWithLowerCase() throws Exception { Collection partitions = materializedView.getPartitions(); Assert.assertEquals(3, partitions.size()); - Assert.assertEquals(3, materializedView.getPartition("p0").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p1").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p2").getVisibleVersion()); + Assert.assertEquals(3, materializedView.getPartition("p0").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p1").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p2").getDefaultPhysicalPartition() + .getVisibleVersion()); starRocksAssert.useDatabase("test").dropMaterializedView("hive_parttbl_mv1"); } @@ -716,8 +752,10 @@ public void testRangePartitionRefreshWithHiveTable() throws Exception { Collection partitions = materializedView.getPartitions(); Assert.assertEquals(2, partitions.size()); - Assert.assertEquals(2, materializedView.getPartition("p19980101").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p19980102").getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980101").getDefaultPhysicalPartition() + .getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p19980102").getDefaultPhysicalPartition() + .getVisibleVersion()); PartitionBasedMvRefreshProcessor processor = (PartitionBasedMvRefreshProcessor) taskRun.getProcessor(); @@ -1147,7 +1185,7 @@ public void testCancelRefreshMV() throws Exception { try { initAndExecuteTaskRun(taskRun); } catch (Exception e) { - 
Assert.assertTrue(e.getMessage().contains("User Cancelled")); + Assert.assertTrue(e.getMessage().contains("error-msg : User Cancelled")); starRocksAssert.dropMaterializedView("hive_parttbl_mv1"); return; } diff --git a/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorIcebergTest.java b/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorIcebergTest.java index 5faed2b0f44067..f05a4e6d607a9d 100644 --- a/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorIcebergTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorIcebergTest.java @@ -36,6 +36,7 @@ import java.time.Instant; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -68,7 +69,7 @@ public void after() throws Exception { } private static void triggerRefreshMv(Database testDb, MaterializedView partitionedMaterializedView) - throws Exception { + throws Exception { Task task = TaskBuilder.buildMvTask(partitionedMaterializedView, testDb.getFullName()); TaskRun taskRun = TaskRunBuilder.newBuilder(task).build(); initAndExecuteTaskRun(taskRun); @@ -77,32 +78,32 @@ private static void triggerRefreshMv(Database testDb, MaterializedView partition @Test public void testCreateNonPartitionedMVForIceberg() throws Exception { starRocksAssert.useDatabase("test") - .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_mv1` " + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + - "REFRESH DEFERRED MANUAL\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"storage_medium\" = \"HDD\"\n" + - ")\n" + - "AS SELECT id, data, date FROM `iceberg0`.`unpartitioned_db`.`t0` as a;") - .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_mv2` " + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + - "REFRESH DEFERRED MANUAL\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"storage_medium\" = \"HDD\"\n" + - ")\n" + - "AS SELECT id, data, date FROM `iceberg0`.`partitioned_db`.`t1` as a;"); + .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_mv1` " + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + + "REFRESH DEFERRED MANUAL\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"storage_medium\" = \"HDD\"\n" + + ")\n" + + "AS SELECT id, data, date FROM `iceberg0`.`unpartitioned_db`.`t0` as a;") + .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_mv2` " + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + + "REFRESH DEFERRED MANUAL\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"storage_medium\" = \"HDD\"\n" + + ")\n" + + "AS SELECT id, data, date FROM `iceberg0`.`partitioned_db`.`t1` as a;"); // Partitioned base table { String mvName = "iceberg_mv2"; Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView mv = ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(testDb.getFullName(), mvName)); + .getTable(testDb.getFullName(), mvName)); refreshMVRange(mvName, true); List partitionNames = mv.getPartitions().stream().map(Partition::getName) - .sorted().collect(Collectors.toList()); + .sorted().collect(Collectors.toList()); Assert.assertEquals(ImmutableList.of(mvName), partitionNames); String querySql = "SELECT id, data, date FROM `iceberg0`.`partitioned_db`.`t1`"; starRocksAssert.query(querySql).explainContains(mvName); @@ -114,10 +115,10 @@ public void testCreateNonPartitionedMVForIceberg() 
throws Exception { String mvName = "iceberg_mv1"; Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView mv = ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(testDb.getFullName(), mvName)); + .getTable(testDb.getFullName(), mvName)); refreshMVRange(mvName, true); List partitionNames = mv.getPartitions().stream().map(Partition::getName) - .sorted().collect(Collectors.toList()); + .sorted().collect(Collectors.toList()); Assert.assertEquals(ImmutableList.of(mvName), partitionNames); // test rewrite @@ -131,56 +132,59 @@ public void testCreateNonPartitionedMVForIceberg() throws Exception { public void testCreatePartitionedMVForIceberg() throws Exception { String mvName = "iceberg_parttbl_mv1"; starRocksAssert.useDatabase("test") - .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_parttbl_mv1`\n" + - "PARTITION BY str2date(`date`, '%Y-%m-%d')\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + - "REFRESH DEFERRED MANUAL\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"storage_medium\" = \"HDD\"\n" + - ")\n" + - "AS SELECT id, data, date FROM `iceberg0`.`partitioned_db`.`t1` as a;"); + .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_parttbl_mv1`\n" + + "PARTITION BY str2date(`date`, '%Y-%m-%d')\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + + "REFRESH DEFERRED MANUAL\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"storage_medium\" = \"HDD\"\n" + + ")\n" + + "AS SELECT id, data, date FROM `iceberg0`.`partitioned_db`.`t1` as a;"); Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView partitionedMaterializedView = ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(testDb.getFullName(), "iceberg_parttbl_mv1")); + .getTable(testDb.getFullName(), "iceberg_parttbl_mv1")); triggerRefreshMv(testDb, partitionedMaterializedView); Collection partitions = partitionedMaterializedView.getPartitions(); Assert.assertEquals(4, partitions.size()); MockIcebergMetadata mockIcebergMetadata = - (MockIcebergMetadata) connectContext.getGlobalStateMgr().getMetadataMgr(). - getOptionalMetadata(MockIcebergMetadata.MOCKED_ICEBERG_CATALOG_NAME).get(); + (MockIcebergMetadata) connectContext.getGlobalStateMgr().getMetadataMgr(). 
+ getOptionalMetadata(MockIcebergMetadata.MOCKED_ICEBERG_CATALOG_NAME).get(); mockIcebergMetadata.updatePartitions("partitioned_db", "t1", - ImmutableList.of("date=2020-01-02")); + ImmutableList.of("date=2020-01-02")); // refresh only one partition Task task = TaskBuilder.buildMvTask(partitionedMaterializedView, testDb.getFullName()); TaskRun taskRun = TaskRunBuilder.newBuilder(task).build(); initAndExecuteTaskRun(taskRun); PartitionBasedMvRefreshProcessor processor = (PartitionBasedMvRefreshProcessor) - taskRun.getProcessor(); + taskRun.getProcessor(); MvTaskRunContext mvContext = processor.getMvContext(); ExecPlan execPlan = mvContext.getExecPlan(); assertPlanContains(execPlan, "3: date >= '2020-01-02', 3: date < '2020-01-03'"); - Map partitionVersionMap = partitionedMaterializedView.getPartitions().stream() - .collect(Collectors.toMap(Partition::getName, Partition::getVisibleVersion)); + Map partitionVersionMap = new HashMap<>(); + for (Partition p : partitionedMaterializedView.getPartitions()) { + partitionVersionMap.put(p.getName(), p.getDefaultPhysicalPartition().getVisibleVersion()); + } + Assert.assertEquals( - ImmutableMap.of("p20200104_20200105", 2L, - "p20200101_20200102", 2L, - "p20200103_20200104", 2L, - "p20200102_20200103", 3L), - ImmutableMap.copyOf(partitionVersionMap)); + ImmutableMap.of("p20200104_20200105", 2L, + "p20200101_20200102", 2L, + "p20200103_20200104", 2L, + "p20200102_20200103", 3L), + ImmutableMap.copyOf(partitionVersionMap)); // add new row and refresh again mockIcebergMetadata.updatePartitions("partitioned_db", "t1", - ImmutableList.of("date=2020-01-01")); + ImmutableList.of("date=2020-01-01")); taskRun = TaskRunBuilder.newBuilder(task).build(); initAndExecuteTaskRun(taskRun); processor = (PartitionBasedMvRefreshProcessor) - taskRun.getProcessor(); + taskRun.getProcessor(); mvContext = processor.getMvContext(); execPlan = mvContext.getExecPlan(); @@ -188,9 +192,9 @@ public void testCreatePartitionedMVForIceberg() throws Exception { // test rewrite starRocksAssert.query("SELECT id, data, date FROM `iceberg0`.`partitioned_db`.`t1`") - .explainContains(mvName); + .explainContains(mvName); starRocksAssert.query("SELECT id, data, date FROM `iceberg0`.`partitioned_db`.`t1` where date = '2020-01-01'") - .explainContains(mvName); + .explainContains(mvName); starRocksAssert.dropMaterializedView(mvName); } @@ -200,39 +204,39 @@ public void testCreatePartitionedMVForIcebergWithPartitionTransform() throws Exc { String mvName = "iceberg_year_mv1"; starRocksAssert.useDatabase("test") - .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_year_mv1`\n" + - "PARTITION BY ts\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + - "REFRESH DEFERRED MANUAL\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"storage_medium\" = \"HDD\"\n" + - ")\n" + - "AS SELECT id, data, ts FROM `iceberg0`.`partitioned_transforms_db`.`t0_year` as a;"); + .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_year_mv1`\n" + + "PARTITION BY ts\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + + "REFRESH DEFERRED MANUAL\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"storage_medium\" = \"HDD\"\n" + + ")\n" + + "AS SELECT id, data, ts FROM `iceberg0`.`partitioned_transforms_db`.`t0_year` as a;"); Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView partitionedMaterializedView = - ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(testDb.getFullName(), 
"iceberg_year_mv1")); + ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(testDb.getFullName(), "iceberg_year_mv1")); triggerRefreshMv(testDb, partitionedMaterializedView); Collection partitions = partitionedMaterializedView.getPartitions(); Assert.assertEquals(5, partitions.size()); List partitionNames = ImmutableList.of("p20190101000000", "p20200101000000", "p20210101000000", - "p20220101000000", "p20230101000000"); + "p20220101000000", "p20230101000000"); Assert.assertTrue(partitions.stream().map(Partition::getName).allMatch(partitionNames::contains)); MockIcebergMetadata mockIcebergMetadata = - (MockIcebergMetadata) connectContext.getGlobalStateMgr().getMetadataMgr(). - getOptionalMetadata(MockIcebergMetadata.MOCKED_ICEBERG_CATALOG_NAME).get(); + (MockIcebergMetadata) connectContext.getGlobalStateMgr().getMetadataMgr(). + getOptionalMetadata(MockIcebergMetadata.MOCKED_ICEBERG_CATALOG_NAME).get(); mockIcebergMetadata.updatePartitions("partitioned_transforms_db", "t0_year", - ImmutableList.of("ts_year=2020")); + ImmutableList.of("ts_year=2020")); // refresh only one partition Task task = TaskBuilder.buildMvTask(partitionedMaterializedView, testDb.getFullName()); TaskRun taskRun = TaskRunBuilder.newBuilder(task).build(); initAndExecuteTaskRun(taskRun); PartitionBasedMvRefreshProcessor processor = (PartitionBasedMvRefreshProcessor) - taskRun.getProcessor(); + taskRun.getProcessor(); MvTaskRunContext mvContext = processor.getMvContext(); ExecPlan execPlan = mvContext.getExecPlan(); @@ -240,97 +244,97 @@ public void testCreatePartitionedMVForIcebergWithPartitionTransform() throws Exc // test rewrite starRocksAssert.query("SELECT id, data, ts FROM `iceberg0`.`partitioned_transforms_db`.`t0_year`") - .explainContains(mvName); + .explainContains(mvName); starRocksAssert.dropMaterializedView(mvName); } // test partition by month(ts) { String mvName = "iceberg_month_mv1"; starRocksAssert.useDatabase("test") - .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_month_mv1`\n" + - "PARTITION BY ts\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + - "REFRESH DEFERRED MANUAL\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"storage_medium\" = \"HDD\"\n" + - ")\n" + - "AS SELECT id, data, ts FROM `iceberg0`.`partitioned_transforms_db`.`t0_month` as a;"); + .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_month_mv1`\n" + + "PARTITION BY ts\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + + "REFRESH DEFERRED MANUAL\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"storage_medium\" = \"HDD\"\n" + + ")\n" + + "AS SELECT id, data, ts FROM `iceberg0`.`partitioned_transforms_db`.`t0_month` as a;"); Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView partitionedMaterializedView = - ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(testDb.getFullName(), "iceberg_month_mv1")); + ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(testDb.getFullName(), "iceberg_month_mv1")); triggerRefreshMv(testDb, partitionedMaterializedView); Collection partitions = partitionedMaterializedView.getPartitions(); Assert.assertEquals(5, partitions.size()); List partitionNames = ImmutableList.of("p20220101000000", "p20220201000000", "p20220301000000", - "p20220401000000", "p20220501000000"); + "p20220401000000", "p20220501000000"); 
Assert.assertTrue(partitions.stream().map(Partition::getName).allMatch(partitionNames::contains)); // test rewrite starRocksAssert.query("SELECT id, data, ts FROM `iceberg0`.`partitioned_transforms_db`.`t0_month`") - .explainContains(mvName); + .explainContains(mvName); starRocksAssert.dropMaterializedView(mvName); } // test partition by day(ts) { String mvName = "iceberg_day_mv1"; starRocksAssert.useDatabase("test") - .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_day_mv1`\n" + - "PARTITION BY ts\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + - "REFRESH DEFERRED MANUAL\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"storage_medium\" = \"HDD\"\n" + - ")\n" + - "AS SELECT id, data, ts FROM `iceberg0`.`partitioned_transforms_db`.`t0_day` as a;"); + .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_day_mv1`\n" + + "PARTITION BY ts\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + + "REFRESH DEFERRED MANUAL\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"storage_medium\" = \"HDD\"\n" + + ")\n" + + "AS SELECT id, data, ts FROM `iceberg0`.`partitioned_transforms_db`.`t0_day` as a;"); Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView partitionedMaterializedView = - ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(testDb.getFullName(), "iceberg_day_mv1")); + ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(testDb.getFullName(), "iceberg_day_mv1")); triggerRefreshMv(testDb, partitionedMaterializedView); Collection partitions = partitionedMaterializedView.getPartitions(); Assert.assertEquals(5, partitions.size()); List partitionNames = ImmutableList.of("p20220101000000", "p20220102000000", "p20220103000000", - "p20220104000000", "p20220105000000"); + "p20220104000000", "p20220105000000"); Assert.assertTrue(partitions.stream().map(Partition::getName).allMatch(partitionNames::contains)); // test rewrite starRocksAssert.query("SELECT id, data, ts FROM `iceberg0`.`partitioned_transforms_db`.`t0_day`") - .explainContains(mvName); + .explainContains(mvName); starRocksAssert.dropMaterializedView(mvName); } // test partition by hour(ts) { String mvName = "iceberg_hour_mv1"; starRocksAssert.useDatabase("test") - .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_hour_mv1`\n" + - "PARTITION BY ts\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + - "REFRESH DEFERRED MANUAL\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"storage_medium\" = \"HDD\"\n" + - ")\n" + - "AS SELECT id, data, ts FROM `iceberg0`.`partitioned_transforms_db`.`t0_hour` as a;"); + .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`iceberg_hour_mv1`\n" + + "PARTITION BY ts\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + + "REFRESH DEFERRED MANUAL\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"storage_medium\" = \"HDD\"\n" + + ")\n" + + "AS SELECT id, data, ts FROM `iceberg0`.`partitioned_transforms_db`.`t0_hour` as a;"); Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView partitionedMaterializedView = - ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(testDb.getFullName(), "iceberg_hour_mv1")); + ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() + .getTable(testDb.getFullName(), "iceberg_hour_mv1")); triggerRefreshMv(testDb, partitionedMaterializedView); Collection partitions = 
partitionedMaterializedView.getPartitions(); Assert.assertEquals(5, partitions.size()); List partitionNames = ImmutableList.of("p20220101000000", "p20220101010000", "p20220101020000", - "p20220101030000", "p20220101040000"); + "p20220101030000", "p20220101040000"); Assert.assertTrue(partitions.stream().map(Partition::getName).allMatch(partitionNames::contains)); // test rewrite starRocksAssert.query("SELECT id, data, ts FROM `iceberg0`.`partitioned_transforms_db`.`t0_hour`") - .explainContains(mvName); + .explainContains(mvName); starRocksAssert.dropMaterializedView(mvName); } } @@ -338,27 +342,27 @@ public void testCreatePartitionedMVForIcebergWithPartitionTransform() throws Exc @Test public void testRefreshWithCachePartitionTraits() { starRocksAssert.withMaterializedView("CREATE MATERIALIZED VIEW `test_mv1`\n" + - "PARTITION BY str2date(`date`, '%Y-%m-%d')\n" + - "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + - "REFRESH DEFERRED MANUAL\n" + - "AS SELECT id, data, date FROM `iceberg0`.`partitioned_db`.`t1` as a;", - () -> { - MaterializedView mv = getMv("test", "test_mv1"); - PartitionBasedMvRefreshProcessor processor = refreshMV("test", mv); - RuntimeProfile runtimeProfile = processor.getRuntimeProfile(); - QueryMaterializationContext.QueryCacheStats queryCacheStats = getQueryCacheStats(runtimeProfile); - Assert.assertTrue(queryCacheStats != null); - queryCacheStats.getCounter().forEach((key, value) -> { - if (key.contains("cache_partitionNames")) { - Assert.assertEquals(1L, value.longValue()); - } else if (key.contains("cache_getPartitionKeyRange")) { - Assert.assertEquals(3L, value.longValue()); - } else { - Assert.assertEquals(1L, value.longValue()); - } - }); - Set partitionsToRefresh1 = getPartitionNamesToRefreshForMv(mv); - Assert.assertTrue(partitionsToRefresh1.isEmpty()); + "PARTITION BY str2date(`date`, '%Y-%m-%d')\n" + + "DISTRIBUTED BY HASH(`id`) BUCKETS 10\n" + + "REFRESH DEFERRED MANUAL\n" + + "AS SELECT id, data, date FROM `iceberg0`.`partitioned_db`.`t1` as a;", + () -> { + MaterializedView mv = getMv("test", "test_mv1"); + PartitionBasedMvRefreshProcessor processor = refreshMV("test", mv); + RuntimeProfile runtimeProfile = processor.getRuntimeProfile(); + QueryMaterializationContext.QueryCacheStats queryCacheStats = getQueryCacheStats(runtimeProfile); + Assert.assertTrue(queryCacheStats != null); + queryCacheStats.getCounter().forEach((key, value) -> { + if (key.contains("cache_partitionNames")) { + Assert.assertEquals(1L, value.longValue()); + } else if (key.contains("cache_getPartitionKeyRange")) { + Assert.assertEquals(3L, value.longValue()); + } else { + Assert.assertEquals(1L, value.longValue()); + } }); + Set partitionsToRefresh1 = getPartitionNamesToRefreshForMv(mv); + Assert.assertTrue(partitionsToRefresh1.isEmpty()); + }); } } \ No newline at end of file diff --git a/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorJdbcTest.java b/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorJdbcTest.java index 1e2118d3c9e417..ab017b022f2227 100644 --- a/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorJdbcTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorJdbcTest.java @@ -54,57 +54,57 @@ public static void beforeClass() throws Exception { MVRefreshTestBase.beforeClass(); ConnectorPlanTestBase.mockCatalog(connectContext, MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME); starRocksAssert - .withMaterializedView("CREATE MATERIALIZED VIEW 
`test`.`jdbc_parttbl_mv0`\n" + - "COMMENT \"MATERIALIZED_VIEW\"\n" + - "PARTITION BY (`d`)\n" + - "DISTRIBUTED BY HASH(`a`) BUCKETS 10\n" + - "REFRESH DEFERRED MANUAL\n" + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"storage_medium\" = \"HDD\"\n" + - ")\n" + - "AS SELECT `a`, `b`, `c`, `d` FROM `jdbc0`.`partitioned_db0`.`tbl0`;") - .withMaterializedView("create materialized view jdbc_parttbl_mv1 " + - "partition by ss " + - "distributed by hash(a) buckets 10 " + - "REFRESH DEFERRED MANUAL " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ") " + - "as select str2date(d,'%Y%m%d') ss, a, b, c from jdbc0.partitioned_db0.tbl1;") - .withMaterializedView("create materialized view jdbc_parttbl_mv2 " + - "partition by ss " + - "distributed by hash(a) buckets 10 " + - "REFRESH DEFERRED MANUAL " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ") " + - "as select str2date(d,'%Y%m%d') ss, a, b, c from jdbc0.partitioned_db0.tbl2;") - .withMaterializedView("create materialized view jdbc_parttbl_mv3 " + - "partition by str2date(d,'%Y%m%d') " + - "distributed by hash(a) buckets 10 " + - "REFRESH DEFERRED MANUAL " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ") " + - "as select a, b, c, d from jdbc0.partitioned_db0.tbl1;") - .withMaterializedView("create materialized view jdbc_parttbl_mv5 " + - "partition by str2date(d,'%Y%m%d') " + - "distributed by hash(a) buckets 10 " + - "REFRESH DEFERRED MANUAL " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\"\n" + - ") " + - "as select a, b, c, d from jdbc0.partitioned_db0.tbl3;") - .withMaterializedView("create materialized view jdbc_parttbl_mv6 " + - "partition by ss " + - "distributed by hash(a) buckets 10 " + - "REFRESH DEFERRED MANUAL " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"partition_refresh_number\" = \"1\"" + - ") " + - "as select str2date(d,'%Y%m%d') ss, a, b, c from jdbc0.partitioned_db0.tbl1;"); + .withMaterializedView("CREATE MATERIALIZED VIEW `test`.`jdbc_parttbl_mv0`\n" + + "COMMENT \"MATERIALIZED_VIEW\"\n" + + "PARTITION BY (`d`)\n" + + "DISTRIBUTED BY HASH(`a`) BUCKETS 10\n" + + "REFRESH DEFERRED MANUAL\n" + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"storage_medium\" = \"HDD\"\n" + + ")\n" + + "AS SELECT `a`, `b`, `c`, `d` FROM `jdbc0`.`partitioned_db0`.`tbl0`;") + .withMaterializedView("create materialized view jdbc_parttbl_mv1 " + + "partition by ss " + + "distributed by hash(a) buckets 10 " + + "REFRESH DEFERRED MANUAL " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ") " + + "as select str2date(d,'%Y%m%d') ss, a, b, c from jdbc0.partitioned_db0.tbl1;") + .withMaterializedView("create materialized view jdbc_parttbl_mv2 " + + "partition by ss " + + "distributed by hash(a) buckets 10 " + + "REFRESH DEFERRED MANUAL " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ") " + + "as select str2date(d,'%Y%m%d') ss, a, b, c from jdbc0.partitioned_db0.tbl2;") + .withMaterializedView("create materialized view jdbc_parttbl_mv3 " + + "partition by str2date(d,'%Y%m%d') " + + "distributed by hash(a) buckets 10 " + + "REFRESH DEFERRED MANUAL " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ") " + + "as select a, b, c, d from jdbc0.partitioned_db0.tbl1;") + .withMaterializedView("create materialized view jdbc_parttbl_mv5 " + + "partition by str2date(d,'%Y%m%d') " + + "distributed by hash(a) buckets 10 " + + "REFRESH DEFERRED MANUAL " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\"\n" + + ") " + + "as select a, 
b, c, d from jdbc0.partitioned_db0.tbl3;") + .withMaterializedView("create materialized view jdbc_parttbl_mv6 " + + "partition by ss " + + "distributed by hash(a) buckets 10 " + + "REFRESH DEFERRED MANUAL " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"partition_refresh_number\" = \"1\"" + + ") " + + "as select str2date(d,'%Y%m%d') ss, a, b, c from jdbc0.partitioned_db0.tbl1;"); } @AfterClass @@ -137,24 +137,24 @@ public void testJDBCProtocolType() { public void testPartitionJDBCSupported() throws Exception { // not supported Assert.assertThrows(AnalysisException.class, () -> - starRocksAssert.withMaterializedView("create materialized view mv_jdbc_postgres " + - "partition by d " + - "refresh deferred manual " + - "AS SELECT `a`, `b`, `c`, `d` FROM `jdbc_postgres`.`partitioned_db0`.`tbl0`;") + starRocksAssert.withMaterializedView("create materialized view mv_jdbc_postgres " + + "partition by d " + + "refresh deferred manual " + + "AS SELECT `a`, `b`, `c`, `d` FROM `jdbc_postgres`.`partitioned_db0`.`tbl0`;") ); // supported starRocksAssert.withMaterializedView("create materialized view mv_jdbc_mysql " + - "partition by d " + - "refresh deferred manual " + - "AS SELECT `a`, `b`, `c`, `d` FROM `jdbc0`.`partitioned_db0`.`tbl0`;"); + "partition by d " + + "refresh deferred manual " + + "AS SELECT `a`, `b`, `c`, `d` FROM `jdbc0`.`partitioned_db0`.`tbl0`;"); } @Test public void testRangePartitionChangeWithJDBCTable() throws Exception { MockedMetadataMgr metadataMgr = (MockedMetadataMgr) connectContext.getGlobalStateMgr().getMetadataMgr(); MockedJDBCMetadata mockedJDBCMetadata = - (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); + (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); mockedJDBCMetadata.initPartitions(); MaterializedView materializedView = refreshMaterializedView("jdbc_parttbl_mv0", "20230801", "20230805"); @@ -171,7 +171,7 @@ public void testRangePartitionChangeWithJDBCTable() throws Exception { private MaterializedView refreshMaterializedView(String materializedViewName, String start, String end) throws Exception { Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView materializedView = ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(testDb.getFullName(), materializedViewName)); + .getTable(testDb.getFullName(), materializedViewName)); refreshMVRange(materializedView.getName(), start, end, false); return materializedView; } @@ -180,15 +180,15 @@ private MaterializedView refreshMaterializedView(String materializedViewName, St public void testRangePartitionWithJDBCTableUseStr2Date() throws Exception { MockedMetadataMgr metadataMgr = (MockedMetadataMgr) connectContext.getGlobalStateMgr().getMetadataMgr(); MockedJDBCMetadata mockedJDBCMetadata = - (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); + (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); mockedJDBCMetadata.initPartitions(); MaterializedView materializedView = refreshMaterializedView("jdbc_parttbl_mv1", "20230731", "20230805"); List partitions = materializedView.getPartitions().stream() - .map(Partition::getName).sorted().collect(Collectors.toList()); + .map(Partition::getName).sorted().collect(Collectors.toList()); Assert.assertEquals(ImmutableList.of("p00010101_20230801", "p20230801_20230802", - 
"p20230802_20230803", "p20230803_99991231"), - partitions); + "p20230802_20230803", "p20230803_99991231"), + partitions); } @Test @@ -204,75 +204,81 @@ public void testRangePartitionWithJDBCTableUseStr2DateForError() { public void testRangePartitionWithJDBCTableUseStr2Date2() throws Exception { MockedMetadataMgr metadataMgr = (MockedMetadataMgr) connectContext.getGlobalStateMgr().getMetadataMgr(); MockedJDBCMetadata mockedJDBCMetadata = - (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); + (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); mockedJDBCMetadata.initPartitions(); MaterializedView materializedView = refreshMaterializedView("jdbc_parttbl_mv3", "20230731", "20230805"); List partitions = materializedView.getPartitions().stream() - .map(Partition::getName).sorted().collect(Collectors.toList()); + .map(Partition::getName).sorted().collect(Collectors.toList()); Assert.assertEquals(ImmutableList.of("p00010101_20230801", "p20230801_20230802", - "p20230802_20230803", "p20230803_99991231"), - partitions); + "p20230802_20230803", "p20230803_99991231"), + partitions); } @Test public void testStr2Date_DateTrunc() throws Exception { MockedMetadataMgr metadataMgr = (MockedMetadataMgr) connectContext.getGlobalStateMgr().getMetadataMgr(); MockedJDBCMetadata mockedJDBCMetadata = - (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); + (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); mockedJDBCMetadata.initPartitions(); String mvName = "jdbc_parttbl_str2date"; starRocksAssert.withMaterializedView("create materialized view jdbc_parttbl_str2date " + - "partition by date_trunc('month', ss) " + - "distributed by hash(a) buckets 10 " + - "REFRESH DEFERRED MANUAL " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"partition_refresh_number\" = \"1\"" + - ") " + - "as select str2date(d,'%Y%m%d') ss, a, b, c from jdbc0.partitioned_db0.tbl5;"); + "partition by date_trunc('month', ss) " + + "distributed by hash(a) buckets 10 " + + "REFRESH DEFERRED MANUAL " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"partition_refresh_number\" = \"1\"" + + ") " + + "as select str2date(d,'%Y%m%d') ss, a, b, c from jdbc0.partitioned_db0.tbl5;"); Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView materializedView = ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(testDb.getFullName(), mvName)); + .getTable(testDb.getFullName(), mvName)); // full refresh { starRocksAssert.getCtx().executeSql("refresh materialized view " + mvName + " force with sync mode"); List partitions = - materializedView.getPartitions().stream().map(Partition::getName).sorted() - .collect(Collectors.toList()); + materializedView.getPartitions().stream().map(Partition::getName).sorted() + .collect(Collectors.toList()); Assert.assertEquals(Arrays.asList("p000101_202308", "p202308_202309"), partitions); } // partial range refresh 1 { - Map partitionVersionMap = materializedView.getPartitions().stream().collect( - Collectors.toMap(Partition::getName, Partition::getVisibleVersion)); + Map partitionVersionMap = new HashMap<>(); + for (Partition p : materializedView.getPartitions()) { + partitionVersionMap.put(p.getName(), p.getDefaultPhysicalPartition().getVisibleVersion()); + } starRocksAssert.getCtx().executeSql("refresh 
materialized view " + mvName + - " partition start('2023-08-02') end('2023-09-01')" + - "force with sync mode"); + " partition start('2023-08-02') end('2023-09-01')" + + "force with sync mode"); List partitions = - materializedView.getPartitions().stream().map(Partition::getName).sorted() - .collect(Collectors.toList()); + materializedView.getPartitions().stream().map(Partition::getName).sorted() + .collect(Collectors.toList()); Assert.assertEquals(Arrays.asList("p000101_202308", "p202308_202309"), partitions); Assert.assertEquals(partitionVersionMap.get("p202308_202309").longValue(), - materializedView.getPartition("p202308_202309").getVisibleVersion()); + materializedView.getPartition("p202308_202309") + .getDefaultPhysicalPartition().getVisibleVersion()); } // partial range refresh 2 { - Map partitionVersionMap = materializedView.getPartitions().stream().collect( - Collectors.toMap(Partition::getName, Partition::getVisibleVersion)); + Map partitionVersionMap = new HashMap<>(); + for (Partition p : materializedView.getPartitions()) { + partitionVersionMap.put(p.getName(), p.getDefaultPhysicalPartition().getVisibleVersion()); + } starRocksAssert.getCtx().executeSql("refresh materialized view " + mvName + - " partition start('2023-07-01') end('2023-08-01')" + - "force with sync mode"); + " partition start('2023-07-01') end('2023-08-01')" + + "force with sync mode"); List partitions = - materializedView.getPartitions().stream().map(Partition::getName).sorted() - .collect(Collectors.toList()); + materializedView.getPartitions().stream().map(Partition::getName).sorted() + .collect(Collectors.toList()); Assert.assertEquals(Arrays.asList("p000101_202308", "p202308_202309"), partitions); Assert.assertEquals(partitionVersionMap.get("p202308_202309").longValue(), - materializedView.getPartition("p202308_202309").getVisibleVersion()); + materializedView.getPartition("p202308_202309").getDefaultPhysicalPartition() + .getVisibleVersion()); } starRocksAssert.dropMaterializedView(mvName); @@ -282,41 +288,41 @@ public void testStr2Date_DateTrunc() throws Exception { public void testStr2Date_TTL() throws Exception { MockedMetadataMgr metadataMgr = (MockedMetadataMgr) connectContext.getGlobalStateMgr().getMetadataMgr(); MockedJDBCMetadata mockedJDBCMetadata = - (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); + (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); mockedJDBCMetadata.initPartitions(); String mvName = "jdbc_parttbl_str2date"; starRocksAssert.withMaterializedView("create materialized view jdbc_parttbl_str2date " + - "partition by ss " + - "distributed by hash(a) buckets 10 " + - "REFRESH DEFERRED MANUAL " + - "PROPERTIES (\n" + - "\"replication_num\" = \"1\",\n" + - "\"partition_refresh_number\" = \"1\"," + - "'partition_ttl_number'='2'" + - ") " + - "as select str2date(d,'%Y%m%d') ss, a, b, c from jdbc0.partitioned_db0.tbl1;"); + "partition by ss " + + "distributed by hash(a) buckets 10 " + + "REFRESH DEFERRED MANUAL " + + "PROPERTIES (\n" + + "\"replication_num\" = \"1\",\n" + + "\"partition_refresh_number\" = \"1\"," + + "'partition_ttl_number'='2'" + + ") " + + "as select str2date(d,'%Y%m%d') ss, a, b, c from jdbc0.partitioned_db0.tbl1;"); Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView materializedView = ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(testDb.getFullName(), mvName)); + 
.getTable(testDb.getFullName(), mvName)); // initial create starRocksAssert.getCtx().executeSql("refresh materialized view " + mvName + " force with sync mode"); List partitions = - materializedView.getPartitions().stream().map(Partition::getName).sorted() - .collect(Collectors.toList()); + materializedView.getPartitions().stream().map(Partition::getName).sorted() + .collect(Collectors.toList()); Assert.assertEquals(Arrays.asList("p20230802_20230803", "p20230803_99991231"), partitions); // modify TTL { starRocksAssert.getCtx().executeSql( - String.format("alter materialized view %s set ('partition_ttl_number'='1')", mvName)); + String.format("alter materialized view %s set ('partition_ttl_number'='1')", mvName)); starRocksAssert.getCtx().executeSql(String.format("refresh materialized view %s with sync mode", mvName)); GlobalStateMgr.getCurrentState().getDynamicPartitionScheduler().runOnceForTest(); partitions = - materializedView.getPartitions().stream().map(Partition::getName).sorted() - .collect(Collectors.toList()); + materializedView.getPartitions().stream().map(Partition::getName).sorted() + .collect(Collectors.toList()); Assert.assertEquals(Arrays.asList("p20230803_99991231"), partitions); } starRocksAssert.dropMaterializedView(mvName); @@ -326,22 +332,22 @@ public void testStr2Date_TTL() throws Exception { public void testRangePartitionWithJDBCTableUseStr2Date3() throws Exception { MockedMetadataMgr metadataMgr = (MockedMetadataMgr) connectContext.getGlobalStateMgr().getMetadataMgr(); MockedJDBCMetadata mockedJDBCMetadata = - (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); + (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); mockedJDBCMetadata.initPartitions(); MaterializedView materializedView = refreshMaterializedView("jdbc_parttbl_mv5", "20230731", "20230805"); List partitions = materializedView.getPartitions().stream() - .map(Partition::getName).sorted().collect(Collectors.toList()); + .map(Partition::getName).sorted().collect(Collectors.toList()); Assert.assertEquals(ImmutableList.of("p00010101_20230801", "p20230801_20230802", - "p20230802_20230803", "p20230803_20230804"), - partitions); + "p20230802_20230803", "p20230803_20230804"), + partitions); } @Test public void testRefreshByParCreateOnlyNecessaryPar() throws Exception { MockedMetadataMgr metadataMgr = (MockedMetadataMgr) connectContext.getGlobalStateMgr().getMetadataMgr(); MockedJDBCMetadata mockedJDBCMetadata = - (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); + (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); mockedJDBCMetadata.initPartitions(); // get base table partitions @@ -350,13 +356,13 @@ public void testRefreshByParCreateOnlyNecessaryPar() throws Exception { Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView materializedView = ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(testDb.getFullName(), "jdbc_parttbl_mv6")); + .getTable(testDb.getFullName(), "jdbc_parttbl_mv6")); HashMap taskRunProperties = new HashMap<>(); // check corner case: the first partition of base table is 0000 to 20230801 // p20230801 of mv should not be created refreshMVRange(materializedView.getName(), "20230801", "20230802", false); List partitions = materializedView.getPartitions().stream() - 
.map(Partition::getName).sorted().collect(Collectors.toList()); + .map(Partition::getName).sorted().collect(Collectors.toList()); Assert.assertEquals(ImmutableList.of("p20230801_20230802"), partitions); } @@ -364,32 +370,32 @@ public void testRefreshByParCreateOnlyNecessaryPar() throws Exception { public void testStr2DateMVRefresh_Rewrite() throws Exception { MockedMetadataMgr metadataMgr = (MockedMetadataMgr) connectContext.getGlobalStateMgr().getMetadataMgr(); MockedJDBCMetadata mockedJDBCMetadata = - (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); + (MockedJDBCMetadata) metadataMgr.getOptionalMetadata(MockedJDBCMetadata.MOCKED_JDBC_CATALOG_NAME).get(); mockedJDBCMetadata.initPartitions(); String mvName = "test_mv1"; starRocksAssert.withMaterializedView("create materialized view " + mvName + " " + - "partition by str2date(d,'%Y%m%d') " + - "distributed by hash(a) " + - "REFRESH DEFERRED MANUAL " + - "PROPERTIES (\n" + - "'replication_num' = '1'" + - ") " + - "as select t1.a, t2.b, t3.c, t1.d " + - " from jdbc0.partitioned_db0.part_tbl1 as t1 " + - " inner join jdbc0.partitioned_db0.part_tbl2 t2 on t1.d=t2.d " + - " inner join jdbc0.partitioned_db0.part_tbl3 t3 on t1.d=t3.d ;"); + "partition by str2date(d,'%Y%m%d') " + + "distributed by hash(a) " + + "REFRESH DEFERRED MANUAL " + + "PROPERTIES (\n" + + "'replication_num' = '1'" + + ") " + + "as select t1.a, t2.b, t3.c, t1.d " + + " from jdbc0.partitioned_db0.part_tbl1 as t1 " + + " inner join jdbc0.partitioned_db0.part_tbl2 t2 on t1.d=t2.d " + + " inner join jdbc0.partitioned_db0.part_tbl3 t3 on t1.d=t3.d ;"); Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView materializedView = ((MaterializedView) GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(testDb.getFullName(), mvName)); + .getTable(testDb.getFullName(), mvName)); // initial create starRocksAssert.getCtx().executeSql("refresh materialized view " + mvName + " force with sync mode"); List partitions = - materializedView.getPartitions().stream().map(Partition::getName).sorted() - .collect(Collectors.toList()); + materializedView.getPartitions().stream().map(Partition::getName).sorted() + .collect(Collectors.toList()); Assert.assertEquals(Arrays.asList("p00010101_20230801", "p20230801_20230802", "p20230802_20230803", - "p20230803_99991231"), partitions); + "p20230803_99991231"), partitions); starRocksAssert.dropMaterializedView(mvName); } diff --git a/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorOlapPart2Test.java b/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorOlapPart2Test.java index 156fe68f71ba74..fe1a19ea5a13df 100644 --- a/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorOlapPart2Test.java +++ b/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorOlapPart2Test.java @@ -24,7 +24,6 @@ import com.starrocks.common.util.UUIDUtil; import com.starrocks.qe.ConnectContext; import com.starrocks.qe.SessionVariable; -import com.starrocks.scheduler.persist.TaskRunStatus; import com.starrocks.schema.MTable; import com.starrocks.server.GlobalStateMgr; import com.starrocks.sql.ast.StatementBase; @@ -273,8 +272,9 @@ public void testRefreshWithCachePartitionTraits() throws Exception { executeInsertSql(connectContext, "insert into tbl1 values(\"2022-02-20\", 2, 10)"); Partition p2 = table.getPartition("p2"); - while (p2.getVisibleVersion() != 3) { 
- System.out.println("waiting for partition p2 to be visible:" + p2.getVisibleVersion()); + while (p2.getDefaultPhysicalPartition().getVisibleVersion() != 3) { + System.out.println("waiting for partition p2 to be visible:" + + p2.getDefaultPhysicalPartition().getVisibleVersion()); Thread.sleep(1000); } MvUpdateInfo mvUpdateInfo = getMvUpdateInfo(mv); @@ -316,13 +316,13 @@ public void testTaskRun() { ), () -> { starRocksAssert - .withMaterializedView("create materialized view test_task_run \n" + + .withMaterializedView("create materialized view mv_refresh_priority\n" + "partition by date_trunc('month',k1) \n" + "distributed by hash(k2) buckets 10\n" + "refresh deferred manual\n" + "properties('replication_num' = '1', 'partition_refresh_number'='1')\n" + "as select k1, k2 from tbl6;"); - String mvName = "test_task_run"; + String mvName = "mv_refresh_priority"; Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(TEST_DB_NAME); MaterializedView mv = ((MaterializedView) testDb.getTable(mvName)); TaskManager tm = GlobalStateMgr.getCurrentState().getTaskManager(); @@ -359,7 +359,7 @@ public void testTaskRun() { while (taskRunScheduler.getRunningTaskCount() > 0) { Thread.sleep(100); } - starRocksAssert.dropMaterializedView("test_task_run"); + starRocksAssert.dropMaterializedView("mv_refresh_priority"); } ); } @@ -403,16 +403,12 @@ public void testRefreshPriority() { Task task = TaskBuilder.buildMvTask(mv, testDb.getFullName()); TaskRun taskRun = TaskRunBuilder.newBuilder(task).build(); initAndExecuteTaskRun(taskRun); - TGetTasksParams params = new TGetTasksParams(); - params.setTask_name(task.getName()); - List statuses = tm.getMatchedTaskRunStatus(params); - while (statuses.size() != 1) { - statuses = tm.getMatchedTaskRunStatus(params); + + TaskRun run = taskRunScheduler.getRunnableTaskRun(taskId); + Assert.assertEquals(Constants.TaskRunPriority.HIGHEST.value(), run.getStatus().getPriority()); + while (taskRunScheduler.getRunningTaskCount() > 0) { Thread.sleep(100); } - Assert.assertEquals(1, statuses.size()); - TaskRunStatus status = statuses.get(0); - Assert.assertEquals(Constants.TaskRunPriority.HIGHEST.value(), status.getPriority()); starRocksAssert.dropMaterializedView("mv_refresh_priority"); } ); @@ -438,16 +434,16 @@ public void testMVRefreshProperties() { () -> { String mvName = "test_mv1"; starRocksAssert.withMaterializedView("create materialized view test_mv1 \n" + - "partition by date_trunc('month',k1) \n" + - "distributed by hash(k2) buckets 10\n" + - "refresh deferred manual\n" + - "properties(" + - " 'replication_num' = '1', " + - " 'session.enable_materialized_view_rewrite' = 'true', \n" + - " 'session.enable_materialized_view_for_insert' = 'true', \n" + - " 'partition_refresh_number'='1'" + - ")\n" + - "as select k1, k2 from tbl6;", + "partition by date_trunc('month',k1) \n" + + "distributed by hash(k2) buckets 10\n" + + "refresh deferred manual\n" + + "properties(" + + " 'replication_num' = '1', " + + " 'session.enable_materialized_view_rewrite' = 'true', \n" + + " 'session.enable_materialized_view_for_insert' = 'true', \n" + + " 'partition_refresh_number'='1'" + + ")\n" + + "as select k1, k2 from tbl6;", () -> { Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); MaterializedView mv = ((MaterializedView) testDb.getTable(mvName)); diff --git a/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorOlapTest.java 
b/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorOlapTest.java index 0f3cc199babf15..16332617724626 100644 --- a/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorOlapTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/scheduler/PartitionBasedMvRefreshProcessorOlapTest.java @@ -375,18 +375,28 @@ public void testRangePartitionRefresh() throws Exception { executeInsertSql(connectContext, insertSql); refreshMVRange(materializedView.getName(), null, null, false); - Assert.assertEquals(1, materializedView.getPartition("p202112_202201").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p202201_202202").getVisibleVersion()); - Assert.assertEquals(1, materializedView.getPartition("p202202_202203").getVisibleVersion()); - Assert.assertEquals(1, materializedView.getPartition("p202203_202204").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p202204_202205").getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p202112_202201") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p202201_202202") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p202202_202203") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p202203_202204") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p202204_202205") + .getDefaultPhysicalPartition().getVisibleVersion()); refreshMVRange(materializedView.getName(), "2021-12-03", "2022-04-05", false); - Assert.assertEquals(1, materializedView.getPartition("p202112_202201").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p202201_202202").getVisibleVersion()); - Assert.assertEquals(1, materializedView.getPartition("p202202_202203").getVisibleVersion()); - Assert.assertEquals(1, materializedView.getPartition("p202203_202204").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p202204_202205").getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p202112_202201") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p202201_202202") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p202202_202203") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p202203_202204") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p202204_202205") + .getDefaultPhysicalPartition().getVisibleVersion()); insertSql = "insert into tbl4 partition(p3) values('2022-03-02',21,102);"; executeInsertSql(connectContext, insertSql); @@ -394,18 +404,28 @@ public void testRangePartitionRefresh() throws Exception { executeInsertSql(connectContext, insertSql); refreshMVRange(materializedView.getName(), "2021-12-03", "2022-03-01", false); - Assert.assertEquals(2, materializedView.getPartition("p202112_202201").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p202201_202202").getVisibleVersion()); - Assert.assertEquals(1, materializedView.getPartition("p202202_202203").getVisibleVersion()); - Assert.assertEquals(1, materializedView.getPartition("p202203_202204").getVisibleVersion()); - 
Assert.assertEquals(2, materializedView.getPartition("p202204_202205").getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p202112_202201") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p202201_202202") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p202202_202203") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p202203_202204") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p202204_202205") + .getDefaultPhysicalPartition().getVisibleVersion()); refreshMVRange(materializedView.getName(), "2021-12-03", "2022-05-06", true); - Assert.assertEquals(2, materializedView.getPartition("p202112_202201").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p202201_202202").getVisibleVersion()); - Assert.assertEquals(1, materializedView.getPartition("p202202_202203").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p202203_202204").getVisibleVersion()); - Assert.assertEquals(2, materializedView.getPartition("p202204_202205").getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p202112_202201") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p202201_202202") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(1, materializedView.getPartition("p202202_202203") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p202203_202204") + .getDefaultPhysicalPartition().getVisibleVersion()); + Assert.assertEquals(2, materializedView.getPartition("p202204_202205") + .getDefaultPhysicalPartition().getVisibleVersion()); } @Test diff --git a/fe/fe-core/src/test/java/com/starrocks/server/ConcurrentDDLTest.java b/fe/fe-core/src/test/java/com/starrocks/server/ConcurrentDDLTest.java index 8e9712043cf285..dadc2ae99fbeda 100644 --- a/fe/fe-core/src/test/java/com/starrocks/server/ConcurrentDDLTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/server/ConcurrentDDLTest.java @@ -90,9 +90,9 @@ public void testConcurrentCreatingColocateTables() throws InterruptedException { try { System.out.println("start to create table"); starRocksAssert.withTable("create table test.test_tbl_" + Thread.currentThread().getId() + - " (id int) duplicate key (id)" + - " distributed by hash(id) buckets 5183 " + - "properties(\"replication_num\"=\"1\", \"colocate_with\"=\"test_cg_001\");"); + " (id int) duplicate key (id)" + + " distributed by hash(id) buckets 5183 " + + "properties(\"replication_num\"=\"1\", \"colocate_with\"=\"test_cg_001\");"); System.out.println("end to create table"); } catch (Exception e) { throw new RuntimeException(e); @@ -113,18 +113,19 @@ public void testConcurrentCreatingColocateTables() throws InterruptedException { Database db = GlobalStateMgr.getServingState().getLocalMetastore().getDb("test"); Table table = GlobalStateMgr.getCurrentState().getLocalMetastore() - .getTable(db.getFullName(), "test_tbl_" + threadIds.get(0)); + .getTable(db.getFullName(), "test_tbl_" + threadIds.get(0)); List> bucketSeq = GlobalStateMgr.getCurrentState().getColocateTableIndex().getBackendsPerBucketSeq( - GlobalStateMgr.getCurrentState().getColocateTableIndex().getGroup(table.getId())); + 
GlobalStateMgr.getCurrentState().getColocateTableIndex().getGroup(table.getId())); // check all created colocate tables has same tablet distribution as the bucket seq in colocate group for (long threadId : threadIds) { table = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "test_tbl_" + threadId); - List tablets = table.getPartitions().stream().findFirst().get().getBaseIndex().getTabletIdsInOrder(); + List tablets = table.getPartitions().stream().findFirst().get().getDefaultPhysicalPartition() + .getBaseIndex().getTabletIdsInOrder(); List backendIdList = tablets.stream() - .map(id -> GlobalStateMgr.getCurrentState().getTabletInvertedIndex().getReplicasByTabletId(id)) - .map(replicaList -> replicaList.get(0).getBackendId()) - .collect(Collectors.toList()); + .map(id -> GlobalStateMgr.getCurrentState().getTabletInvertedIndex().getReplicasByTabletId(id)) + .map(replicaList -> replicaList.get(0).getBackendId()) + .collect(Collectors.toList()); Assert.assertEquals(bucketSeq, backendIdList.stream().map(Arrays::asList).collect(Collectors.toList())); } } @@ -132,13 +133,13 @@ public void testConcurrentCreatingColocateTables() throws InterruptedException { @Test public void testConcurrentlyDropDbAndCreateTable() throws Exception { final String createTableSqlFormat = - "CREATE TABLE IF NOT EXISTS concurrent_test_db.test_tbl_RRR(k1 int, k2 int, k3 int)" + - " distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; + "CREATE TABLE IF NOT EXISTS concurrent_test_db.test_tbl_RRR(k1 int, k2 int, k3 int)" + + " distributed by hash(k1) buckets 3 properties('replication_num' = '1');"; final String createViewSqlFormat = "CREATE VIEW IF NOT EXISTS concurrent_test_db.test_view_RRR" + - " as select k1,k2 from concurrent_test_db.base_t1;"; + " as select k1,k2 from concurrent_test_db.base_t1;"; final String createMVSqlFormat = "CREATE MATERIALIZED VIEW IF NOT EXISTS" + - " concurrent_test_db.test_mv_RRR DISTRIBUTED BY HASH(`k2`) REFRESH MANUAL" + - " as select k2,k3 from concurrent_test_db.base_t1;"; + " concurrent_test_db.test_mv_RRR DISTRIBUTED BY HASH(`k2`) REFRESH MANUAL" + + " as select k2,k3 from concurrent_test_db.base_t1;"; final int NUM_ROUND = 1; @@ -156,16 +157,16 @@ public void testConcurrentlyDropDbAndCreateTable() throws Exception { System.out.println("creating table and db time: " + times); starRocksAssert.withDatabase("concurrent_test_db"); starRocksAssert.withTable( - "CREATE TABLE IF NOT EXISTS concurrent_test_db.base_t1(k1 int, k2 int, k3 int)" + - " distributed by hash(k1) buckets 3 properties('replication_num' = '1');"); + "CREATE TABLE IF NOT EXISTS concurrent_test_db.base_t1(k1 int, k2 int, k3 int)" + + " distributed by hash(k1) buckets 3 properties('replication_num' = '1');"); int time = 300 + random.nextInt(100); // sleep random time before dropping database Thread.sleep(time); System.out.println("dropping table and db"); Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("concurrent_test_db"); ShowTableStmt showTableStmt = - (ShowTableStmt) UtFrameUtils.parseStmtWithNewParser( - "show tables from concurrent_test_db", connectContext); + (ShowTableStmt) UtFrameUtils.parseStmtWithNewParser( + "show tables from concurrent_test_db", connectContext); starRocksAssert.dropDatabase("concurrent_test_db"); System.out.println("concurrent_test_db dropped"); } catch (Exception e) { @@ -252,9 +253,9 @@ public void testConcurrentCreateSameTable() throws InterruptedException { System.out.println("start to create table same_tbl"); try 
{ starRocksAssert.withTable("create table test.same_tbl " + - " (id int) duplicate key (id)" + - " distributed by hash(id) buckets 5183 " + - "properties(\"replication_num\"=\"1\", \"colocate_with\"=\"test_cg_001\");"); + " (id int) duplicate key (id)" + + " distributed by hash(id) buckets 5183 " + + "properties(\"replication_num\"=\"1\", \"colocate_with\"=\"test_cg_001\");"); } catch (Exception e) { errorCount.incrementAndGet(); } diff --git a/fe/fe-core/src/test/java/com/starrocks/server/LocalMetaStoreTest.java b/fe/fe-core/src/test/java/com/starrocks/server/LocalMetaStoreTest.java index cd551d08a6e55e..9c99d78a9e01f0 100644 --- a/fe/fe-core/src/test/java/com/starrocks/server/LocalMetaStoreTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/server/LocalMetaStoreTest.java @@ -14,56 +14,8 @@ package com.starrocks.server; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; -import com.starrocks.catalog.DataProperty; -import com.starrocks.catalog.Database; -import com.starrocks.catalog.HiveTable; -import com.starrocks.catalog.LocalTablet; -import com.starrocks.catalog.MaterializedIndex; -import com.starrocks.catalog.OlapTable; -import com.starrocks.catalog.Partition; -import com.starrocks.catalog.PartitionInfo; -import com.starrocks.catalog.PhysicalPartitionImpl; -import com.starrocks.catalog.Table; -import com.starrocks.catalog.TabletMeta; -import com.starrocks.catalog.system.SystemId; -import com.starrocks.catalog.system.information.InfoSchemaDb; -import com.starrocks.catalog.system.sys.SysDb; -import com.starrocks.common.AnalysisException; -import com.starrocks.common.Config; -import com.starrocks.common.DdlException; -import com.starrocks.common.ErrorCode; -import com.starrocks.common.ErrorReportException; -import com.starrocks.common.FeConstants; -import com.starrocks.common.util.PropertyAnalyzer; -import com.starrocks.common.util.UUIDUtil; -import com.starrocks.common.util.concurrent.lock.LockType; -import com.starrocks.common.util.concurrent.lock.Locker; -import com.starrocks.persist.EditLog; -import com.starrocks.persist.ModifyPartitionInfo; -import com.starrocks.persist.PhysicalPartitionPersistInfoV2; -import com.starrocks.persist.TruncateTableInfo; -import com.starrocks.persist.metablock.SRMetaBlockReader; -import com.starrocks.persist.metablock.SRMetaBlockReaderV2; -import com.starrocks.qe.ConnectContext; -import com.starrocks.sql.ast.ColumnRenameClause; -import com.starrocks.thrift.TStorageMedium; -import com.starrocks.utframe.StarRocksAssert; -import com.starrocks.utframe.UtFrameUtils; -import mockit.Expectations; -import mockit.Mock; -import mockit.MockUp; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.util.List; -import java.util.Map; -import java.util.Set; - public class LocalMetaStoreTest { + /* private static ConnectContext connectContext; private static StarRocksAssert starRocksAssert; @@ -96,12 +48,12 @@ public void testGetNewPartitionsFromPartitions() throws DdlException { Partition sourcePartition = olapTable.getPartition("t1"); List sourcePartitionIds = Lists.newArrayList(sourcePartition.getId()); List tmpPartitionIds = Lists.newArrayList(connectContext.getGlobalStateMgr().getNextId()); - LocalMetastore localMetastore = connectContext.getGlobalStateMgr().getLocalMetastore(); + StarRocksMetadata starRocksMetadata = connectContext.getGlobalStateMgr().getStarRocksMetadata(); Map origPartitions = Maps.newHashMap(); - OlapTable copiedTable = 
localMetastore.getCopiedTable(db, olapTable, sourcePartitionIds, origPartitions); + OlapTable copiedTable = starRocksMetadata.getCopiedTable(db, olapTable, sourcePartitionIds, origPartitions); Assert.assertEquals(olapTable.getName(), copiedTable.getName()); Set tabletIdSet = Sets.newHashSet(); - List newPartitions = localMetastore.getNewPartitionsFromPartitions(db, + List newPartitions = starRocksMetadata.getNewPartitionsFromPartitions(db, olapTable, sourcePartitionIds, origPartitions, copiedTable, "_100", tabletIdSet, tmpPartitionIds, null, WarehouseManager.DEFAULT_WAREHOUSE_ID); Assert.assertEquals(sourcePartitionIds.size(), newPartitions.size()); @@ -149,7 +101,7 @@ public void logModifyPartition(ModifyPartitionInfo info) { }; LocalMetastore localMetastore = connectContext.getGlobalStateMgr().getLocalMetastore(); - localMetastore.getPartitionIdToStorageMediumMap(); + ReportHandler.getPartitionIdToStorageMediumMap(); // Clean test.mv1, avoid its refreshment affecting other cases in this testsuite. starRocksAssert.dropMaterializedView("test.mv1"); } @@ -184,7 +136,7 @@ public void testReplayAddSubPartition() throws DdlException { index.getId(), schemaHash, table.getPartitionInfo().getDataProperty(p.getId()).getStorageMedium()); index.addTablet(new LocalTablet(0), tabletMeta); PhysicalPartitionPersistInfoV2 info = new PhysicalPartitionPersistInfoV2( - db.getId(), table.getId(), p.getId(), new PhysicalPartitionImpl(123, "", p.getId(), 0, index)); + db.getId(), table.getId(), p.getId(), new PhysicalPartition(123, "", p.getId(), 0, index)); LocalMetastore localMetastore = connectContext.getGlobalStateMgr().getLocalMetastore(); localMetastore.replayAddSubPartition(info); @@ -311,5 +263,5 @@ public void testRenameColumnException() throws Exception { Assert.assertEquals(e.getErrorCode(), ErrorCode.ERR_DUP_FIELDNAME); } } - + */ } diff --git a/fe/fe-core/src/test/java/com/starrocks/server/MetadataMgrTest.java b/fe/fe-core/src/test/java/com/starrocks/server/MetadataMgrTest.java index 6d6383209e11f8..e0005b6c98f05f 100644 --- a/fe/fe-core/src/test/java/com/starrocks/server/MetadataMgrTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/server/MetadataMgrTest.java @@ -31,6 +31,7 @@ import com.starrocks.connector.iceberg.hive.IcebergHiveCatalog; import com.starrocks.connector.metadata.MetadataTableName; import com.starrocks.connector.metadata.iceberg.LogicalIcebergMetadataTable; +import com.starrocks.meta.StarRocksMetadata; import com.starrocks.qe.ConnectContext; import com.starrocks.sql.analyzer.AnalyzeTestUtil; import com.starrocks.sql.ast.CreateTableLikeStmt; @@ -276,7 +277,7 @@ public void testCreateIcebergTable() throws Exception { public void testHiveCreateTableLike() throws Exception { class MockedHiveMetadataMgr extends MockedMetadataMgr { - public MockedHiveMetadataMgr(LocalMetastore localMetastore, ConnectorMgr connectorMgr) { + public MockedHiveMetadataMgr(StarRocksMetadata localMetastore, ConnectorMgr connectorMgr) { super(localMetastore, connectorMgr); } @@ -316,7 +317,7 @@ public boolean tableExists(String catalogName, String dbName, String tblName) { ConnectContext connectContext = AnalyzeTestUtil.getConnectContext(); MetadataMgr metadataMgr = GlobalStateMgr.getCurrentState().getMetadataMgr(); MockedHiveMetadataMgr mockedHiveMetadataMgr = new MockedHiveMetadataMgr( - connectContext.getGlobalStateMgr().getLocalMetastore(), + connectContext.getGlobalStateMgr().getStarRocksMetadata(), connectContext.getGlobalStateMgr().getConnectorMgr()); // set to mockedHiveMetadataMgr to pass Analyzer 
check diff --git a/fe/fe-core/src/test/java/com/starrocks/server/WarehouseManagerTest.java b/fe/fe-core/src/test/java/com/starrocks/server/WarehouseManagerTest.java index d645c614496107..ba516a72f19965 100644 --- a/fe/fe-core/src/test/java/com/starrocks/server/WarehouseManagerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/server/WarehouseManagerTest.java @@ -279,7 +279,8 @@ public RunMode getCurrentRunMode() { Partition partition = new Partition(123, "aaa", null, null); MaterializedIndex index = new MaterializedIndex(1, MaterializedIndex.IndexState.NORMAL); ErrorReportException ex = Assert.assertThrows(ErrorReportException.class, - () -> scanNode.addScanRangeLocations(partition, partition, index, Collections.emptyList(), 1)); + () -> scanNode.addScanRangeLocations(partition, partition.getDefaultPhysicalPartition(), + index, Collections.emptyList(), 1)); Assert.assertEquals("No alive backend or compute node in warehouse null.", ex.getMessage()); } diff --git a/fe/fe-core/src/test/java/com/starrocks/service/FrontendServiceImplTest.java b/fe/fe-core/src/test/java/com/starrocks/service/FrontendServiceImplTest.java index 0d6ada7448397e..e7bff478839752 100644 --- a/fe/fe-core/src/test/java/com/starrocks/service/FrontendServiceImplTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/service/FrontendServiceImplTest.java @@ -292,9 +292,9 @@ public static void tearDown() throws Exception { String dropSQL2 = "drop table if exists site_access_2"; try { DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); DropTableStmt dropTableStmt2 = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL2, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt2); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt2); } catch (Exception ex) { } diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/AnalyzeStmtTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/AnalyzeStmtTest.java index 7f783507de7aca..1718ce638b0617 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/AnalyzeStmtTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/AnalyzeStmtTest.java @@ -323,7 +323,7 @@ public void testHistogramSampleRatio() { OlapTable t0 = (OlapTable) starRocksAssert.getCtx().getGlobalStateMgr() .getLocalMetastore().getDb("db").getTable("tbl"); for (Partition partition : t0.getAllPartitions()) { - partition.getBaseIndex().setRowCount(10000); + partition.getDefaultPhysicalPartition().getBaseIndex().setRowCount(10000); } String sql = "analyze table db.tbl update histogram on kk1 with 256 buckets " + @@ -332,7 +332,7 @@ public void testHistogramSampleRatio() { Assert.assertEquals("1", analyzeStmt.getProperties().get(StatsConstants.HISTOGRAM_SAMPLE_RATIO)); for (Partition partition : t0.getAllPartitions()) { - partition.getBaseIndex().setRowCount(400000); + partition.getDefaultPhysicalPartition().getBaseIndex().setRowCount(400000); } sql = "analyze table db.tbl update histogram on kk1 with 256 buckets " + @@ -341,7 +341,7 @@ public void testHistogramSampleRatio() { Assert.assertEquals("0.5", analyzeStmt.getProperties().get(StatsConstants.HISTOGRAM_SAMPLE_RATIO)); for (Partition partition : t0.getAllPartitions()) { - partition.getBaseIndex().setRowCount(20000000); + 
partition.getDefaultPhysicalPartition().getBaseIndex().setRowCount(20000000); } sql = "analyze table db.tbl update histogram on kk1 with 256 buckets " + "properties(\"histogram_sample_ratio\"=\"0.9\")"; diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/AnalyzeUtilTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/AnalyzeUtilTest.java index 4683c58b0e87c4..8808326885a454 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/AnalyzeUtilTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/AnalyzeUtilTest.java @@ -65,7 +65,7 @@ public void testSubQuery() throws Exception { sql = "create view basic as select v1 from test.t0;"; CreateViewStmt createTableStmt = (CreateViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, AnalyzeTestUtil.getConnectContext()); - GlobalStateMgr.getCurrentState().getLocalMetastore().createView(createTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createView(createTableStmt); sql = "select v1 from basic"; statementBase = SqlParser.parse(sql, AnalyzeTestUtil.getConnectContext().getSessionVariable().getSqlMode()); final ConnectContext session = AnalyzeTestUtil.getConnectContext(); diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/CTASAnalyzerTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/CTASAnalyzerTest.java index b3c36c3649cbcf..f51059c4a26751 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/CTASAnalyzerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/CTASAnalyzerTest.java @@ -65,7 +65,7 @@ public static void beforeClass() throws Exception { // create statistic CreateDbStmt dbStmt = new CreateDbStmt(false, StatsConstants.STATISTICS_DB_NAME); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(dbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(dbStmt.getFullDbName()); } catch (DdlException e) { return; } diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/PrivilegeCheckerTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/PrivilegeCheckerTest.java index 2153e5194b13df..1c5f5958d117ff 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/PrivilegeCheckerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/PrivilegeCheckerTest.java @@ -210,7 +210,7 @@ private static void createMvForTest(ConnectContext connectContext) throws Except private static void createMaterializedView(String sql, ConnectContext connectContext) throws Exception { CreateMaterializedViewStatement createMaterializedViewStatement = (CreateMaterializedViewStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createMaterializedViewStatement); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createMaterializedViewStatement); } private static void mockRepository() { @@ -2728,7 +2728,7 @@ public void testDropMaterializedViewStatement() throws Exception { ctxToTestUser(); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().dropMaterializedView(statement); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropMaterializedView(statement); } catch (Exception e) { System.out.println(e.getMessage()); Assert.assertTrue(e.getMessage().contains( @@ -2736,7 +2736,7 @@ public void testDropMaterializedViewStatement() throws Exception { } grantRevokeSqlAsRoot("grant drop on materialized view db1.mv4 to test"); - 
GlobalStateMgr.getCurrentState().getLocalMetastore().dropMaterializedView(statement); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropMaterializedView(statement); GlobalStateMgr.getCurrentState().getAuthorizationMgr().removeInvalidObject(); ctxToTestUser(); } diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/ast/DescribeStmtTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/ast/DescribeStmtTest.java index 216fdf223660b0..26d52108a5311e 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/ast/DescribeStmtTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/ast/DescribeStmtTest.java @@ -75,7 +75,7 @@ public static void tearDown() throws Exception { String dropSQL = "drop table sales_records"; try { DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(dropSQL, ctx); - GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt); } catch (Exception ex) { } diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/UtilsTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/UtilsTest.java index 09872154bd5561..f94eb9f594e223 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/UtilsTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/UtilsTest.java @@ -84,7 +84,7 @@ public class UtilsTest { protected static void setTableStatistics(OlapTable table, long rowCount) { for (Partition partition : table.getAllPartitions()) { - partition.getBaseIndex().setRowCount(rowCount); + partition.getDefaultPhysicalPartition().getBaseIndex().setRowCount(rowCount); } } @@ -129,7 +129,7 @@ public static void beforeClass() throws Exception { CreateDbStmt dbStmt = new CreateDbStmt(false, StatsConstants.STATISTICS_DB_NAME); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(dbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(dbStmt.getFullDbName()); } catch (DdlException e) { return; } diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rewrite/PredicateReorderRuleTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rewrite/PredicateReorderRuleTest.java index 781d5a9cb8c792..0e38d738f3d7d3 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rewrite/PredicateReorderRuleTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rewrite/PredicateReorderRuleTest.java @@ -107,7 +107,7 @@ public static void beforeClass() throws Exception { ");"); CreateDbStmt dbStmt = new CreateDbStmt(false, StatsConstants.STATISTICS_DB_NAME); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(dbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(dbStmt.getFullDbName()); } catch (DdlException e) { return; } diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/DistributionPrunerRuleTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/DistributionPrunerRuleTest.java index c2a3518b294e0f..b3e35721683599 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/DistributionPrunerRuleTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/DistributionPrunerRuleTest.java @@ -171,7 +171,7 @@ public void transform(@Mocked OlapTable olapTable, @Mocked Partition partition, partition.getSubPartitions(); result = Arrays.asList(partition); - 
partition.getIndex(anyLong); + partition.getDefaultPhysicalPartition().getIndex(anyLong); result = index; partition.getDistributionInfo(); diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/materialization/MVRewriteWithSchemaChangeTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/materialization/MVRewriteWithSchemaChangeTest.java index 6875211d870596..5c369fc7868597 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/materialization/MVRewriteWithSchemaChangeTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/materialization/MVRewriteWithSchemaChangeTest.java @@ -63,7 +63,8 @@ public void testSyncMVRewrite_PartitionPrune() throws Exception { " );"); String sql = "CREATE MATERIALIZED VIEW sync_mv1 AS select a, b*10 as col2, c+1 as col3 from sync_tbl_t1;"; StatementBase statementBase = UtFrameUtils.parseStmtWithNewParser(sql, connectContext); - GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView((CreateMaterializedViewStmt) statementBase); + GlobalStateMgr.getCurrentState().getStarRocksMetadata() + .createMaterializedView((CreateMaterializedViewStmt) statementBase); waitingRollupJobV2Finish(); String query = "select a, b*10 as col2, c+1 as col3 from sync_tbl_t1 order by a;"; String plan = getFragmentPlan(query); diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/materialization/MvRewritePartialPartitionTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/materialization/MvRewritePartialPartitionTest.java index f053c9e53ea6d4..22567c2be125c9 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/materialization/MvRewritePartialPartitionTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/materialization/MvRewritePartialPartitionTest.java @@ -740,7 +740,7 @@ public void testPartitionQueryRewriteSkipEmptyPartitions() throws Exception { " as" + " select id_date, sum(t1b) from table_with_day_partition group by id_date"); MaterializedView mv = starRocksAssert.getMv("test", "test_loose_mv"); - mv.getPartition("p19910330") + mv.getPartition("p19910330").getDefaultPhysicalPartition() .setVisibleVersion(Partition.PARTITION_INIT_VERSION, System.currentTimeMillis()); String query5 = "select id_date, sum(t1b) from table_with_day_partition" + " where id_date >= '1991-03-30' and id_date < '1991-04-03' group by id_date"; diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/materialization/MvRewriteTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/materialization/MvRewriteTest.java index 583774020a87cc..5c110c11ce42af 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/materialization/MvRewriteTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/rule/transformation/materialization/MvRewriteTest.java @@ -2079,7 +2079,7 @@ public void testCandidateOrdering_HierarchyAgg() throws Exception { MaterializedView mv = starRocksAssert.getMv("test", name); int mockRows = i + 1; - mv.getPartitions().forEach(p -> p.getBaseIndex().setRowCount(mockRows)); + mv.getPartitions().forEach(p -> p.getDefaultPhysicalPartition().getBaseIndex().setRowCount(mockRows)); } for (int i = 0; i < dimensions.size(); i++) { diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/statistics/CachedStatisticStorageTest.java 
b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/statistics/CachedStatisticStorageTest.java index 04947e3f6c1678..f3b44199db9f72 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/statistics/CachedStatisticStorageTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/statistics/CachedStatisticStorageTest.java @@ -87,7 +87,7 @@ public class CachedStatisticStorageTest { public static void createStatisticsTable() throws Exception { CreateDbStmt dbStmt = new CreateDbStmt(false, StatsConstants.STATISTICS_DB_NAME); try { - GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(dbStmt.getFullDbName()); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(dbStmt.getFullDbName()); } catch (DdlException e) { return; } diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/statistics/StatisticsCalculatorTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/statistics/StatisticsCalculatorTest.java index 7aa6087e3bfb23..987cc90772f507 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/statistics/StatisticsCalculatorTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/optimizer/statistics/StatisticsCalculatorTest.java @@ -244,7 +244,7 @@ public void testLogicalOlapTableScan() throws Exception { List partitionIds = partitions.stream().mapToLong(partition -> partition.getId()).boxed().collect(Collectors.toList()); for (Partition partition : partitions) { - partition.getBaseIndex().setRowCount(1000); + partition.getDefaultPhysicalPartition().getBaseIndex().setRowCount(1000); } List columns = table.getColumns(); @@ -349,9 +349,9 @@ public void testLogicalOlapTableEmptyPartition(@Mocked CachedStatisticStorage ca Partition partition2 = partitions.get(1); Partition partition3 = partitions.get(2); // mock one empty partition - partition1.setVisibleVersion(Partition.PARTITION_INIT_VERSION, System.currentTimeMillis()); - partition2.setVisibleVersion(2, System.currentTimeMillis()); - partition3.setVisibleVersion(2, System.currentTimeMillis()); + partition1.getDefaultPhysicalPartition().setVisibleVersion(Partition.PARTITION_INIT_VERSION, System.currentTimeMillis()); + partition2.getDefaultPhysicalPartition().setVisibleVersion(2, System.currentTimeMillis()); + partition3.getDefaultPhysicalPartition().setVisibleVersion(2, System.currentTimeMillis()); List partitionIds = partitions.stream().filter(partition -> !(partition.getName().equalsIgnoreCase("p1"))). mapToLong(Partition::getId).boxed().collect(Collectors.toList()); @@ -422,7 +422,7 @@ public void testLogicalOlapTableScanPartitionPrune1(@Mocked CachedStatisticStora List partitionIds = partitions.stream().filter(partition -> partition.getName().equalsIgnoreCase("p1")). mapToLong(Partition::getId).boxed().collect(Collectors.toList()); for (Partition partition : partitions) { - partition.getBaseIndex().setRowCount(1000); + partition.getDefaultPhysicalPartition().getBaseIndex().setRowCount(1000); } LogicalOlapScanOperator olapScanOperator = @@ -519,7 +519,7 @@ public void testLogicalOlapTableScanPartitionPrune2(@Mocked CachedStatisticStora List partitionIds = partitions.stream().filter(partition -> partition.getName().equalsIgnoreCase("p2")). 
mapToLong(partition -> partition.getId()).boxed().collect(Collectors.toList()); for (Partition partition : partitions) { - partition.getBaseIndex().setRowCount(1000); + partition.getDefaultPhysicalPartition().getBaseIndex().setRowCount(1000); } LogicalOlapScanOperator olapScanOperator = diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/plan/ConnectorPlanTestBase.java b/fe/fe-core/src/test/java/com/starrocks/sql/plan/ConnectorPlanTestBase.java index 6f2a3379232f62..9cf9e20c9e32b2 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/plan/ConnectorPlanTestBase.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/plan/ConnectorPlanTestBase.java @@ -88,7 +88,7 @@ public static void doInit(String warehouse) throws Exception { public static void mockAllCatalogs(ConnectContext ctx, String warehouse) throws Exception { GlobalStateMgr gsmMgr = ctx.getGlobalStateMgr(); - MockedMetadataMgr metadataMgr = new MockedMetadataMgr(gsmMgr.getLocalMetastore(), gsmMgr.getConnectorMgr()); + MockedMetadataMgr metadataMgr = new MockedMetadataMgr(gsmMgr.getStarRocksMetadata(), gsmMgr.getConnectorMgr()); gsmMgr.setMetadataMgr(metadataMgr); mockHiveCatalogImpl(metadataMgr); mockJDBCCatalogImpl(metadataMgr); @@ -104,7 +104,7 @@ public static void mockCatalog(ConnectContext ctx, String catalogName) throws Ex public static void mockCatalog(ConnectContext ctx, String catalogName, String warehouse) throws Exception { GlobalStateMgr gsmMgr = ctx.getGlobalStateMgr(); - MockedMetadataMgr metadataMgr = new MockedMetadataMgr(gsmMgr.getLocalMetastore(), gsmMgr.getConnectorMgr()); + MockedMetadataMgr metadataMgr = new MockedMetadataMgr(gsmMgr.getStarRocksMetadata(), gsmMgr.getConnectorMgr()); gsmMgr.setMetadataMgr(metadataMgr); switch (catalogName) { case MockedHiveMetadata.MOCKED_HIVE_CATALOG_NAME: @@ -150,7 +150,7 @@ public static void dropCatalog(String catalog) { public static void mockHiveCatalog(ConnectContext ctx) throws DdlException { GlobalStateMgr gsmMgr = ctx.getGlobalStateMgr(); - MockedMetadataMgr metadataMgr = new MockedMetadataMgr(gsmMgr.getLocalMetastore(), gsmMgr.getConnectorMgr()); + MockedMetadataMgr metadataMgr = new MockedMetadataMgr(gsmMgr.getStarRocksMetadata(), gsmMgr.getConnectorMgr()); gsmMgr.setMetadataMgr(metadataMgr); mockHiveCatalogImpl(metadataMgr); } diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/plan/ExternalTableTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/plan/ExternalTableTest.java index 4060dcb3881939..0b3e90066e1ced 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/plan/ExternalTableTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/plan/ExternalTableTest.java @@ -239,8 +239,9 @@ public void testJoinWithMysqlTable() throws Exception { Database db = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb("test"); OlapTable tbl = (OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), "jointest"); for (Partition partition : tbl.getPartitions()) { - partition.updateVisibleVersion(2); - for (MaterializedIndex mIndex : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { + partition.getDefaultPhysicalPartition().updateVisibleVersion(2); + for (MaterializedIndex mIndex : partition.getDefaultPhysicalPartition() + .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) { mIndex.setRowCount(10000); for (Tablet tablet : mIndex.getTablets()) { for (Replica replica : ((LocalTablet) tablet).getImmutableReplicas()) { diff --git 
a/fe/fe-core/src/test/java/com/starrocks/sql/plan/LimitTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/plan/LimitTest.java index b39d74674c0526..77b0158c8880ea 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/plan/LimitTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/plan/LimitTest.java @@ -788,7 +788,7 @@ public void testLimitPrune() throws Exception { // We need to let some tablets have data, some tablets don't data OlapTable t0 = (OlapTable) globalStateMgr.getLocalMetastore().getDb("test").getTable("t0"); - MaterializedIndex index = t0.getPartitions().stream().findFirst().get().getBaseIndex(); + MaterializedIndex index = t0.getPartitions().stream().findFirst().get().getDefaultPhysicalPartition().getBaseIndex(); LocalTablet tablets = (LocalTablet) index.getTablets().get(0); Replica replica = tablets.getSingleReplica(); new Expectations(replica) { diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/plan/PlanFragmentWithCostTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/plan/PlanFragmentWithCostTest.java index 4cf1d667314865..2ac0482d26f291 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/plan/PlanFragmentWithCostTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/plan/PlanFragmentWithCostTest.java @@ -914,7 +914,7 @@ public void testPushDownRuntimeFilterAcrossSetOperationNode() throws Exception { List tables = new ArrayList<>(Arrays.asList(t0, t1, t2)); List tabletIdsStrList = new ArrayList<>(); tables.forEach(olapTable -> tabletIdsStrList.add(Joiner.on(",") - .join(olapTable.getPartition(olapTable.getAllPartitionIds().get(0)) + .join(olapTable.getPartition(olapTable.getAllPartitionIds().get(0)).getDefaultPhysicalPartition() .getBaseIndex().getTablets().stream().map(t -> t.getId()).collect(Collectors.toList())))); ArrayList plans = new ArrayList<>(); @@ -1084,7 +1084,7 @@ public void testNotPushDownRuntimeFilterAcrossCTE() throws Exception { List tables = new ArrayList<>(Arrays.asList(t1, t2)); List tabletIdsStrList = new ArrayList<>(); tables.forEach(olapTable -> tabletIdsStrList.add(Joiner.on(",") - .join(olapTable.getPartition(olapTable.getAllPartitionIds().get(0)) + .join(olapTable.getPartition(olapTable.getAllPartitionIds().get(0)).getDefaultPhysicalPartition() .getBaseIndex().getTablets().stream().map(t -> t.getId()).collect(Collectors.toList())))); setTableStatistics(t1, 400000); diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/plan/PlanTestNoneDBBase.java b/fe/fe-core/src/test/java/com/starrocks/sql/plan/PlanTestNoneDBBase.java index 3d63995d4bef89..c9cb1b52f954ef 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/plan/PlanTestNoneDBBase.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/plan/PlanTestNoneDBBase.java @@ -245,14 +245,14 @@ public static void assertNotContains(String text, String... 
pattern) { public static void setTableStatistics(OlapTable table, long rowCount) { for (Partition partition : table.getAllPartitions()) { - partition.getBaseIndex().setRowCount(rowCount); + partition.getDefaultPhysicalPartition().getBaseIndex().setRowCount(rowCount); } } public static void setPartitionStatistics(OlapTable table, String partitionName, long rowCount) { for (Partition partition : table.getAllPartitions()) { if (partition.getName().equals(partitionName)) { - partition.getBaseIndex().setRowCount(rowCount); + partition.getDefaultPhysicalPartition().getBaseIndex().setRowCount(rowCount); } } } diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/plan/ScanTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/plan/ScanTest.java index a826bf7f1b038d..6931ab684e09b0 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/plan/ScanTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/plan/ScanTest.java @@ -468,7 +468,7 @@ public void testHint() throws Exception { Collection partitions = tb.getPartitions(); acquireReplica: for (Partition partition : partitions) { - MaterializedIndex index = partition.getIndex(tb.getBaseIndexId()); + MaterializedIndex index = partition.getDefaultPhysicalPartition().getIndex(tb.getBaseIndexId()); for (Tablet tablet : index.getTablets()) { replicaId = ((LocalTablet) tablet).getImmutableReplicas().get(0).getId(); break acquireReplica; diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/plan/ViewPlanTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/plan/ViewPlanTest.java index afe465df48053d..78b56121d486c6 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/plan/ViewPlanTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/plan/ViewPlanTest.java @@ -1722,7 +1722,7 @@ public void testAlter() throws Exception { AlterViewStmt alterViewStmt = (AlterViewStmt) UtFrameUtils.parseStmtWithNewParser(alterView, starRocksAssert.getCtx()); - GlobalStateMgr.getCurrentState().getLocalMetastore().alterView(alterViewStmt); + GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterView(alterViewStmt); sqlPlan = getFragmentPlan(alterStmt); viewPlan = getFragmentPlan("select * from " + viewName); diff --git a/fe/fe-core/src/test/java/com/starrocks/statistic/StatisticsCollectJobTest.java b/fe/fe-core/src/test/java/com/starrocks/statistic/StatisticsCollectJobTest.java index ed74fc726c8d48..6eddfbba4059f0 100644 --- a/fe/fe-core/src/test/java/com/starrocks/statistic/StatisticsCollectJobTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/statistic/StatisticsCollectJobTest.java @@ -98,7 +98,7 @@ public static void beforeClass() throws Exception { OlapTable t0 = (OlapTable) globalStateMgr.getLocalMetastore().getDb("test").getTable("t0_stats"); t0StatsTableId = t0.getId(); Partition partition = new ArrayList<>(t0.getPartitions()).get(0); - partition.updateVisibleVersion(2, t0UpdateTime + partition.getDefaultPhysicalPartition().updateVisibleVersion(2, t0UpdateTime .atZone(Clock.systemDefaultZone().getZone()).toEpochSecond() * 1000); setTableStatistics(t0, 20000000); @@ -115,7 +115,7 @@ public static void beforeClass() throws Exception { ");"); OlapTable t1 = (OlapTable) globalStateMgr.getLocalMetastore().getDb("test").getTable("t1_stats"); - new ArrayList<>(t1.getPartitions()).get(0).updateVisibleVersion(2); + new ArrayList<>(t1.getPartitions()).get(0).getDefaultPhysicalPartition().updateVisibleVersion(2); setTableStatistics(t1, 20000000); starRocksAssert.withTable("CREATE TABLE `t0_stats_partition` (\n" + @@ -143,7 +143,7 @@ public static void 
beforeClass() throws Exception { "\"in_memory\" = \"false\"\n" + ");"); OlapTable t0p = (OlapTable) globalStateMgr.getLocalMetastore().getDb("test").getTable("t0_stats_partition"); - new ArrayList<>(t0p.getPartitions()).get(0).updateVisibleVersion(2); + new ArrayList<>(t0p.getPartitions()).get(0).getDefaultPhysicalPartition().updateVisibleVersion(2); setTableStatistics(t0p, 20000000); starRocksAssert.withDatabase("stats"); @@ -161,7 +161,7 @@ public static void beforeClass() throws Exception { ");"); OlapTable tps = (OlapTable) globalStateMgr.getLocalMetastore().getDb("stats").getTable("tprimary_stats"); - new ArrayList<>(tps.getPartitions()).get(0).updateVisibleVersion(2); + new ArrayList<>(tps.getPartitions()).get(0).getDefaultPhysicalPartition().updateVisibleVersion(2); setTableStatistics(tps, 20000000); starRocksAssert.withTable("CREATE TABLE `tunique_stats` (\n" + @@ -177,7 +177,7 @@ public static void beforeClass() throws Exception { ");"); OlapTable tus = (OlapTable) globalStateMgr.getLocalMetastore().getDb("stats").getTable("tunique_stats"); - new ArrayList<>(tus.getPartitions()).get(0).updateVisibleVersion(2); + new ArrayList<>(tus.getPartitions()).get(0).getDefaultPhysicalPartition().updateVisibleVersion(2); setTableStatistics(tps, 20000000); starRocksAssert.withTable("CREATE TABLE `tcount` (\n" + @@ -192,7 +192,7 @@ public static void beforeClass() throws Exception { ");"); OlapTable tcount = (OlapTable) globalStateMgr.getLocalMetastore().getDb("stats").getTable("tcount"); - new ArrayList<>(tcount.getPartitions()).get(0).updateVisibleVersion(2); + new ArrayList<>(tcount.getPartitions()).get(0).getDefaultPhysicalPartition().updateVisibleVersion(2); setTableStatistics(tcount, 20000000); String createStructTableSql = "CREATE TABLE struct_a(\n" + @@ -207,7 +207,7 @@ public static void beforeClass() throws Exception { ");"; starRocksAssert.withTable(createStructTableSql); OlapTable structTable = (OlapTable) globalStateMgr.getLocalMetastore().getDb("stats").getTable("struct_a"); - new ArrayList<>(structTable.getPartitions()).get(0).updateVisibleVersion(2); + new ArrayList<>(structTable.getPartitions()).get(0).getDefaultPhysicalPartition().updateVisibleVersion(2); setTableStatistics(structTable, 20000000); } @@ -876,8 +876,8 @@ public void testFullStatisticsBuildCollectSQLList() { .getLocalMetastore().getDb("test").getTable("t0_stats_partition"); int i = 1; for (Partition p : t0p.getAllPartitions()) { - p.updateVisibleVersion(2); - p.getBaseIndex().setRowCount(i * 100L); + p.getDefaultPhysicalPartition().updateVisibleVersion(2); + p.getDefaultPhysicalPartition().getBaseIndex().setRowCount(i * 100L); i++; } @@ -909,8 +909,8 @@ public void testFullStatisticsBuildCollectSQLList() { Assert.assertEquals(4, collectSqlList.size()); for (Partition p : t0p.getAllPartitions()) { - p.updateVisibleVersion(2); - p.getBaseIndex().setRowCount(0); + p.getDefaultPhysicalPartition().updateVisibleVersion(2); + p.getDefaultPhysicalPartition().getBaseIndex().setRowCount(0); } collectSqlList = collectJob.buildCollectSQLList(1); diff --git a/fe/fe-core/src/test/java/com/starrocks/statistic/StatisticsExecutorTest.java b/fe/fe-core/src/test/java/com/starrocks/statistic/StatisticsExecutorTest.java index ee9d6fecae224e..7148f50fada696 100644 --- a/fe/fe-core/src/test/java/com/starrocks/statistic/StatisticsExecutorTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/statistic/StatisticsExecutorTest.java @@ -73,7 +73,8 @@ public static void beforeClass() throws Exception { OlapTable t0 = (OlapTable) 
globalStateMgr.getLocalMetastore().getDb("test").getTable("t0_stats"); Partition partition = new ArrayList<>(t0.getPartitions()).get(0); - partition.updateVisibleVersion(2, LocalDateTime.of(2022, 1, 1, 1, 1, 1) + partition.getDefaultPhysicalPartition() + .updateVisibleVersion(2, LocalDateTime.of(2022, 1, 1, 1, 1, 1) .atZone(Clock.systemDefaultZone().getZone()).toEpochSecond() * 1000); setTableStatistics(t0, 20000000); } diff --git a/fe/fe-core/src/test/java/com/starrocks/system/SystemInfoServiceTest.java b/fe/fe-core/src/test/java/com/starrocks/system/SystemInfoServiceTest.java index e66624c2e58ca7..5dccf313ffc67f 100644 --- a/fe/fe-core/src/test/java/com/starrocks/system/SystemInfoServiceTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/system/SystemInfoServiceTest.java @@ -17,9 +17,9 @@ import com.google.api.client.util.Maps; import com.starrocks.common.DdlException; import com.starrocks.common.Pair; +import com.starrocks.meta.StarRocksMetadata; import com.starrocks.persist.EditLog; import com.starrocks.server.GlobalStateMgr; -import com.starrocks.server.LocalMetastore; import com.starrocks.server.RunMode; import com.starrocks.service.FrontendOptions; import com.starrocks.sql.analyzer.AlterSystemStmtAnalyzer; @@ -178,7 +178,7 @@ public RunMode getCurrentRunMode() { Backend be = new Backend(10001, "newHost", 1000); service.addBackend(be); - LocalMetastore localMetastore = new LocalMetastore(globalStateMgr, null, null); + StarRocksMetadata starRocksMetadata = new StarRocksMetadata(); new Expectations() { { @@ -186,9 +186,9 @@ public RunMode getCurrentRunMode() { minTimes = 0; result = be; - globalStateMgr.getLocalMetastore(); + globalStateMgr.getStarRocksMetadata(); minTimes = 0; - result = localMetastore; + result = starRocksMetadata; } }; @@ -211,16 +211,16 @@ public RunMode getCurrentRunMode() { Backend be = new Backend(10001, "newHost", 1000); be.setStarletPort(1001); - LocalMetastore localMetastore = new LocalMetastore(globalStateMgr, null, null); + StarRocksMetadata starRocksMetadata = new StarRocksMetadata(); new Expectations() { { service.getBackendWithHeartbeatPort("newHost", 1000); minTimes = 0; result = be; - globalStateMgr.getLocalMetastore(); + globalStateMgr.getStarRocksMetadata(); minTimes = 0; - result = localMetastore; + result = starRocksMetadata; } }; diff --git a/fe/fe-core/src/test/java/com/starrocks/task/AgentTaskTest.java b/fe/fe-core/src/test/java/com/starrocks/task/AgentTaskTest.java index ca41fc62b4a4bf..86b6bc74051199 100644 --- a/fe/fe-core/src/test/java/com/starrocks/task/AgentTaskTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/task/AgentTaskTest.java @@ -313,8 +313,7 @@ public void failedAgentTaskTest() { @Test public void testBackendNoAlive() { - LocalMetastore localMetastore = new LocalMetastore(GlobalStateMgr.getCurrentState(), - null, null); + LocalMetastore localMetastore = new LocalMetastore(GlobalStateMgr.getCurrentState()); List tasks = new ArrayList<>(); tasks.add((CreateReplicaTask) createReplicaTask); @@ -337,8 +336,7 @@ public ComputeNode getBackendOrComputeNode(long backendId) { } }; - LocalMetastore localMetastore = new LocalMetastore(GlobalStateMgr.getCurrentState(), - null, null); + LocalMetastore localMetastore = new LocalMetastore(GlobalStateMgr.getCurrentState()); List tasks = new ArrayList<>(); tasks.add((CreateReplicaTask) createReplicaTask); diff --git a/fe/fe-core/src/test/java/com/starrocks/transaction/GlobalTransactionMgrTest.java b/fe/fe-core/src/test/java/com/starrocks/transaction/GlobalTransactionMgrTest.java index 
9bc3051327df9d..975f5a1ba5569d 100644 --- a/fe/fe-core/src/test/java/com/starrocks/transaction/GlobalTransactionMgrTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/transaction/GlobalTransactionMgrTest.java @@ -116,14 +116,14 @@ public class GlobalTransactionMgrTest { private static GlobalStateMgr slaveGlobalStateMgr; private TransactionState.TxnCoordinator transactionSource = - new TransactionState.TxnCoordinator(TransactionState.TxnSourceType.FE, "localfe"); + new TransactionState.TxnCoordinator(TransactionState.TxnSourceType.FE, "localfe"); private TransactionState.TxnCoordinator transactionSourceBe = - new TransactionState.TxnCoordinator(TransactionState.TxnSourceType.BE, "localbe"); + new TransactionState.TxnCoordinator(TransactionState.TxnSourceType.BE, "localbe"); @Before public void setUp() throws InstantiationException, IllegalAccessException, IllegalArgumentException, - InvocationTargetException, NoSuchMethodException, SecurityException { + InvocationTargetException, NoSuchMethodException, SecurityException { fakeEditLog = new FakeEditLog(); fakeGlobalStateMgr = new FakeGlobalStateMgr(); fakeTransactionIDGenerator = new FakeTransactionIDGenerator(); @@ -143,15 +143,15 @@ public void tearDown() { @Test public void testBeginTransaction() throws LabelAlreadyUsedException, AnalysisException, - RunningTxnExceedException, DuplicatedRequestException { + RunningTxnExceedException, DuplicatedRequestException { FakeGlobalStateMgr.setGlobalStateMgr(masterGlobalStateMgr); long transactionId = masterTransMgr - .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), - GlobalStateMgrTestUtil.testTxnLable1, - transactionSource, - LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); + .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), + GlobalStateMgrTestUtil.testTxnLable1, + transactionSource, + LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); TransactionState transactionState = - masterTransMgr.getTransactionState(GlobalStateMgrTestUtil.testDbId1, transactionId); + masterTransMgr.getTransactionState(GlobalStateMgrTestUtil.testDbId1, transactionId); assertNotNull(transactionState); assertEquals(transactionId, transactionState.getTransactionId()); assertEquals(TransactionStatus.PREPARE, transactionState.getTransactionStatus()); @@ -161,21 +161,21 @@ public void testBeginTransaction() throws LabelAlreadyUsedException, AnalysisExc @Test public void testBeginTransactionWithSameLabel() throws LabelAlreadyUsedException, AnalysisException, - RunningTxnExceedException, DuplicatedRequestException { + RunningTxnExceedException, DuplicatedRequestException { FakeGlobalStateMgr.setGlobalStateMgr(masterGlobalStateMgr); long transactionId = 0; try { transactionId = masterTransMgr - .beginTransaction(GlobalStateMgrTestUtil.testDbId1, - Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), - GlobalStateMgrTestUtil.testTxnLable1, - transactionSource, - LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); + .beginTransaction(GlobalStateMgrTestUtil.testDbId1, + Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), + GlobalStateMgrTestUtil.testTxnLable1, + transactionSource, + LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); } catch (AnalysisException | LabelAlreadyUsedException e) { e.printStackTrace(); } TransactionState transactionState = - masterTransMgr.getTransactionState(GlobalStateMgrTestUtil.testDbId1, 
transactionId); + masterTransMgr.getTransactionState(GlobalStateMgrTestUtil.testDbId1, transactionId); assertNotNull(transactionState); assertEquals(transactionId, transactionState.getTransactionId()); assertEquals(TransactionStatus.PREPARE, transactionState.getTransactionStatus()); @@ -184,11 +184,11 @@ public void testBeginTransactionWithSameLabel() throws LabelAlreadyUsedException try { transactionId = masterTransMgr - .beginTransaction(GlobalStateMgrTestUtil.testDbId1, - Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), - GlobalStateMgrTestUtil.testTxnLable1, - transactionSource, - LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); + .beginTransaction(GlobalStateMgrTestUtil.testDbId1, + Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), + GlobalStateMgrTestUtil.testTxnLable1, + transactionSource, + LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); } catch (Exception e) { // TODO: handle exception } @@ -199,36 +199,36 @@ public void testBeginTransactionWithSameLabel() throws LabelAlreadyUsedException public void testCommitTransaction1() throws UserException { FakeGlobalStateMgr.setGlobalStateMgr(masterGlobalStateMgr); long transactionId = masterTransMgr - .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), - GlobalStateMgrTestUtil.testTxnLable1, - transactionSource, - LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); + .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), + GlobalStateMgrTestUtil.testTxnLable1, + transactionSource, + LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); // commit a transaction TabletCommitInfo tabletCommitInfo1 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, - GlobalStateMgrTestUtil.testBackendId1); + GlobalStateMgrTestUtil.testBackendId1); TabletCommitInfo tabletCommitInfo2 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, - GlobalStateMgrTestUtil.testBackendId2); + GlobalStateMgrTestUtil.testBackendId2); TabletCommitInfo tabletCommitInfo3 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, - GlobalStateMgrTestUtil.testBackendId3); + GlobalStateMgrTestUtil.testBackendId3); List transTablets = Lists.newArrayList(); transTablets.add(tabletCommitInfo1); transTablets.add(tabletCommitInfo2); transTablets.add(tabletCommitInfo3); masterTransMgr.commitTransaction(GlobalStateMgrTestUtil.testDbId1, transactionId, transTablets, - Lists.newArrayList(), null); + Lists.newArrayList(), null); TransactionState transactionState = fakeEditLog.getTransaction(transactionId); // check status is committed assertEquals(TransactionStatus.COMMITTED, transactionState.getTransactionStatus()); // check replica version Partition testPartition = masterGlobalStateMgr.getLocalMetastore() - .getTable(GlobalStateMgrTestUtil.testDbId1, GlobalStateMgrTestUtil.testTableId1) - .getPartition(GlobalStateMgrTestUtil.testPartition1); + .getTable(GlobalStateMgrTestUtil.testDbId1, GlobalStateMgrTestUtil.testTableId1) + .getPartition(GlobalStateMgrTestUtil.testPartition1); // check partition version - assertEquals(GlobalStateMgrTestUtil.testStartVersion, testPartition.getVisibleVersion()); - assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, testPartition.getNextVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion, testPartition.getDefaultPhysicalPartition().getVisibleVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, 
testPartition.getDefaultPhysicalPartition().getNextVersion()); // check partition next version - LocalTablet tablet = (LocalTablet) testPartition.getIndex(GlobalStateMgrTestUtil.testIndexId1) - .getTablet(GlobalStateMgrTestUtil.testTabletId1); + LocalTablet tablet = (LocalTablet) testPartition.getDefaultPhysicalPartition() + .getIndex(GlobalStateMgrTestUtil.testIndexId1).getTablet(GlobalStateMgrTestUtil.testTabletId1); for (Replica replica : tablet.getImmutableReplicas()) { assertEquals(GlobalStateMgrTestUtil.testStartVersion, replica.getVersion()); } @@ -244,20 +244,20 @@ public void testCommitTransactionWithOneFailed() throws UserException { TransactionState transactionState = null; FakeGlobalStateMgr.setGlobalStateMgr(masterGlobalStateMgr); long transactionId = masterTransMgr - .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), - GlobalStateMgrTestUtil.testTxnLable1, - transactionSource, - LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); + .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), + GlobalStateMgrTestUtil.testTxnLable1, + transactionSource, + LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); // commit a transaction with 1,2 success TabletCommitInfo tabletCommitInfo1 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, - GlobalStateMgrTestUtil.testBackendId1); + GlobalStateMgrTestUtil.testBackendId1); TabletCommitInfo tabletCommitInfo2 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, - GlobalStateMgrTestUtil.testBackendId2); + GlobalStateMgrTestUtil.testBackendId2); List transTablets = Lists.newArrayList(); transTablets.add(tabletCommitInfo1); transTablets.add(tabletCommitInfo2); masterTransMgr.commitTransaction(GlobalStateMgrTestUtil.testDbId1, transactionId, transTablets, - Lists.newArrayList(), null); + Lists.newArrayList(), null); // follower globalStateMgr replay the transaction transactionState = fakeEditLog.getTransaction(transactionId); @@ -268,19 +268,19 @@ public void testCommitTransactionWithOneFailed() throws UserException { FakeGlobalStateMgr.setGlobalStateMgr(masterGlobalStateMgr); // commit another transaction with 1,3 success long transactionId2 = masterTransMgr - .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), - GlobalStateMgrTestUtil.testTxnLable2, - transactionSource, - LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); + .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), + GlobalStateMgrTestUtil.testTxnLable2, + transactionSource, + LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); tabletCommitInfo1 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId1); TabletCommitInfo tabletCommitInfo3 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, - GlobalStateMgrTestUtil.testBackendId3); + GlobalStateMgrTestUtil.testBackendId3); transTablets = Lists.newArrayList(); transTablets.add(tabletCommitInfo1); transTablets.add(tabletCommitInfo3); try { masterTransMgr.commitTransaction(GlobalStateMgrTestUtil.testDbId1, transactionId2, transTablets, - Lists.newArrayList(), null); + Lists.newArrayList(), null); Assert.fail(); } catch (TabletQuorumFailedException e) { transactionState = masterTransMgr.getTransactionState(GlobalStateMgrTestUtil.testDbId1, transactionId2); @@ -289,15 +289,16 
@@ public void testCommitTransactionWithOneFailed() throws UserException { } // check replica version Partition testPartition = - masterGlobalStateMgr.getLocalMetastore() - .getTable(GlobalStateMgrTestUtil.testDbId1, GlobalStateMgrTestUtil.testTableId1) - .getPartition(GlobalStateMgrTestUtil.testPartition1); + masterGlobalStateMgr.getLocalMetastore() + .getTable(GlobalStateMgrTestUtil.testDbId1, GlobalStateMgrTestUtil.testTableId1) + .getPartition(GlobalStateMgrTestUtil.testPartition1); // check partition version - assertEquals(GlobalStateMgrTestUtil.testStartVersion, testPartition.getVisibleVersion()); - assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, testPartition.getNextVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion, testPartition.getDefaultPhysicalPartition().getVisibleVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, testPartition.getDefaultPhysicalPartition().getNextVersion()); // check partition next version - LocalTablet tablet = (LocalTablet) testPartition.getIndex(GlobalStateMgrTestUtil.testIndexId1) - .getTablet(GlobalStateMgrTestUtil.testTabletId1); + LocalTablet tablet = (LocalTablet) testPartition.getDefaultPhysicalPartition() + .getIndex(GlobalStateMgrTestUtil.testIndexId1) + .getTablet(GlobalStateMgrTestUtil.testTabletId1); for (Replica replica : tablet.getImmutableReplicas()) { assertEquals(GlobalStateMgrTestUtil.testStartVersion, replica.getVersion()); } @@ -313,20 +314,20 @@ public void testCommitTransactionWithOneFailed() throws UserException { transTablets.add(tabletCommitInfo2); transTablets.add(tabletCommitInfo3); masterTransMgr.commitTransaction(GlobalStateMgrTestUtil.testDbId1, transactionId2, transTablets, - Lists.newArrayList(), null); + Lists.newArrayList(), null); transactionState = fakeEditLog.getTransaction(transactionId2); // check status is commit assertEquals(TransactionStatus.COMMITTED, transactionState.getTransactionStatus()); // check replica version testPartition = masterGlobalStateMgr.getLocalMetastore() - .getTable(GlobalStateMgrTestUtil.testDbId1, GlobalStateMgrTestUtil.testTableId1) - .getPartition(GlobalStateMgrTestUtil.testPartition1); + .getTable(GlobalStateMgrTestUtil.testDbId1, GlobalStateMgrTestUtil.testTableId1) + .getPartition(GlobalStateMgrTestUtil.testPartition1); // check partition version - assertEquals(GlobalStateMgrTestUtil.testStartVersion, testPartition.getVisibleVersion()); - assertEquals(GlobalStateMgrTestUtil.testStartVersion + 3, testPartition.getNextVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion, testPartition.getDefaultPhysicalPartition().getVisibleVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion + 3, testPartition.getDefaultPhysicalPartition().getNextVersion()); // check partition next version - tablet = (LocalTablet) testPartition.getIndex(GlobalStateMgrTestUtil.testIndexId1) - .getTablet(GlobalStateMgrTestUtil.testTabletId1); + tablet = (LocalTablet) testPartition.getDefaultPhysicalPartition().getIndex(GlobalStateMgrTestUtil.testIndexId1) + .getTablet(GlobalStateMgrTestUtil.testTabletId1); for (Replica replica : tablet.getImmutableReplicas()) { assertEquals(GlobalStateMgrTestUtil.testStartVersion, replica.getVersion()); } @@ -345,8 +346,8 @@ public void testCommitTransactionWithOneFailed() throws UserException { assertEquals(GlobalStateMgrTestUtil.testStartVersion, replcia2.getLastSuccessVersion()); assertEquals(GlobalStateMgrTestUtil.testStartVersion, replcia3.getLastSuccessVersion()); // check partition version - 
assertEquals(GlobalStateMgrTestUtil.testStartVersion, testPartition.getVisibleVersion()); - assertEquals(GlobalStateMgrTestUtil.testStartVersion + 3, testPartition.getNextVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion, testPartition.getDefaultPhysicalPartition().getVisibleVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion + 3, testPartition.getDefaultPhysicalPartition().getNextVersion()); transactionState = fakeEditLog.getTransaction(transactionId2); FakeGlobalStateMgr.setGlobalStateMgr(slaveGlobalStateMgr); @@ -357,35 +358,35 @@ public void testCommitTransactionWithOneFailed() throws UserException { @Test public void testCommitRoutineLoadTransaction(@Injectable TabletCommitInfo tabletCommitInfo, @Mocked EditLog editLog) - throws UserException { + throws UserException { FakeGlobalStateMgr.setGlobalStateMgr(masterGlobalStateMgr); TabletCommitInfo tabletCommitInfo1 = - new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId1); + new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId1); TabletCommitInfo tabletCommitInfo2 = - new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId2); + new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId2); TabletCommitInfo tabletCommitInfo3 = - new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId3); + new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId3); List transTablets = Lists.newArrayList(); transTablets.add(tabletCommitInfo1); transTablets.add(tabletCommitInfo2); transTablets.add(tabletCommitInfo3); KafkaRoutineLoadJob routineLoadJob = new KafkaRoutineLoadJob(1L, "test", 1L, 1L, "host:port", - "topic"); + "topic"); List routineLoadTaskInfoList = - Deencapsulation.getField(routineLoadJob, "routineLoadTaskInfoList"); + Deencapsulation.getField(routineLoadJob, "routineLoadTaskInfoList"); Map partitionIdToOffset = Maps.newHashMap(); partitionIdToOffset.put(1, 0L); KafkaTaskInfo routineLoadTaskInfo = - new KafkaTaskInfo(UUID.randomUUID(), routineLoadJob, 20000, System.currentTimeMillis(), - partitionIdToOffset, Config.routine_load_task_timeout_second); + new KafkaTaskInfo(UUID.randomUUID(), routineLoadJob, 20000, System.currentTimeMillis(), + partitionIdToOffset, Config.routine_load_task_timeout_second); Deencapsulation.setField(routineLoadTaskInfo, "txnId", 1L); routineLoadTaskInfoList.add(routineLoadTaskInfo); TransactionState transactionState = new TransactionState(1L, Lists.newArrayList(1L), 1L, "label", null, - LoadJobSourceType.ROUTINE_LOAD_TASK, new TxnCoordinator(TxnSourceType.BE, "be1"), - routineLoadJob.getId(), - Config.stream_load_default_timeout_second); + LoadJobSourceType.ROUTINE_LOAD_TASK, new TxnCoordinator(TxnSourceType.BE, "be1"), + routineLoadJob.getId(), + Config.stream_load_default_timeout_second); transactionState.setTransactionStatus(TransactionStatus.PREPARE); masterTransMgr.getCallbackFactory().addCallback(routineLoadJob); // Deencapsulation.setField(transactionState, "txnStateChangeListener", routineLoadJob); @@ -433,9 +434,9 @@ LoadJobSourceType.ROUTINE_LOAD_TASK, new TxnCoordinator(TxnSourceType.BE, "be1") routineLoadManager.addRoutineLoadJob(routineLoadJob, "db"); Deencapsulation.setField(masterTransMgr.getDatabaseTransactionMgr(GlobalStateMgrTestUtil.testDbId1), - "idToRunningTransactionState", idToTransactionState); + 
"idToRunningTransactionState", idToTransactionState); masterTransMgr.commitTransaction(1L, 1L, transTablets, Lists.newArrayList(), - txnCommitAttachment); + txnCommitAttachment); Assert.assertEquals(Long.valueOf(101), Deencapsulation.getField(routineLoadJob, "currentTotalRows")); Assert.assertEquals(Long.valueOf(1), Deencapsulation.getField(routineLoadJob, "currentErrorRows")); @@ -452,31 +453,31 @@ public void testCommitRoutineLoadTransactionWithErrorMax(@Injectable TabletCommi FakeGlobalStateMgr.setGlobalStateMgr(masterGlobalStateMgr); TabletCommitInfo tabletCommitInfo1 = - new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId1); + new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId1); TabletCommitInfo tabletCommitInfo2 = - new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId2); + new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId2); TabletCommitInfo tabletCommitInfo3 = - new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId3); + new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId3); List transTablets = Lists.newArrayList(); transTablets.add(tabletCommitInfo1); transTablets.add(tabletCommitInfo2); transTablets.add(tabletCommitInfo3); KafkaRoutineLoadJob routineLoadJob = - new KafkaRoutineLoadJob(1L, "test", 1L, 1L, "host:port", "topic"); + new KafkaRoutineLoadJob(1L, "test", 1L, 1L, "host:port", "topic"); List routineLoadTaskInfoList = - Deencapsulation.getField(routineLoadJob, "routineLoadTaskInfoList"); + Deencapsulation.getField(routineLoadJob, "routineLoadTaskInfoList"); Map partitionIdToOffset = Maps.newHashMap(); partitionIdToOffset.put(1, 0L); KafkaTaskInfo routineLoadTaskInfo = - new KafkaTaskInfo(UUID.randomUUID(), routineLoadJob, 20000, System.currentTimeMillis(), - partitionIdToOffset, Config.routine_load_task_timeout_second); + new KafkaTaskInfo(UUID.randomUUID(), routineLoadJob, 20000, System.currentTimeMillis(), + partitionIdToOffset, Config.routine_load_task_timeout_second); Deencapsulation.setField(routineLoadTaskInfo, "txnId", 1L); routineLoadTaskInfoList.add(routineLoadTaskInfo); TransactionState transactionState = new TransactionState(1L, Lists.newArrayList(1L), 1L, "label", null, - LoadJobSourceType.ROUTINE_LOAD_TASK, new TxnCoordinator(TxnSourceType.BE, "be1"), - routineLoadJob.getId(), - Config.stream_load_default_timeout_second); + LoadJobSourceType.ROUTINE_LOAD_TASK, new TxnCoordinator(TxnSourceType.BE, "be1"), + routineLoadJob.getId(), + Config.stream_load_default_timeout_second); transactionState.setTransactionStatus(TransactionStatus.PREPARE); masterTransMgr.getCallbackFactory().addCallback(routineLoadJob); Map idToTransactionState = Maps.newHashMap(); @@ -523,14 +524,14 @@ LoadJobSourceType.ROUTINE_LOAD_TASK, new TxnCoordinator(TxnSourceType.BE, "be1") routineLoadManager.addRoutineLoadJob(routineLoadJob, "db"); Deencapsulation.setField(masterTransMgr.getDatabaseTransactionMgr(GlobalStateMgrTestUtil.testDbId1), - "idToRunningTransactionState", idToTransactionState); + "idToRunningTransactionState", idToTransactionState); masterTransMgr.commitTransaction(1L, 1L, transTablets, Lists.newArrayList(), - txnCommitAttachment); + txnCommitAttachment); Assert.assertEquals(Long.valueOf(0), Deencapsulation.getField(routineLoadJob, "currentTotalRows")); Assert.assertEquals(Long.valueOf(0), 
Deencapsulation.getField(routineLoadJob, "currentErrorRows")); Assert.assertEquals(Long.valueOf(111L), - ((KafkaProgress) routineLoadJob.getProgress()).getOffsetByPartition(1)); + ((KafkaProgress) routineLoadJob.getProgress()).getOffsetByPartition(1)); // todo(ml): change to assert queue // Assert.assertEquals(0, routineLoadManager.getNeedScheduleTasksQueue().size()); Assert.assertEquals(RoutineLoadJob.JobState.PAUSED, routineLoadJob.getState()); @@ -569,12 +570,15 @@ public void testFinishTransaction() throws UserException { // check replica version Partition testPartition = masterGlobalStateMgr.getLocalMetastore() .getDb(GlobalStateMgrTestUtil.testDbId1).getTable(GlobalStateMgrTestUtil.testTableId1) - .getPartition(GlobalStateMgrTestUtil.testPartition1); + .getPartition(GlobalStateMgrTestUtil.testPartition1); // check partition version - assertEquals(GlobalStateMgrTestUtil.testStartVersion + 1, testPartition.getVisibleVersion()); - assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, testPartition.getNextVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion + 1, + testPartition.getDefaultPhysicalPartition().getVisibleVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, + testPartition.getDefaultPhysicalPartition().getNextVersion()); // check partition next version - LocalTablet tablet = (LocalTablet) testPartition.getIndex(GlobalStateMgrTestUtil.testIndexId1) + LocalTablet tablet = (LocalTablet) testPartition.getDefaultPhysicalPartition() + .getIndex(GlobalStateMgrTestUtil.testIndexId1) .getTablet(GlobalStateMgrTestUtil.testTabletId1); for (Replica replica : tablet.getImmutableReplicas()) { if (replica.getId() == GlobalStateMgrTestUtil.testReplicaId1) { @@ -593,27 +597,28 @@ public void testFinishTransaction() throws UserException { public void testFinishTransactionWithOneFailed() throws UserException { TransactionState transactionState = null; Partition testPartition = - masterGlobalStateMgr.getLocalMetastore() - .getTable(GlobalStateMgrTestUtil.testDbId1, GlobalStateMgrTestUtil.testTableId1) - .getPartition(GlobalStateMgrTestUtil.testPartition1); - LocalTablet tablet = (LocalTablet) testPartition.getIndex(GlobalStateMgrTestUtil.testIndexId1) - .getTablet(GlobalStateMgrTestUtil.testTabletId1); + masterGlobalStateMgr.getLocalMetastore() + .getTable(GlobalStateMgrTestUtil.testDbId1, GlobalStateMgrTestUtil.testTableId1) + .getPartition(GlobalStateMgrTestUtil.testPartition1); + LocalTablet tablet = (LocalTablet) testPartition.getDefaultPhysicalPartition() + .getIndex(GlobalStateMgrTestUtil.testIndexId1) + .getTablet(GlobalStateMgrTestUtil.testTabletId1); FakeGlobalStateMgr.setGlobalStateMgr(masterGlobalStateMgr); long transactionId = masterTransMgr - .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), - GlobalStateMgrTestUtil.testTxnLable1, - transactionSource, - LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); + .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), + GlobalStateMgrTestUtil.testTxnLable1, + transactionSource, + LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); // commit a transaction with 1,2 success TabletCommitInfo tabletCommitInfo1 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, - GlobalStateMgrTestUtil.testBackendId1); + GlobalStateMgrTestUtil.testBackendId1); TabletCommitInfo tabletCommitInfo2 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, - 
GlobalStateMgrTestUtil.testBackendId2); + GlobalStateMgrTestUtil.testBackendId2); List transTablets = Lists.newArrayList(); transTablets.add(tabletCommitInfo1); transTablets.add(tabletCommitInfo2); masterTransMgr.commitTransaction(GlobalStateMgrTestUtil.testDbId1, transactionId, transTablets, - Lists.newArrayList(), null); + Lists.newArrayList(), null); // follower globalStateMgr replay the transaction transactionState = fakeEditLog.getTransaction(transactionId); @@ -661,19 +666,19 @@ public void testFinishTransactionWithOneFailed() throws UserException { FakeGlobalStateMgr.setGlobalStateMgr(masterGlobalStateMgr); // commit another transaction with 1,3 success long transactionId2 = masterTransMgr - .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), - GlobalStateMgrTestUtil.testTxnLable2, - transactionSource, - LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); + .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), + GlobalStateMgrTestUtil.testTxnLable2, + transactionSource, + LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); tabletCommitInfo1 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, GlobalStateMgrTestUtil.testBackendId1); TabletCommitInfo tabletCommitInfo3 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, - GlobalStateMgrTestUtil.testBackendId3); + GlobalStateMgrTestUtil.testBackendId3); transTablets = Lists.newArrayList(); transTablets.add(tabletCommitInfo1); transTablets.add(tabletCommitInfo3); try { masterTransMgr.commitTransaction(GlobalStateMgrTestUtil.testDbId1, transactionId2, transTablets, - Lists.newArrayList(), null); + Lists.newArrayList(), null); Assert.fail(); } catch (TabletQuorumFailedException e) { transactionState = masterTransMgr.getTransactionState(GlobalStateMgrTestUtil.testDbId1, transactionId2); @@ -690,17 +695,19 @@ public void testFinishTransactionWithOneFailed() throws UserException { transTablets.add(tabletCommitInfo2); transTablets.add(tabletCommitInfo3); masterTransMgr.commitTransaction(GlobalStateMgrTestUtil.testDbId1, transactionId2, transTablets, - Lists.newArrayList(), null); + Lists.newArrayList(), null); transactionState = fakeEditLog.getTransaction(transactionId2); // check status is commit assertEquals(TransactionStatus.COMMITTED, transactionState.getTransactionStatus()); // check replica version testPartition = masterGlobalStateMgr.getLocalMetastore() - .getTable(GlobalStateMgrTestUtil.testDbId1, GlobalStateMgrTestUtil.testTableId1) - .getPartition(GlobalStateMgrTestUtil.testPartition1); + .getTable(GlobalStateMgrTestUtil.testDbId1, GlobalStateMgrTestUtil.testTableId1) + .getPartition(GlobalStateMgrTestUtil.testPartition1); // check partition version - assertEquals(GlobalStateMgrTestUtil.testStartVersion + 1, testPartition.getVisibleVersion()); - assertEquals(GlobalStateMgrTestUtil.testStartVersion + 3, testPartition.getNextVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion + 1, + testPartition.getDefaultPhysicalPartition().getVisibleVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion + 3, + testPartition.getDefaultPhysicalPartition().getNextVersion()); // follower globalStateMgr replay the transaction transactionState = fakeEditLog.getTransaction(transactionId2); @@ -726,8 +733,10 @@ public void testFinishTransactionWithOneFailed() throws UserException { assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, 
replcia2.getLastSuccessVersion()); assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, replcia3.getLastSuccessVersion()); // check partition version - assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, testPartition.getVisibleVersion()); - assertEquals(GlobalStateMgrTestUtil.testStartVersion + 3, testPartition.getNextVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, + testPartition.getDefaultPhysicalPartition().getVisibleVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion + 3, + testPartition.getDefaultPhysicalPartition().getNextVersion()); transactionState = fakeEditLog.getTransaction(transactionId2); FakeGlobalStateMgr.setGlobalStateMgr(slaveGlobalStateMgr); @@ -743,8 +752,9 @@ public void replayWithExpiredJob() throws Exception { // 1. replay a normal finished transaction TransactionState state = - new TransactionState(dbId, new ArrayList<>(), 1, "label_a", null, LoadJobSourceType.BACKEND_STREAMING, - transactionSource, -1, -1); + new TransactionState(dbId, new ArrayList<>(), 1, "label_a", null, + LoadJobSourceType.BACKEND_STREAMING, + transactionSource, -1, -1); state.setTransactionStatus(TransactionStatus.ABORTED); state.setReason("fake reason"); state.setFinishTime(System.currentTimeMillis() - 2000); @@ -753,8 +763,8 @@ public void replayWithExpiredJob() throws Exception { // 2. replay a expired transaction TransactionState state2 = - new TransactionState(dbId, new ArrayList<>(), 2, "label_b", null, LoadJobSourceType.BACKEND_STREAMING, - transactionSource, -1, -1); + new TransactionState(dbId, new ArrayList<>(), 2, "label_b", null, LoadJobSourceType.BACKEND_STREAMING, + transactionSource, -1, -1); state2.setTransactionStatus(TransactionStatus.ABORTED); state2.setReason("fake reason"); state2.setFinishTime(System.currentTimeMillis()); @@ -764,8 +774,8 @@ public void replayWithExpiredJob() throws Exception { Thread.sleep(2000); // 3. 
replay a valid transaction, let state expire TransactionState state3 = - new TransactionState(dbId, new ArrayList<>(), 3, "label_c", null, LoadJobSourceType.BACKEND_STREAMING, - transactionSource, -1, -1); + new TransactionState(dbId, new ArrayList<>(), 3, "label_c", null, LoadJobSourceType.BACKEND_STREAMING, + transactionSource, -1, -1); state3.setTransactionStatus(TransactionStatus.ABORTED); state3.setReason("fake reason"); state3.setFinishTime(System.currentTimeMillis()); @@ -799,30 +809,30 @@ public void testPrepareTransaction() throws UserException { FakeGlobalStateMgr.setGlobalStateMgr(masterGlobalStateMgr); long transactionId = masterTransMgr - .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), - GlobalStateMgrTestUtil.testTxnLable1, - transactionSource, - LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); + .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), + GlobalStateMgrTestUtil.testTxnLable1, + transactionSource, + LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); // commit a transaction TabletCommitInfo tabletCommitInfo1 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, - GlobalStateMgrTestUtil.testBackendId1); + GlobalStateMgrTestUtil.testBackendId1); TabletCommitInfo tabletCommitInfo2 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, - GlobalStateMgrTestUtil.testBackendId2); + GlobalStateMgrTestUtil.testBackendId2); TabletCommitInfo tabletCommitInfo3 = new TabletCommitInfo(GlobalStateMgrTestUtil.testTabletId1, - GlobalStateMgrTestUtil.testBackendId3); + GlobalStateMgrTestUtil.testBackendId3); List transTablets = Lists.newArrayList(); transTablets.add(tabletCommitInfo1); transTablets.add(tabletCommitInfo2); transTablets.add(tabletCommitInfo3); masterTransMgr.prepareTransaction(GlobalStateMgrTestUtil.testDbId1, transactionId, transTablets, - Lists.newArrayList(), null); + Lists.newArrayList(), null); TransactionState transactionState = fakeEditLog.getTransaction(transactionId); assertEquals(TransactionStatus.PREPARED, transactionState.getTransactionStatus()); try { masterTransMgr.commitPreparedTransaction( - masterGlobalStateMgr.getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDbId1), transactionId, - (long) 1000); + masterGlobalStateMgr.getLocalMetastore().getDb(GlobalStateMgrTestUtil.testDbId1), transactionId, + (long) 1000); Assert.fail("should throw publish timeout exception"); } catch (UserException e) { } @@ -837,15 +847,18 @@ public void testPrepareTransaction() throws UserException { assertEquals(TransactionStatus.VISIBLE, transactionState.getTransactionStatus()); // check replica version Partition testPartition = - masterGlobalStateMgr.getLocalMetastore() - .getTable(GlobalStateMgrTestUtil.testDbId1, GlobalStateMgrTestUtil.testTableId1) - .getPartition(GlobalStateMgrTestUtil.testPartition1); + masterGlobalStateMgr.getLocalMetastore() + .getTable(GlobalStateMgrTestUtil.testDbId1, GlobalStateMgrTestUtil.testTableId1) + .getPartition(GlobalStateMgrTestUtil.testPartition1); // check partition version - assertEquals(GlobalStateMgrTestUtil.testStartVersion + 1, testPartition.getVisibleVersion()); - assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, testPartition.getNextVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion + 1, + testPartition.getDefaultPhysicalPartition().getVisibleVersion()); + assertEquals(GlobalStateMgrTestUtil.testStartVersion + 2, + 
testPartition.getDefaultPhysicalPartition().getNextVersion()); // check partition next version - LocalTablet tablet = (LocalTablet) testPartition.getIndex(GlobalStateMgrTestUtil.testIndexId1) - .getTablet(GlobalStateMgrTestUtil.testTabletId1); + LocalTablet tablet = (LocalTablet) testPartition.getDefaultPhysicalPartition() + .getIndex(GlobalStateMgrTestUtil.testIndexId1) + .getTablet(GlobalStateMgrTestUtil.testTabletId1); for (Replica replica : tablet.getImmutableReplicas()) { if (replica.getId() == GlobalStateMgrTestUtil.testReplicaId1) { assertEquals(GlobalStateMgrTestUtil.testStartVersion, replica.getVersion()); @@ -862,10 +875,10 @@ public void testPrepareTransaction() throws UserException { @Test public void testSaveLoadJsonFormatImage() throws Exception { long transactionId = masterTransMgr - .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), - GlobalStateMgrTestUtil.testTxnLable1, - transactionSource, - LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); + .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), + GlobalStateMgrTestUtil.testTxnLable1, + transactionSource, + LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); UtFrameUtils.PseudoImage pseudoImage = new UtFrameUtils.PseudoImage(); masterTransMgr.saveTransactionStateV2(pseudoImage.getImageWriter()); @@ -881,7 +894,7 @@ public void testSaveLoadJsonFormatImage() throws Exception { @Test public void testRetryCommitOnRateLimitExceededTimeout() - throws UserException { + throws UserException { Database db = new Database(10, "db0"); GlobalTransactionMgr globalTransactionMgr = spy(new GlobalTransactionMgr(GlobalStateMgr.getCurrentState())); DatabaseTransactionMgr dbTransactionMgr = spy(new DatabaseTransactionMgr(10L, GlobalStateMgr.getCurrentState())); @@ -891,15 +904,15 @@ public void testRetryCommitOnRateLimitExceededTimeout() doReturn(transactionState).when(globalTransactionMgr).getTransactionState(db.getId(), 1001); doReturn(dbTransactionMgr).when(globalTransactionMgr).getDatabaseTransactionMgr(db.getId()); doThrow(new CommitRateExceededException(1001, now + 60 * 1000L)) - .when(dbTransactionMgr) - .commitTransaction(1001L, Collections.emptyList(), Collections.emptyList(), null); + .when(dbTransactionMgr) + .commitTransaction(1001L, Collections.emptyList(), Collections.emptyList(), null); Assert.assertThrows(CommitRateExceededException.class, () -> globalTransactionMgr.commitAndPublishTransaction(db, 1001, - Collections.emptyList(), Collections.emptyList(), 10, null)); + Collections.emptyList(), Collections.emptyList(), 10, null)); } @Test public void testPublishVersionTimeout() - throws UserException, LockTimeoutException { + throws UserException, LockTimeoutException { Database db = new Database(10, "db0"); GlobalTransactionMgr globalTransactionMgr = spy(new GlobalTransactionMgr(GlobalStateMgr.getCurrentState())); DatabaseTransactionMgr dbTransactionMgr = spy(new DatabaseTransactionMgr(10L, GlobalStateMgr.getCurrentState())); @@ -909,52 +922,52 @@ public void testPublishVersionTimeout() doReturn(dbTransactionMgr).when(globalTransactionMgr).getDatabaseTransactionMgr(db.getId()); doReturn(transactionState).when(globalTransactionMgr).getTransactionState(db.getId(), 1001); doReturn(new VisibleStateWaiter(new TransactionState())) - .when(dbTransactionMgr) - .commitTransaction(1001L, Collections.emptyList(), Collections.emptyList(), null); + .when(dbTransactionMgr) + 
.commitTransaction(1001L, Collections.emptyList(), Collections.emptyList(), null); Assert.assertFalse(globalTransactionMgr.commitAndPublishTransaction(db, 1001, - Collections.emptyList(), Collections.emptyList(), 2, null)); + Collections.emptyList(), Collections.emptyList(), 2, null)); } @Test public void testRetryCommitOnRateLimitExceededThrowUnexpectedException() - throws UserException { + throws UserException { Database db = new Database(10, "db0"); GlobalTransactionMgr globalTransactionMgr = spy(new GlobalTransactionMgr(GlobalStateMgr.getCurrentState())); DatabaseTransactionMgr dbTransactionMgr = spy(new DatabaseTransactionMgr(10L, GlobalStateMgr.getCurrentState())); doReturn(dbTransactionMgr).when(globalTransactionMgr).getDatabaseTransactionMgr(db.getId()); doThrow(NullPointerException.class) - .when(dbTransactionMgr) - .commitTransaction(1001L, Collections.emptyList(), Collections.emptyList(), null); + .when(dbTransactionMgr) + .commitTransaction(1001L, Collections.emptyList(), Collections.emptyList(), null); Assert.assertThrows(UserException.class, () -> globalTransactionMgr.commitAndPublishTransaction(db, 1001, - Collections.emptyList(), Collections.emptyList(), 10, null)); + Collections.emptyList(), Collections.emptyList(), 10, null)); } @Test public void testRetryCommitOnRateLimitExceededThrowLockTimeoutException() - throws UserException, LockTimeoutException { + throws UserException, LockTimeoutException { Database db = new Database(10L, "db0"); GlobalTransactionMgr globalTransactionMgr = spy(new GlobalTransactionMgr(GlobalStateMgr.getCurrentState())); TransactionState transactionState = new TransactionState(); doReturn(transactionState) - .when(globalTransactionMgr) - .getTransactionState(10L, 1001L); + .when(globalTransactionMgr) + .getTransactionState(10L, 1001L); doThrow(LockTimeoutException.class) - .when(globalTransactionMgr) - .retryCommitOnRateLimitExceeded(db, 1001L, Collections.emptyList(), Collections.emptyList(), null, 10L); + .when(globalTransactionMgr) + .retryCommitOnRateLimitExceeded(db, 1001L, Collections.emptyList(), Collections.emptyList(), null, 10L); Assert.assertThrows(LockTimeoutException.class, () -> globalTransactionMgr.commitAndPublishTransaction(db, 1001L, - Collections.emptyList(), Collections.emptyList(), 10L, null)); + Collections.emptyList(), Collections.emptyList(), 10L, null)); } @Test public void testGetTransactionNumByCoordinateBe() throws LabelAlreadyUsedException, AnalysisException, - RunningTxnExceedException, DuplicatedRequestException { + RunningTxnExceedException, DuplicatedRequestException { masterTransMgr - .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), - GlobalStateMgrTestUtil.testTxnLable1, - transactionSourceBe, - LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); + .beginTransaction(GlobalStateMgrTestUtil.testDbId1, Lists.newArrayList(GlobalStateMgrTestUtil.testTableId1), + GlobalStateMgrTestUtil.testTxnLable1, + transactionSourceBe, + LoadJobSourceType.FRONTEND, Config.stream_load_default_timeout_second); long res = masterTransMgr.getTransactionNumByCoordinateBe("localbe"); assertEquals(1, res); } @@ -1006,16 +1019,16 @@ public void testCommitLockTimeout() throws UserException, LockTimeoutException { Database db = new Database(10L, "db0"); GlobalTransactionMgr globalTransactionMgr = spy(new GlobalTransactionMgr(GlobalStateMgr.getCurrentState())); doThrow(LockTimeoutException.class) - .when(globalTransactionMgr) - .commitAndPublishTransaction(db, 
                        1001L, Collections.emptyList(), Collections.emptyList(), 10L, null);
+                .when(globalTransactionMgr)
+                .commitAndPublishTransaction(db, 1001L, Collections.emptyList(), Collections.emptyList(), 10L, null);
 
         Assert.assertThrows(ErrorReportException.class, () -> globalTransactionMgr.commitAndPublishTransaction(db, 1001L,
-                Collections.emptyList(), Collections.emptyList(), 10L));
+                Collections.emptyList(), Collections.emptyList(), 10L));
     }
 
     @Test
     public void testCheckValidTimeoutSecond() {
         ExceptionChecker.expectThrowsWithMsg(AnalysisException.class,
-                "Invalid timeout: '1'. Expected values should be between 2 and 3 seconds",
-                () -> GlobalTransactionMgr.checkValidTimeoutSecond(1, 3, 2));
+                "Invalid timeout: '1'. Expected values should be between 2 and 3 seconds",
+                () -> GlobalTransactionMgr.checkValidTimeoutSecond(1, 3, 2));
     }
 }
diff --git a/fe/fe-core/src/test/java/com/starrocks/transaction/LakePublishBatchTest.java b/fe/fe-core/src/test/java/com/starrocks/transaction/LakePublishBatchTest.java
index de9f838446e430..0dfb21c8dc0bd6 100644
--- a/fe/fe-core/src/test/java/com/starrocks/transaction/LakePublishBatchTest.java
+++ b/fe/fe-core/src/test/java/com/starrocks/transaction/LakePublishBatchTest.java
@@ -98,7 +98,7 @@ public void testNormal() throws Exception {
 
         int num = 0;
         for (Partition partition : table.getPartitions()) {
-            MaterializedIndex baseIndex = partition.getBaseIndex();
+            MaterializedIndex baseIndex = partition.getDefaultPhysicalPartition().getBaseIndex();
             for (Long tabletId : baseIndex.getTabletIdsInOrder()) {
                 for (Long backendId : GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackendIds()) {
                     TabletCommitInfo tabletCommitInfo = new TabletCommitInfo(tabletId, backendId);
@@ -165,7 +165,7 @@ public void testPublishTransactionState() throws Exception {
 
         List transTablets = Lists.newArrayList();
         for (Partition partition : table.getPartitions()) {
-            MaterializedIndex baseIndex = partition.getBaseIndex();
+            MaterializedIndex baseIndex = partition.getDefaultPhysicalPartition().getBaseIndex();
             for (Long tabletId : baseIndex.getTabletIdsInOrder()) {
                 for (Long backendId : GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackendIds()) {
                     TabletCommitInfo tabletCommitInfo = new TabletCommitInfo(tabletId, backendId);
@@ -197,7 +197,7 @@ public void testPublishDbDroped() throws Exception {
         Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), TABLE);
         List transTablets = Lists.newArrayList();
         for (Partition partition : table.getPartitions()) {
-            MaterializedIndex baseIndex = partition.getBaseIndex();
+            MaterializedIndex baseIndex = partition.getDefaultPhysicalPartition().getBaseIndex();
             for (Long tabletId : baseIndex.getTabletIdsInOrder()) {
                 for (Long backendId : GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackendIds()) {
                     TabletCommitInfo tabletCommitInfo = new TabletCommitInfo(tabletId, backendId);
@@ -253,7 +253,7 @@ public void testPublishTableDropped() throws Exception {
         Table table = GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), TABLE);
         List transTablets = Lists.newArrayList();
         for (Partition partition : table.getPartitions()) {
-            MaterializedIndex baseIndex = partition.getBaseIndex();
+            MaterializedIndex baseIndex = partition.getDefaultPhysicalPartition().getBaseIndex();
             for (Long tabletId : baseIndex.getTabletIdsInOrder()) {
                 for (Long backendId : GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackendIds()) {
                     TabletCommitInfo tabletCommitInfo = new TabletCommitInfo(tabletId, backendId);
@@ -312,7 +312,7 @@ public void testTransformBatchToSingle() throws Exception {
 
         int num = 0;
         for (Partition partition : table.getPartitions()) {
-            MaterializedIndex baseIndex = partition.getBaseIndex();
+            MaterializedIndex baseIndex = partition.getDefaultPhysicalPartition().getBaseIndex();
             for (Long tabletId : baseIndex.getTabletIdsInOrder()) {
                 for (Long backendId : GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackendIds()) {
                     TabletCommitInfo tabletCommitInfo = new TabletCommitInfo(tabletId, backendId);
diff --git a/fe/fe-core/src/test/java/com/starrocks/transaction/LakeTableTxnLogApplierTest.java b/fe/fe-core/src/test/java/com/starrocks/transaction/LakeTableTxnLogApplierTest.java
index a3da2b240bd729..58d27805758be9 100644
--- a/fe/fe-core/src/test/java/com/starrocks/transaction/LakeTableTxnLogApplierTest.java
+++ b/fe/fe-core/src/test/java/com/starrocks/transaction/LakeTableTxnLogApplierTest.java
@@ -30,15 +30,16 @@ public void testCommitAndApply() {
         tableCommitInfo.addPartitionCommitInfo(partitionCommitInfo);
 
         applier.applyCommitLog(state, tableCommitInfo);
-        Assert.assertEquals(1, table.getPartition(partitionId).getVisibleVersion());
-        Assert.assertEquals(3, table.getPartition(partitionId).getNextVersion());
+        Assert.assertEquals(1, table.getPartition(partitionId).getDefaultPhysicalPartition().getVisibleVersion());
+        Assert.assertEquals(3, table.getPartition(partitionId).getDefaultPhysicalPartition().getNextVersion());
 
         state.setTransactionStatus(TransactionStatus.VISIBLE);
         partitionCommitInfo.setVersionTime(System.currentTimeMillis());
         applier.applyVisibleLog(state, tableCommitInfo, /*unused*/null);
-        Assert.assertEquals(2, table.getPartition(partitionId).getVisibleVersion());
-        Assert.assertEquals(3, table.getPartition(partitionId).getNextVersion());
-        Assert.assertEquals(partitionCommitInfo.getVersionTime(), table.getPartition(partitionId).getVisibleVersionTime());
+        Assert.assertEquals(2, table.getPartition(partitionId).getDefaultPhysicalPartition().getVisibleVersion());
+        Assert.assertEquals(3, table.getPartition(partitionId).getDefaultPhysicalPartition().getNextVersion());
+        Assert.assertEquals(partitionCommitInfo.getVersionTime(), table.getPartition(partitionId).getDefaultPhysicalPartition()
+                .getVisibleVersionTime());
     }
 
     @Test
@@ -51,13 +52,13 @@ public void testApplyCommitLogWithDroppedPartition() {
         tableCommitInfo.addPartitionCommitInfo(partitionCommitInfo);
 
         applier.applyCommitLog(state, tableCommitInfo);
-        Assert.assertEquals(1, table.getPartition(partitionId).getVisibleVersion());
-        Assert.assertEquals(2, table.getPartition(partitionId).getNextVersion());
+        Assert.assertEquals(1, table.getPartition(partitionId).getDefaultPhysicalPartition().getVisibleVersion());
+        Assert.assertEquals(2, table.getPartition(partitionId).getDefaultPhysicalPartition().getNextVersion());
 
         state.setTransactionStatus(TransactionStatus.VISIBLE);
         partitionCommitInfo.setVersionTime(System.currentTimeMillis());
         applier.applyVisibleLog(state, tableCommitInfo, /*unused*/null);
-        Assert.assertEquals(1, table.getPartition(partitionId).getVisibleVersion());
-        Assert.assertEquals(2, table.getPartition(partitionId).getNextVersion());
+        Assert.assertEquals(1, table.getPartition(partitionId).getDefaultPhysicalPartition().getVisibleVersion());
+        Assert.assertEquals(2, table.getPartition(partitionId).getDefaultPhysicalPartition().getNextVersion());
     }
 }
diff --git a/fe/fe-core/src/test/java/com/starrocks/utframe/StarRocksAssert.java b/fe/fe-core/src/test/java/com/starrocks/utframe/StarRocksAssert.java
index ba7b8345128312..ba15cd94b6373b 100644
--- a/fe/fe-core/src/test/java/com/starrocks/utframe/StarRocksAssert.java
+++ b/fe/fe-core/src/test/java/com/starrocks/utframe/StarRocksAssert.java
@@ -174,7 +174,7 @@ public StarRocksAssert withDatabase(String dbName) throws Exception {
         DropDbStmt dropDbStmt = (DropDbStmt) UtFrameUtils.parseStmtWithNewParser("drop database if exists `" + dbName + "`;", ctx);
         try {
-            GlobalStateMgr.getCurrentState().getLocalMetastore().dropDb(dropDbStmt.getDbName(), dropDbStmt.isForceDrop());
+            GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropDb(dropDbStmt.getDbName(), dropDbStmt.isForceDrop());
         } catch (MetaNotFoundException e) {
             if (!dropDbStmt.isSetIfExists()) {
                 ErrorReport.reportDdlException(ErrorCode.ERR_DB_DROP_EXISTS, dbName);
@@ -183,7 +183,7 @@ public StarRocksAssert withDatabase(String dbName) throws Exception {
 
         CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser("create database `" + dbName + "`;", ctx);
-        GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName());
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName());
         return this;
     }
 
@@ -192,7 +192,7 @@ public StarRocksAssert createDatabaseIfNotExists(String dbName) throws Exception
             CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseStmtWithNewParser("create database if not exists `" + dbName + "`;", ctx);
-            GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(createDbStmt.getFullDbName());
+            GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(createDbStmt.getFullDbName());
         } catch (AlreadyExistsException e) {
             // ignore
         }
@@ -216,7 +216,7 @@ public StarRocksAssert withUser(String user) throws Exception {
 
     public StarRocksAssert withDatabaseWithoutAnalyze(String dbName) throws Exception {
         CreateDbStmt dbStmt = new CreateDbStmt(false, dbName);
-        GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(dbStmt.getFullDbName());
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(dbStmt.getFullDbName());
         return this;
     }
 
@@ -339,7 +339,7 @@ public static void utCreateTableWithRetry(CreateTableStmt createTableStmt, Conne
                 createTableStmtCopied = (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(
                         createTableStmt.getOrigStmt().originStmt, ctx);
             }
-            GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(createTableStmtCopied);
+            GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(createTableStmtCopied);
         }, "Create Table", 3);
     }
 
@@ -350,7 +350,7 @@ public static void utDropTableWithRetry(DropTableStmt dropTableStmt, ConnectCont
                 dropTableStmtCopied = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser(
                         dropTableStmt.getOrigStmt().originStmt, ctx);
             }
-            GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmtCopied);
+            GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmtCopied);
         }, "Drop Table", 3);
     }
 
@@ -571,13 +571,13 @@ public StarRocksAssert useTable(String table) {
     }
 
     public Table getTable(String dbName, String tableName) {
-        return ctx.getGlobalStateMgr().getLocalMetastore().mayGetDb(dbName)
+        return ctx.getGlobalStateMgr().getStarRocksMetadata().mayGetDb(dbName)
                 .map(db -> GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), tableName))
                 .orElse(null);
     }
 
     public MaterializedView getMv(String dbName, String tableName) {
-        return (MaterializedView) ctx.getGlobalStateMgr().getLocalMetastore()
+        return (MaterializedView) ctx.getGlobalStateMgr().getStarRocksMetadata()
                 .mayGetDb(dbName)
                 .map(db -> GlobalStateMgr.getCurrentState().getLocalMetastore().getTable(db.getFullName(), tableName))
                 .orElse(null);
@@ -591,7 +591,7 @@ public StarRocksAssert withSingleReplicaTable(String sql) throws Exception {
             }
             CreateTableStmt createTableStmt = (CreateTableStmt) statementBase;
             createTableStmt.getProperties().put(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM, "1");
-            GlobalStateMgr.getCurrentState().getLocalMetastore().createTable(createTableStmt);
+            GlobalStateMgr.getCurrentState().getStarRocksMetadata().createTable(createTableStmt);
         } catch (Exception e) {
             LOG.warn("create table failed, sql:{}", sql, e);
             throw e;
@@ -621,7 +621,7 @@ public void withAsyncMv(
         if (isOnlySingleReplica) {
            createMaterializedViewStatement.getProperties().put(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM, "1");
         }
-        GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createMaterializedViewStatement);
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createMaterializedViewStatement);
         if (isRefresh) {
             new MockUp() {
                 @Mock
@@ -631,12 +631,12 @@ public void handleDMLStmt(ExecPlan execPlan, DmlStmt stmt) throws Exception {
                     TableName tableName = insertStmt.getTableName();
                     Database testDb = GlobalStateMgr.getCurrentState().getLocalMetastore().getDb(stmt.getTableName().getDb());
                     OlapTable tbl = ((OlapTable) GlobalStateMgr.getCurrentState().getLocalMetastore()
-                            .getTable(testDb.getFullName(), tableName.getTbl()));
+                            .getTable(testDb.getFullName(), tableName.getTbl()));
                     for (Partition partition : tbl.getPartitions()) {
                         if (insertStmt.getTargetPartitionIds().contains(partition.getId())) {
-                            long version = partition.getVisibleVersion() + 1;
-                            partition.setVisibleVersion(version, System.currentTimeMillis());
-                            MaterializedIndex baseIndex = partition.getBaseIndex();
+                            long version = partition.getDefaultPhysicalPartition().getVisibleVersion() + 1;
+                            partition.getDefaultPhysicalPartition().setVisibleVersion(version, System.currentTimeMillis());
+                            MaterializedIndex baseIndex = partition.getDefaultPhysicalPartition().getBaseIndex();
                             List tablets = baseIndex.getTablets();
                             for (Tablet tablet : tablets) {
                                 List replicas = ((LocalTablet) tablet).getImmutableReplicas();
@@ -658,7 +658,7 @@ public void handleDMLStmt(ExecPlan execPlan, DmlStmt stmt) throws Exception {
 
     public StarRocksAssert withView(String sql) throws Exception {
         CreateViewStmt createTableStmt = (CreateViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx);
-        GlobalStateMgr.getCurrentState().getLocalMetastore().createView(createTableStmt);
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().createView(createTableStmt);
         return this;
     }
@@ -679,7 +679,7 @@ public StarRocksAssert withView(String sql, ExceptionRunnable action) throws Exc
     public StarRocksAssert dropView(String viewName) throws Exception {
         DropTableStmt dropViewStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser("drop view " + viewName + ";", ctx);
-        GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropViewStmt);
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropViewStmt);
         return this;
     }
@@ -693,13 +693,13 @@ public StarRocksAssert dropCatalog(String catalogName) throws Exception {
 
     public StarRocksAssert dropDatabase(String dbName) throws Exception {
         DropDbStmt dropDbStmt = (DropDbStmt) UtFrameUtils.parseStmtWithNewParser("drop database " + dbName + ";", ctx);
-        GlobalStateMgr.getCurrentState().getLocalMetastore().dropDb(dropDbStmt.getDbName(), dropDbStmt.isForceDrop());
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropDb(dropDbStmt.getDbName(), dropDbStmt.isForceDrop());
         return this;
     }
 
     public StarRocksAssert alterMvProperties(String sql) throws Exception {
         AlterMaterializedViewStmt alterMvStmt = (AlterMaterializedViewStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx);
-        GlobalStateMgr.getCurrentState().getLocalMetastore().alterMaterializedView(alterMvStmt);
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterMaterializedView(alterMvStmt);
         return this;
     }
@@ -708,14 +708,14 @@ public StarRocksAssert alterTableProperties(String sql) throws Exception {
         Assert.assertFalse(alterTableStmt.getAlterClauseList().isEmpty());
         Assert.assertTrue(alterTableStmt.getAlterClauseList().get(0) instanceof ModifyTablePropertiesClause);
         Analyzer.analyze(alterTableStmt, ctx);
-        GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt);
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt);
         return this;
     }
 
     public StarRocksAssert alterTable(String sql) throws Exception {
         AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx);
         Analyzer.analyze(alterTableStmt, ctx);
-        GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt);
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt);
         return this;
     }
@@ -729,7 +729,7 @@ public StarRocksAssert dropTables(List tableNames) throws Exception {
 
     public StarRocksAssert dropTable(String tableName) throws Exception {
         DropTableStmt dropTableStmt = (DropTableStmt) UtFrameUtils.parseStmtWithNewParser("drop table " + tableName + ";", ctx);
-        GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt);
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt);
         return this;
     }
@@ -744,7 +744,7 @@ public StarRocksAssert dropTemporaryTable(String tableName, boolean ifExists) th
     public StarRocksAssert dropMaterializedView(String materializedViewName) throws Exception {
         DropMaterializedViewStmt dropMaterializedViewStmt = (DropMaterializedViewStmt) UtFrameUtils.
                 parseStmtWithNewParser("drop materialized view if exists " + materializedViewName + ";", ctx);
-        GlobalStateMgr.getCurrentState().getLocalMetastore().dropMaterializedView(dropMaterializedViewStmt);
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropMaterializedView(dropMaterializedViewStmt);
         return this;
     }
@@ -883,7 +883,7 @@ public StarRocksAssert withMaterializedView(String sql,
                 if (isOnlySingleReplica) {
                     createMaterializedViewStmt.getProperties().put(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM, "1");
                 }
-                GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createMaterializedViewStmt);
+                GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createMaterializedViewStmt);
                 checkAlterJob();
             } else {
                 Preconditions.checkState(stmt instanceof CreateMaterializedViewStatement);
@@ -891,7 +891,7 @@ public StarRocksAssert withMaterializedView(String sql,
                 if (isOnlySingleReplica) {
                     createMaterializedViewStatement.getProperties().put(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM, "1");
                 }
-                GlobalStateMgr.getCurrentState().getLocalMetastore().createMaterializedView(createMaterializedViewStatement);
+                GlobalStateMgr.getCurrentState().getStarRocksMetadata().createMaterializedView(createMaterializedViewStatement);
                 if (isRefresh) {
                     String mvName = createMaterializedViewStatement.getTableName().getTbl();
                     refreshMvPartition(String.format("refresh materialized view %s", mvName));
@@ -1014,7 +1014,7 @@ public void updateTablePartitionVersion(String dbName, String tableName, long ve
     // Add rollup
     public StarRocksAssert withRollup(String sql) throws Exception {
         AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseStmtWithNewParser(sql, ctx);
-        GlobalStateMgr.getCurrentState().getLocalMetastore().alterTable(ctx, alterTableStmt);
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().alterTable(ctx, alterTableStmt);
         checkAlterJob();
         return this;
     }
diff --git a/fe/fe-core/src/test/java/com/starrocks/utframe/TestWithFeService.java b/fe/fe-core/src/test/java/com/starrocks/utframe/TestWithFeService.java
index 9f07bddd520b0c..2e263f518e816b 100644
--- a/fe/fe-core/src/test/java/com/starrocks/utframe/TestWithFeService.java
+++ b/fe/fe-core/src/test/java/com/starrocks/utframe/TestWithFeService.java
@@ -169,7 +169,7 @@ protected void cleanStarrocksFeDir() {
     }
 
     public void createDatabase(String db) throws Exception {
-        GlobalStateMgr.getCurrentState().getLocalMetastore().createDb(db);
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().createDb(db);
     }
 
     public void useDatabase(String dbName) {
@@ -189,7 +189,7 @@ public void createTable(String sql) throws Exception {
     public void dropTable(String table, boolean force) throws Exception {
         DropTableStmt dropTableStmt = (DropTableStmt) parseAndAnalyzeStmt(
                 "drop table " + table + (force ? " force" : "") + ";", connectContext);
-        GlobalStateMgr.getCurrentState().getLocalMetastore().dropTable(dropTableStmt);
+        GlobalStateMgr.getCurrentState().getStarRocksMetadata().dropTable(dropTableStmt);
     }
 
     public void createTables(String... sqls) throws Exception {
diff --git a/fe/fe-core/src/test/java/com/starrocks/utframe/UtFrameUtils.java b/fe/fe-core/src/test/java/com/starrocks/utframe/UtFrameUtils.java
index 8f4e75af332dfd..7f4fefabd49695 100644
--- a/fe/fe-core/src/test/java/com/starrocks/utframe/UtFrameUtils.java
+++ b/fe/fe-core/src/test/java/com/starrocks/utframe/UtFrameUtils.java
@@ -702,7 +702,7 @@ private static String initMockEnv(ConnectContext connectContext, QueryDumpInfo r
         // mock replay external table info
         if (!replayDumpInfo.getHmsTableMap().isEmpty()) {
             ReplayMetadataMgr replayMetadataMgr = new ReplayMetadataMgr(
-                    GlobalStateMgr.getCurrentState().getLocalMetastore(),
+                    GlobalStateMgr.getCurrentState().getStarRocksMetadata(),
                     GlobalStateMgr.getCurrentState().getConnectorMgr(),
                     GlobalStateMgr.getCurrentState().getResourceMgr(),
                     replayDumpInfo.getHmsTableMap(),
@@ -1229,8 +1229,8 @@ public static boolean matchPlanWithoutId(String expect, String actual) {
     }
 
     public static void setPartitionVersion(Partition partition, long version) {
-        partition.setVisibleVersion(version, System.currentTimeMillis());
-        MaterializedIndex baseIndex = partition.getBaseIndex();
+        partition.getDefaultPhysicalPartition().setVisibleVersion(version, System.currentTimeMillis());
+        MaterializedIndex baseIndex = partition.getDefaultPhysicalPartition().getBaseIndex();
         List tablets = baseIndex.getTablets();
         for (Tablet tablet : tablets) {
             List replicas = ((LocalTablet) tablet).getImmutableReplicas();
@@ -1344,7 +1344,7 @@ public void handleDMLStmt(ExecPlan execPlan, DmlStmt stmt) throws Exception {
                 if (tbl != null) {
                     for (Long partitionId : insertStmt.getTargetPartitionIds()) {
                         Partition partition = tbl.getPartition(partitionId);
-                        setPartitionVersion(partition, partition.getVisibleVersion() + 1);
+                        setPartitionVersion(partition, partition.getDefaultPhysicalPartition().getVisibleVersion() + 1);
                     }
                 }
             } else if (stmt instanceof DeleteStmt) {