From 8e5738610abbc60654f5ef32f229c117bca34251 Mon Sep 17 00:00:00 2001 From: zhangyuan Date: Wed, 25 Dec 2024 16:51:54 +0800 Subject: [PATCH] [feature](backup) backup and restore privileges catalogs workloadgroup (#44905) --- .../org/apache/doris/nereids/DorisParser.g4 | 12 +- fe/fe-core/src/main/cup/sql_parser.cup | 28 + .../doris/analysis/AbstractBackupStmt.java | 16 +- .../org/apache/doris/analysis/BackupStmt.java | 80 +- .../doris/analysis/CancelBackupStmt.java | 6 + .../apache/doris/analysis/RestoreStmt.java | 80 +- .../apache/doris/analysis/ShowBackupStmt.java | 6 + .../doris/analysis/ShowRestoreStmt.java | 6 + .../doris/backup/BackupCatalogMeta.java | 76 ++ .../apache/doris/backup/BackupGlobalInfo.java | 141 ++++ .../apache/doris/backup/BackupHandler.java | 25 +- .../org/apache/doris/backup/BackupJob.java | 26 +- .../apache/doris/backup/BackupJobInfo.java | 75 +- .../org/apache/doris/backup/RestoreJob.java | 347 ++++++++- .../apache/doris/blockrule/SqlBlockRule.java | 11 + .../doris/blockrule/SqlBlockRuleMgr.java | 24 + .../apache/doris/datasource/CatalogMgr.java | 21 + .../apache/doris/mysql/privilege/Auth.java | 113 ++- .../doris/mysql/privilege/PasswordPolicy.java | 31 +- .../privilege/PasswordPolicyManager.java | 5 +- .../apache/doris/mysql/privilege/Role.java | 34 +- .../apache/doris/mysql/privilege/User.java | 12 + .../doris/mysql/privilege/UserProperty.java | 12 + .../mysql/privilege/UserPropertyMgr.java | 4 + .../mysql/privilege/UserRoleManager.java | 4 + .../org/apache/doris/policy/PolicyMgr.java | 14 + .../resource/workloadgroup/WorkloadGroup.java | 12 + .../workloadgroup/WorkloadGroupMgr.java | 18 + .../doris/backup/BackupHandlerTest.java | 2 +- .../apache/doris/backup/BackupJobTest.java | 12 +- .../apache/doris/backup/RestoreJobTest.java | 4 +- .../test_backup_restore_priv.out | 36 + .../test_backup_restore_priv.groovy | 694 ++++++++++++++++++ 33 files changed, 1944 insertions(+), 43 deletions(-) create mode 100644 
fe/fe-core/src/main/java/org/apache/doris/backup/BackupCatalogMeta.java create mode 100644 fe/fe-core/src/main/java/org/apache/doris/backup/BackupGlobalInfo.java create mode 100644 regression-test/data/backup_restore/test_backup_restore_priv.out create mode 100644 regression-test/suites/backup_restore/test_backup_restore_priv.groovy diff --git a/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 b/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 index f5730bddd549ee..4dc608583a12e1 100644 --- a/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 +++ b/fe/fe-core/src/main/antlr4/org/apache/doris/nereids/DorisParser.g4 @@ -305,10 +305,10 @@ unsupportedOtherStatement | WARM UP (CLUSTER | COMPUTE GROUP) destination=identifier WITH ((CLUSTER | COMPUTE GROUP) source=identifier | (warmUpItem (AND warmUpItem)*)) FORCE? #warmUpCluster - | BACKUP SNAPSHOT label=multipartIdentifier TO repo=identifier + | BACKUP GLOBAL? SNAPSHOT label=multipartIdentifier TO repo=identifier ((ON | EXCLUDE) LEFT_PAREN baseTableRef (COMMA baseTableRef)* RIGHT_PAREN)? properties=propertyClause? #backup - | RESTORE SNAPSHOT label=multipartIdentifier FROM repo=identifier + | RESTORE GLOBAL? SNAPSHOT label=multipartIdentifier FROM repo=identifier ((ON | EXCLUDE) LEFT_PAREN baseTableRef (COMMA baseTableRef)* RIGHT_PAREN)? properties=propertyClause? #restore | START TRANSACTION (WITH CONSISTENT SNAPSHOT)? #unsupportedStartTransaction @@ -359,8 +359,8 @@ unsupportedShowStatement wildWhere? sortClause? limitClause? #showTabletsFromTable | SHOW PROPERTY (FOR user=identifierOrText)? wildWhere? #showUserProperties | SHOW ALL PROPERTIES wildWhere? #showAllProperties - | SHOW BACKUP ((FROM | IN) database=multipartIdentifier)? wildWhere? #showBackup - | SHOW BRIEF? RESTORE ((FROM | IN) database=multipartIdentifier)? wildWhere? #showRestore + | SHOW GLOBAL? BACKUP ((FROM | IN) database=multipartIdentifier)? wildWhere? #showBackup + | SHOW GLOBAL? BRIEF? 
RESTORE ((FROM | IN) database=multipartIdentifier)? wildWhere? #showRestore | SHOW RESOURCES wildWhere? sortClause? limitClause? #showResources | SHOW WORKLOAD GROUPS wildWhere? #showWorkloadGroups | SHOW SNAPSHOT ON repo=identifier wildWhere? #showSnapshot @@ -496,8 +496,8 @@ unsupportedCancelStatement (COMMA jobIds+=INTEGER_VALUE)* RIGHT_PAREN)? #cancelBuildIndex | CANCEL DECOMMISSION BACKEND hostPorts+=STRING_LITERAL (COMMA hostPorts+=STRING_LITERAL)* #cancelDecommisionBackend - | CANCEL BACKUP ((FROM | IN) database=identifier)? #cancelBackup - | CANCEL RESTORE ((FROM | IN) database=identifier)? #cancelRestore + | CANCEL GLOBAL? BACKUP ((FROM | IN) database=identifier)? #cancelBackup + | CANCEL GLOBAL? RESTORE ((FROM | IN) database=identifier)? #cancelRestore ; supportedAdminStatement diff --git a/fe/fe-core/src/main/cup/sql_parser.cup b/fe/fe-core/src/main/cup/sql_parser.cup index 7046a71c63bdee..7a07c23f572adb 100644 --- a/fe/fe-core/src/main/cup/sql_parser.cup +++ b/fe/fe-core/src/main/cup/sql_parser.cup @@ -4550,10 +4550,18 @@ show_param ::= {: RESULT = new ShowBackupStmt(db, parser.where); :} + | KW_GLOBAL KW_BACKUP opt_wild_where + {: + RESULT = new ShowBackupStmt(parser.where); + :} | KW_RESTORE opt_db:db opt_wild_where {: RESULT = new ShowRestoreStmt(db, parser.where); :} + | KW_GLOBAL KW_RESTORE opt_wild_where + {: + RESULT = new ShowRestoreStmt(parser.where); + :} | KW_BRIEF KW_RESTORE opt_db:db opt_wild_where {: RESULT = new ShowRestoreStmt(db, parser.where, true); @@ -5069,10 +5077,18 @@ cancel_param ::= {: RESULT = new CancelBackupStmt(db, false); :} + | KW_GLOBAL KW_BACKUP + {: + RESULT = new CancelBackupStmt(false); + :} | KW_RESTORE opt_db:db {: RESULT = new CancelBackupStmt(db, true); :} + | KW_GLOBAL KW_RESTORE + {: + RESULT = new CancelBackupStmt(true); + :} | KW_WARM KW_UP KW_JOB opt_wild_where {: RESULT = new CancelCloudWarmUpStmt(parser.where); @@ -5456,6 +5472,12 @@ backup_stmt ::= {: RESULT = new BackupStmt(label, repoName, tblRefClause, 
properties); :} + | KW_BACKUP KW_GLOBAL KW_SNAPSHOT job_label:label + KW_TO ident:repoName + opt_properties:properties + {: + RESULT = new BackupStmt(label, repoName, properties); + :} ; unlock_tables_stmt ::= @@ -5557,6 +5579,12 @@ restore_stmt ::= {: RESULT = new RestoreStmt(label, repoName, tblRefClause, properties); :} + | KW_RESTORE KW_GLOBAL KW_SNAPSHOT job_label:label + KW_FROM ident:repoName + opt_properties:properties + {: + RESULT = new RestoreStmt(label, repoName, properties); + :} ; // Kill statement diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupStmt.java index 3aa0c54a5c52ff..a0c0db738b29da 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupStmt.java @@ -22,6 +22,7 @@ import org.apache.doris.common.Config; import org.apache.doris.common.ErrorCode; import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.FeConstants; import org.apache.doris.common.UserException; import org.apache.doris.datasource.InternalCatalog; import org.apache.doris.mysql.privilege.PrivPredicate; @@ -44,16 +45,21 @@ public class AbstractBackupStmt extends DdlStmt { protected String repoName; protected AbstractBackupTableRefClause abstractBackupTableRefClause; protected Map properties; - protected long timeoutMs; + public boolean backupGlobal = false; public AbstractBackupStmt(LabelName labelName, String repoName, AbstractBackupTableRefClause abstractBackupTableRefClause, - Map properties) { - this.labelName = labelName; + Map properties, boolean backupGlobal) { + if (backupGlobal) { + this.labelName = new LabelName(FeConstants.INTERNAL_DB_NAME, labelName.getLabelName()); + } else { + this.labelName = labelName; + } this.repoName = repoName; this.abstractBackupTableRefClause = abstractBackupTableRefClause; this.properties = properties == null ? 
Maps.newHashMap() : properties; + this.backupGlobal = backupGlobal; } @Override @@ -120,6 +126,10 @@ public String getDbName() { return labelName.getDbName(); } + public boolean isBackupGlobal() { + return backupGlobal; + } + public String getLabel() { return labelName.getLabelName(); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BackupStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BackupStmt.java index 445db61157061d..9753afc2d98422 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BackupStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BackupStmt.java @@ -30,6 +30,9 @@ public class BackupStmt extends AbstractBackupStmt implements NotFallbackInParser { private static final String PROP_TYPE = "type"; public static final String PROP_CONTENT = "content"; + public static final String PROP_BACKUP_PRIV = "backup_privilege"; + public static final String PROP_BACKUP_CATALOG = "backup_catalog"; + public static final String PROP_BACKUP_WORKLOAD_GROUP = "backup_workload_group"; public enum BackupType { INCREMENTAL, FULL @@ -41,11 +44,18 @@ public enum BackupContent { private BackupType type = BackupType.FULL; private BackupContent content = BackupContent.ALL; + private boolean backupPriv = false; + private boolean backupCatalog = false; + private boolean backupWorkloadGroup = false; public BackupStmt(LabelName labelName, String repoName, AbstractBackupTableRefClause abstractBackupTableRefClause, Map properties) { - super(labelName, repoName, abstractBackupTableRefClause, properties); + super(labelName, repoName, abstractBackupTableRefClause, properties, false); + } + + public BackupStmt(LabelName labelName, String repoName, Map properties) { + super(labelName, repoName, null, properties, true); } public long getTimeoutMs() { @@ -60,6 +70,18 @@ public BackupContent getContent() { return content; } + public boolean isBackupPriv() { + return backupPriv; + } + + public boolean isBackupCatalog() { + return 
backupCatalog; + } + + public boolean isBackupWorkloadGroup() { + return backupWorkloadGroup; + } + @Override public void analyze(Analyzer analyzer) throws UserException { super.analyze(analyzer); @@ -103,6 +125,62 @@ protected void analyzeProperties() throws AnalysisException { copiedProperties.remove(PROP_CONTENT); } + // backup_priv + String backupPrivProp = copiedProperties.get(PROP_BACKUP_PRIV); + if (backupPrivProp != null) { + if (backupPrivProp.equalsIgnoreCase("true")) { + backupPriv = true; + } else if (backupPrivProp.equalsIgnoreCase("false")) { + backupPriv = false; + } else { + ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR, + "Invalid backup privileges:" + backupPrivProp); + } + copiedProperties.remove(PROP_BACKUP_PRIV); + } + + // backup_catalog + String backupCatalogProp = copiedProperties.get(PROP_BACKUP_CATALOG); + if (backupCatalogProp != null) { + if (backupCatalogProp.equalsIgnoreCase("true")) { + backupCatalog = true; + } else if (backupCatalogProp.equalsIgnoreCase("false")) { + backupCatalog = false; + } else { + ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR, + "Invalid backup catalog:" + backupCatalogProp); + } + copiedProperties.remove(PROP_BACKUP_CATALOG); + } + + // backup_workload + String backupWorkloadGroupProp = copiedProperties.get(PROP_BACKUP_WORKLOAD_GROUP); + if (backupWorkloadGroupProp != null) { + if (backupWorkloadGroupProp.equalsIgnoreCase("true")) { + backupWorkloadGroup = true; + } else if (backupWorkloadGroupProp.equalsIgnoreCase("false")) { + backupWorkloadGroup = false; + } else { + ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR, + "Invalid backup workload group:" + backupWorkloadGroupProp); + } + copiedProperties.remove(PROP_BACKUP_WORKLOAD_GROUP); + } + + if (isBackupGlobal()) { + if (properties.get(PROP_BACKUP_PRIV) == null + && properties.get(PROP_BACKUP_CATALOG) == null + && properties.get(PROP_BACKUP_WORKLOAD_GROUP) == null) { + backupPriv = true; + backupCatalog = 
true; + backupWorkloadGroup = true; + } + } else { + backupPriv = false; + backupCatalog = false; + backupWorkloadGroup = false; + } + if (!copiedProperties.isEmpty()) { ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR, "Unknown backup job properties: " + copiedProperties.keySet()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CancelBackupStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CancelBackupStmt.java index 06c1d09972c8d9..22d30430f613d8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CancelBackupStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CancelBackupStmt.java @@ -21,6 +21,7 @@ import org.apache.doris.common.AnalysisException; import org.apache.doris.common.ErrorCode; import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.FeConstants; import org.apache.doris.common.UserException; import org.apache.doris.datasource.InternalCatalog; import org.apache.doris.mysql.privilege.PrivPredicate; @@ -38,6 +39,11 @@ public CancelBackupStmt(String dbName, boolean isRestore) { this.isRestore = isRestore; } + public CancelBackupStmt(boolean isRestore) { + this.dbName = FeConstants.INTERNAL_DB_NAME; + this.isRestore = isRestore; + } + public String getDbName() { return dbName; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RestoreStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RestoreStmt.java index bc38cfe09e5606..5c70d3d478e3c1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RestoreStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RestoreStmt.java @@ -44,6 +44,9 @@ public class RestoreStmt extends AbstractBackupStmt implements NotFallbackInPars public static final String PROP_CLEAN_TABLES = "clean_tables"; public static final String PROP_CLEAN_PARTITIONS = "clean_partitions"; public static final String PROP_ATOMIC_RESTORE = "atomic_restore"; + public static final String PROP_RESERVE_PRIVILEGE = 
"reserve_privilege"; + public static final String PROP_RESERVE_CATALOG = "reserve_catalog"; + public static final String PROP_RESERVE_WORKLOAD_GROUP = "reserve_workload_group"; private boolean allowLoad = false; private ReplicaAllocation replicaAlloc = ReplicaAllocation.DEFAULT_ALLOCATION; @@ -58,15 +61,23 @@ public class RestoreStmt extends AbstractBackupStmt implements NotFallbackInPars private boolean isAtomicRestore = false; private byte[] meta = null; private byte[] jobInfo = null; + private boolean reservePrivilege = false; + private boolean reserveCatalog = false; + private boolean reserveWorkloadGroup = false; public RestoreStmt(LabelName labelName, String repoName, AbstractBackupTableRefClause restoreTableRefClause, Map properties) { - super(labelName, repoName, restoreTableRefClause, properties); + super(labelName, repoName, restoreTableRefClause, properties, false); + } + + public RestoreStmt(LabelName labelName, String repoName, + Map properties) { + super(labelName, repoName, null, properties, true); } public RestoreStmt(LabelName labelName, String repoName, AbstractBackupTableRefClause restoreTableRefClause, Map properties, byte[] meta, byte[] jobInfo) { - super(labelName, repoName, restoreTableRefClause, properties); + super(labelName, repoName, restoreTableRefClause, properties, false); this.meta = meta; this.jobInfo = jobInfo; } @@ -127,6 +138,18 @@ public boolean isAtomicRestore() { return isAtomicRestore; } + public boolean reservePrivilege() { + return reservePrivilege; + } + + public boolean reserveCatalog() { + return reserveCatalog; + } + + public boolean reserveWorkloadGroup() { + return reserveWorkloadGroup; + } + @Override public void analyze(Analyzer analyzer) throws UserException { if (repoName.equals(Repository.KEEP_ON_LOCAL_REPO_NAME)) { @@ -212,6 +235,59 @@ public void analyzeProperties() throws AnalysisException { // is atomic restore isAtomicRestore = eatBooleanProperty(copiedProperties, PROP_ATOMIC_RESTORE, isAtomicRestore); + // 
reserve privilege + if (copiedProperties.containsKey(PROP_RESERVE_PRIVILEGE)) { + if (copiedProperties.get(PROP_RESERVE_PRIVILEGE).equalsIgnoreCase("true")) { + reservePrivilege = true; + } else if (copiedProperties.get(PROP_RESERVE_PRIVILEGE).equalsIgnoreCase("false")) { + reservePrivilege = false; + } else { + ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR, + "Invalid reserve_privilege value: " + copiedProperties.get(PROP_RESERVE_PRIVILEGE)); + } + copiedProperties.remove(PROP_RESERVE_PRIVILEGE); + } + + // reserve catalog + if (copiedProperties.containsKey(PROP_RESERVE_CATALOG)) { + if (copiedProperties.get(PROP_RESERVE_CATALOG).equalsIgnoreCase("true")) { + reserveCatalog = true; + } else if (copiedProperties.get(PROP_RESERVE_CATALOG).equalsIgnoreCase("false")) { + reserveCatalog = false; + } else { + ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR, + "Invalid reserve_catalog value: " + copiedProperties.get(PROP_RESERVE_CATALOG)); + } + copiedProperties.remove(PROP_RESERVE_CATALOG); + } + + // reserve workload group + if (copiedProperties.containsKey(PROP_RESERVE_WORKLOAD_GROUP)) { + if (copiedProperties.get(PROP_RESERVE_WORKLOAD_GROUP).equalsIgnoreCase("true")) { + reserveWorkloadGroup = true; + } else if (copiedProperties.get(PROP_RESERVE_WORKLOAD_GROUP).equalsIgnoreCase("false")) { + reserveWorkloadGroup = false; + } else { + ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR, + "Invalid reserve_workload_group value: " + copiedProperties.get(PROP_RESERVE_WORKLOAD_GROUP)); + } + copiedProperties.remove(PROP_RESERVE_WORKLOAD_GROUP); + } + + if (isBackupGlobal()) { + if (!properties.containsKey(PROP_RESERVE_PRIVILEGE) + && !properties.containsKey(PROP_RESERVE_CATALOG) + && !properties.containsKey(PROP_RESERVE_WORKLOAD_GROUP)) { + reservePrivilege = true; + reserveCatalog = true; + reserveWorkloadGroup = true; + } + } else { + reservePrivilege = false; + reserveCatalog = false; + reserveWorkloadGroup = false; + } + if 
(!copiedProperties.isEmpty()) { ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR, "Unknown restore job properties: " + copiedProperties.keySet()); diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackupStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackupStmt.java index a76857a8f8bc58..51c04774661123 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackupStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackupStmt.java @@ -24,6 +24,7 @@ import org.apache.doris.common.CaseSensibility; import org.apache.doris.common.ErrorCode; import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.FeConstants; import org.apache.doris.common.PatternMatcher; import org.apache.doris.common.PatternMatcherWrapper; import org.apache.doris.common.UserException; @@ -54,6 +55,11 @@ public ShowBackupStmt(String dbName, Expr where) { this.where = where; } + public ShowBackupStmt(Expr where) { + this.dbName = FeConstants.INTERNAL_DB_NAME; + this.where = where; + } + public String getDbName() { return dbName; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRestoreStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRestoreStmt.java index 2ffa8607fbd86e..b1ed26f5686ed8 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRestoreStmt.java +++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRestoreStmt.java @@ -24,6 +24,7 @@ import org.apache.doris.common.CaseSensibility; import org.apache.doris.common.ErrorCode; import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.FeConstants; import org.apache.doris.common.PatternMatcher; import org.apache.doris.common.PatternMatcherWrapper; import org.apache.doris.common.UserException; @@ -65,6 +66,11 @@ public ShowRestoreStmt(String dbName, Expr where) { this.where = where; } + public ShowRestoreStmt(Expr where) { + this.dbName = FeConstants.INTERNAL_DB_NAME; + 
this.where = where; + } + public ShowRestoreStmt(String dbName, Expr where, boolean needBriefResult) { this.dbName = dbName; this.where = where; diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupCatalogMeta.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupCatalogMeta.java new file mode 100644 index 00000000000000..7c177e95fa0bcb --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupCatalogMeta.java @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.doris.backup; + +import org.apache.doris.common.io.Text; +import org.apache.doris.common.io.Writable; +import org.apache.doris.persist.gson.GsonUtils; + +import com.google.common.base.Strings; +import com.google.common.collect.Maps; +import com.google.gson.annotations.SerializedName; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Map; + +public class BackupCatalogMeta implements Writable { + @SerializedName(value = "catalogName") + String catalogName; + @SerializedName(value = "resource") + String resource; + @SerializedName(value = "properties") + Map properties; + @SerializedName(value = "comment") + String comment; + + public BackupCatalogMeta(String catalogName, String resource, Map properties, + String comment) { + this.catalogName = catalogName; + this.resource = Strings.nullToEmpty(resource); + this.comment = Strings.nullToEmpty(comment); + this.properties = Maps.newHashMap(properties); + } + + public String getCatalogName() { + return catalogName; + } + + public String getResource() { + return resource; + } + + public String getComment() { + return comment; + } + + public Map getProperties() { + return properties; + } + + @Override + public void write(DataOutput out) throws IOException { + Text.writeString(out, GsonUtils.GSON.toJson(this)); + } + + public static BackupCatalogMeta read(DataInput in) throws IOException { + String json = Text.readString(in); + return GsonUtils.GSON.fromJson(json, BackupCatalogMeta.class); + } +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupGlobalInfo.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupGlobalInfo.java new file mode 100644 index 00000000000000..13c975edeaac68 --- /dev/null +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupGlobalInfo.java @@ -0,0 +1,141 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.doris.backup; + +import org.apache.doris.analysis.UserIdentity; +import org.apache.doris.blockrule.SqlBlockRule; +import org.apache.doris.catalog.Env; +import org.apache.doris.common.io.Text; +import org.apache.doris.common.io.Writable; +import org.apache.doris.mysql.privilege.PasswordPolicy; +import org.apache.doris.mysql.privilege.Role; +import org.apache.doris.mysql.privilege.User; +import org.apache.doris.mysql.privilege.UserProperty; +import org.apache.doris.persist.gson.GsonUtils; +import org.apache.doris.policy.Policy; +import org.apache.doris.policy.PolicyTypeEnum; +import org.apache.doris.resource.workloadgroup.WorkloadGroup; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.gson.annotations.SerializedName; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class BackupGlobalInfo implements Writable { + // Users + @SerializedName(value = "users") + private List userList = Lists.newArrayList(); + // roles + @SerializedName(value = "roles") + private List roleList = Lists.newArrayList(); + @SerializedName(value = "userProperties") + private List userProperties = 
Lists.newArrayList(); + @SerializedName(value = "roleToUsers") + private Map> roleToUsers = Maps.newHashMap(); + @SerializedName(value = "policyMap") + private Map policyMap = Maps.newHashMap(); + @SerializedName(value = "propertyMap") + protected Map propertyMap = Maps.newHashMap(); + // rowPolicy name -> policy + @SerializedName(value = "rowPolicies") + private List rowPolicies = Lists.newArrayList(); + @SerializedName(value = "sqlBlockRules") + private List sqlBlockRules = Lists.newArrayList(); + @SerializedName(value = "catalogs") + private List catalogs = Lists.newArrayList(); + @SerializedName(value = "workloadGroups") + private List workloadGroups = Lists.newArrayList(); + + public BackupGlobalInfo() { + } + + public List getUserList() { + return userList; + } + + public List getRoleList() { + return roleList; + } + + public List getUserProperties() { + return userProperties; + } + + public Map> getRoleToUsers() { + return roleToUsers; + } + + public Map getPolicyMap() { + return policyMap; + } + + public Set getUsersByRole(String roleName) { + return roleToUsers.get(roleName); + } + + public List getRowPolicies() { + return rowPolicies; + } + + public List getWorkloadGroups() { + return workloadGroups; + } + + public List getCatalogs() { + return catalogs; + } + + public List getSqlBlockRules() { + return sqlBlockRules; + } + + public void init(boolean backupPriv, boolean backupCatalog, boolean backupWorkloadGroup) { + + if (backupPriv) { + Env.getCurrentEnv().getAuth().getAuthInfoCopied(userList, roleList, userProperties, roleToUsers, + propertyMap, policyMap); + rowPolicies = Env.getCurrentEnv().getPolicyMgr().getCopiedPoliciesByType(PolicyTypeEnum.ROW); + sqlBlockRules = Env.getCurrentEnv().getSqlBlockRuleMgr().getAllRulesCopied(); + } + + if (backupCatalog) { + catalogs = Env.getCurrentEnv().getCatalogMgr().getAllCatalogsCopied(); + } + + if (backupWorkloadGroup) { + workloadGroups = 
Env.getCurrentEnv().getWorkloadGroupMgr().getAllWorkloadGroupsCopied(); + } + } + + public static BackupGlobalInfo read(DataInput in) throws IOException { + String json = Text.readString(in); + return GsonUtils.GSON.fromJson(json, BackupGlobalInfo.class); + } + + @Override + public void write(DataOutput out) throws IOException { + Text.writeString(out, GsonUtils.GSON.toJson(this)); + } + +} diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java index 6f88881e3cb2a3..363d39067e398a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java @@ -44,6 +44,7 @@ import org.apache.doris.common.DdlException; import org.apache.doris.common.ErrorCode; import org.apache.doris.common.ErrorReport; +import org.apache.doris.common.FeConstants; import org.apache.doris.common.Pair; import org.apache.doris.common.io.Writable; import org.apache.doris.common.util.MasterDaemon; @@ -65,6 +66,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -331,6 +333,9 @@ public void process(AbstractBackupStmt stmt) throws DdlException { // check if db exist String dbName = stmt.getDbName(); + if (StringUtils.isEmpty(dbName) && stmt.isBackupGlobal()) { + dbName = FeConstants.INTERNAL_DB_NAME; + } Database db = env.getInternalCatalog().getDbOrDdlException(dbName); // Try to get sequence lock. 
@@ -388,7 +393,9 @@ private void backup(Repository repository, Database db, BackupStmt stmt) throws // Determine the tables to be backed up if (abstractBackupTableRefClause == null) { - tableNames = db.getTableNames(); + if (!stmt.isBackupGlobal()) { + tableNames = db.getTableNames(); + } } else if (abstractBackupTableRefClause.isExclude()) { tableNames = db.getTableNames(); for (TableRef tableRef : abstractBackupTableRefClause.getTableRefList()) { @@ -496,7 +503,7 @@ private void backup(Repository repository, Database db, BackupStmt stmt) throws // Create a backup job BackupJob backupJob = new BackupJob(stmt.getLabel(), db.getId(), ClusterNamespace.getNameFromFullName(db.getFullName()), - tblRefs, stmt.getTimeoutMs(), stmt.getContent(), env, repoId, commitSeq); + tblRefs, stmt.getTimeoutMs(), stmt, env, repoId, commitSeq); // write log env.getEditLog().logBackupJob(backupJob); @@ -530,13 +537,18 @@ private void restore(Repository repository, Database db, RestoreStmt stmt) throw "Failed to get info of snapshot '" + stmt.getLabel() + "' because: " + status.getErrMsg() + ". Maybe specified wrong backup timestamp"); } - // Check if all restore objects are exist in this snapshot. 
// Also remove all unrelated objs Preconditions.checkState(infos.size() == 1); jobInfo = infos.get(0); } + if (!jobInfo.backupOlapTableObjects.isEmpty() && stmt.isBackupGlobal()) { + ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, + "Failed to restore from snapshot '" + stmt.getLabel() + + "' because: This snapshot is not a global backup."); + } + checkAndFilterRestoreObjsExistInSnapshot(jobInfo, stmt.getAbstractBackupTableRefClause()); // Create a restore job @@ -558,14 +570,15 @@ private void restore(Repository repository, Database db, RestoreStmt stmt) throw jobInfo.getBackupTime(), TimeUtils.getDatetimeFormatWithHyphenWithTimeZone()); restoreJob = new RestoreJob(stmt.getLabel(), backupTimestamp, db.getId(), db.getFullName(), jobInfo, stmt.allowLoad(), stmt.getReplicaAlloc(), - stmt.getTimeoutMs(), metaVersion, stmt.reserveReplica(), - stmt.reserveDynamicPartitionEnable(), stmt.isBeingSynced(), - stmt.isCleanTables(), stmt.isCleanPartitions(), stmt.isAtomicRestore(), + stmt.getTimeoutMs(), metaVersion, stmt.reserveReplica(), stmt.reserveDynamicPartitionEnable(), + stmt.reservePrivilege(), stmt.reserveCatalog(), stmt.reserveWorkloadGroup(), + stmt.isBeingSynced(), stmt.isCleanTables(), stmt.isCleanPartitions(), stmt.isAtomicRestore(), env, Repository.KEEP_ON_LOCAL_REPO_ID, backupMeta); } else { restoreJob = new RestoreJob(stmt.getLabel(), stmt.getBackupTimestamp(), db.getId(), db.getFullName(), jobInfo, stmt.allowLoad(), stmt.getReplicaAlloc(), stmt.getTimeoutMs(), stmt.getMetaVersion(), stmt.reserveReplica(), stmt.reserveDynamicPartitionEnable(), + stmt.reservePrivilege(), stmt.reserveCatalog(), stmt.reserveWorkloadGroup(), stmt.isBeingSynced(), stmt.isCleanTables(), stmt.isCleanPartitions(), stmt.isAtomicRestore(), env, repository.getId()); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java index de12670807f20e..5e4843cbb51afb 100644 --- 
a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java @@ -120,7 +120,9 @@ public enum BackupJobState { private BackupMeta backupMeta; // job info file content private BackupJobInfo jobInfo; - + private boolean isBackupPriv = false; + private boolean isBackupCatalog = false; + private boolean isBackupWorkloadGroup = false; // save the local dir of this backup job // after job is done, this dir should be deleted private Path localJobDirPath = null; @@ -145,13 +147,16 @@ public BackupJob(JobType jobType) { } public BackupJob(String label, long dbId, String dbName, List tableRefs, long timeoutMs, - BackupContent content, Env env, long repoId, long commitSeq) { + BackupStmt stmt, Env env, long repoId, long commitSeq) { super(JobType.BACKUP, label, dbId, dbName, timeoutMs, env, repoId); this.tableRefs = tableRefs; this.state = BackupJobState.PENDING; this.commitSeq = commitSeq; - properties.put(BackupStmt.PROP_CONTENT, content.name()); + properties.put(BackupStmt.PROP_CONTENT, stmt.getContent().name()); properties.put(SNAPSHOT_COMMIT_SEQ, String.valueOf(commitSeq)); + isBackupPriv = stmt.isBackupPriv(); + isBackupCatalog = stmt.isBackupCatalog(); + isBackupWorkloadGroup = stmt.isBackupWorkloadGroup(); } public BackupJobState getState() { @@ -174,6 +179,18 @@ public String getLocalMetaInfoFilePath() { return localMetaInfoFilePath; } + public boolean isBackupPriv() { + return isBackupPriv; + } + + public boolean isBackupCatalog() { + return isBackupCatalog; + } + + public boolean isBackupWorkloadGroup() { + return isBackupWorkloadGroup; + } + public BackupContent getContent() { if (properties.containsKey(BackupStmt.PROP_CONTENT)) { return BackupStmt.BackupContent.valueOf(properties.get(BackupStmt.PROP_CONTENT).toUpperCase()); @@ -865,7 +882,8 @@ private void saveMetaInfo() { } } jobInfo = BackupJobInfo.fromCatalog(createTime, label, dbName, dbId, - getContent(), backupMeta, snapshotInfos, 
tableCommitSeqMap); + getContent(), backupMeta, snapshotInfos, tableCommitSeqMap, + isBackupPriv(), isBackupCatalog(), isBackupWorkloadGroup()); if (LOG.isDebugEnabled()) { LOG.debug("job info: {}. {}", jobInfo, this); } diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJobInfo.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJobInfo.java index 554a21c44080f7..12b6a24dce108e 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJobInfo.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJobInfo.java @@ -20,6 +20,7 @@ import org.apache.doris.analysis.BackupStmt.BackupContent; import org.apache.doris.analysis.PartitionNames; import org.apache.doris.analysis.TableRef; +import org.apache.doris.analysis.UserIdentity; import org.apache.doris.backup.RestoreFileMapping.IdChain; import org.apache.doris.catalog.Env; import org.apache.doris.catalog.MaterializedIndex; @@ -39,8 +40,11 @@ import org.apache.doris.common.Version; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; +import org.apache.doris.mysql.privilege.Role; +import org.apache.doris.mysql.privilege.User; import org.apache.doris.persist.gson.GsonPostProcessable; import org.apache.doris.persist.gson.GsonUtils; +import org.apache.doris.resource.workloadgroup.WorkloadGroup; import org.apache.doris.thrift.TNetworkAddress; import com.google.common.base.Joiner; @@ -335,6 +339,14 @@ public static class BriefBackupJobInfo { public List odbcTableList = Lists.newArrayList(); @SerializedName("odbc_resource_list") public List odbcResourceList = Lists.newArrayList(); + @SerializedName("user_list") + public List userList = Lists.newArrayList(); + @SerializedName("role_list") + public List roleList = Lists.newArrayList(); + @SerializedName("catalog_list") + public List catalogList = Lists.newArrayList(); + @SerializedName("workload_group_list") + public List workloadGroupList = Lists.newArrayList(); public static BriefBackupJobInfo 
fromBackupJobInfo(BackupJobInfo backupJobInfo) { BriefBackupJobInfo briefBackupJobInfo = new BriefBackupJobInfo(); @@ -352,6 +364,42 @@ public static BriefBackupJobInfo fromBackupJobInfo(BackupJobInfo backupJobInfo) briefBackupJobInfo.viewList = backupJobInfo.newBackupObjects.views; briefBackupJobInfo.odbcTableList = backupJobInfo.newBackupObjects.odbcTables; briefBackupJobInfo.odbcResourceList = backupJobInfo.newBackupObjects.odbcResources; + + BackupGlobalInfo backupGlobalInfo = backupJobInfo.newBackupObjects.backupGlobalInfo; + if (backupGlobalInfo != null) { + //users + List userList = backupGlobalInfo.getUserList(); + for (User user : userList) { + BackupUserInfo backupUserInfo = new BackupUserInfo(); + backupUserInfo.userIdentity = user.getUserIdentity(); + briefBackupJobInfo.userList.add(backupUserInfo); + } + + // roles + List roleList = backupGlobalInfo.getRoleList(); + for (Role role : roleList) { + BackupRoleInfo backupRoleInfo = new BackupRoleInfo(); + backupRoleInfo.name = role.getRoleName(); + briefBackupJobInfo.roleList.add(backupRoleInfo); + } + + // catalogs + List catalogs = backupGlobalInfo.getCatalogs(); + for (BackupCatalogMeta catalog : catalogs) { + BackupCatalogInfo backupCatalogInfo = new BackupCatalogInfo(); + backupCatalogInfo.name = catalog.getCatalogName(); + briefBackupJobInfo.catalogList.add(backupCatalogInfo); + } + + // workloadGroups + List workloadGroups = backupGlobalInfo.getWorkloadGroups(); + for (WorkloadGroup workloadGroup : workloadGroups) { + BackupWorkloadGroupInfo backupWorkloadGroupInfo = new BackupWorkloadGroupInfo(); + backupWorkloadGroupInfo.name = workloadGroup.getName(); + briefBackupJobInfo.workloadGroupList.add(backupWorkloadGroupInfo); + } + } + return briefBackupJobInfo; } } @@ -370,6 +418,8 @@ public static class NewBackupObjects { public List odbcTables = Lists.newArrayList(); @SerializedName("odbc_resources") public List odbcResources = Lists.newArrayList(); + @SerializedName("BackupGlobalInfo") + public 
BackupGlobalInfo backupGlobalInfo = null; } public static class BackupOlapTableInfo { @@ -488,6 +538,26 @@ public static class BackupOdbcResourceInfo { public String name; } + public static class BackupUserInfo { + @SerializedName("userIdentity") + public UserIdentity userIdentity; + } + + public static class BackupRoleInfo { + @SerializedName("name") + public String name; + } + + public static class BackupCatalogInfo { + @SerializedName("name") + public String name; + } + + public static class BackupWorkloadGroupInfo { + @SerializedName("name") + public String name; + } + // eg: __db_10001/__tbl_10002/__part_10003/__idx_10002/__10004 public String getFilePath(String db, String tbl, String part, String idx, long tabletId) { if (!db.equalsIgnoreCase(dbName)) { @@ -601,7 +671,8 @@ public Long getSchemaHash(long tableId, long partitionId, long indexId) { public static BackupJobInfo fromCatalog(long backupTime, String label, String dbName, long dbId, BackupContent content, BackupMeta backupMeta, - Map snapshotInfos, Map tableCommitSeqMap) { + Map snapshotInfos, Map tableCommitSeqMap, + boolean backupPriv, boolean backupCatalog, boolean backupWorkloadGroup) { BackupJobInfo jobInfo = new BackupJobInfo(); jobInfo.backupTime = backupTime; @@ -688,6 +759,8 @@ public static BackupJobInfo fromCatalog(long backupTime, String label, String db jobInfo.newBackupObjects.odbcResources.add(backupOdbcResourceInfo); } } + jobInfo.newBackupObjects.backupGlobalInfo = new BackupGlobalInfo(); + jobInfo.newBackupObjects.backupGlobalInfo.init(backupPriv, backupCatalog, backupWorkloadGroup); return jobInfo; } diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java index 6dfd02b3a42648..906304f5dcce83 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java +++ b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java @@ -18,13 +18,22 @@ package org.apache.doris.backup; 
import org.apache.doris.analysis.BackupStmt.BackupContent; +import org.apache.doris.analysis.CreateCatalogStmt; +import org.apache.doris.analysis.CreateWorkloadGroupStmt; +import org.apache.doris.analysis.DropCatalogStmt; +import org.apache.doris.analysis.DropPolicyStmt; +import org.apache.doris.analysis.DropWorkloadGroupStmt; +import org.apache.doris.analysis.PasswordOptions; import org.apache.doris.analysis.RestoreStmt; +import org.apache.doris.analysis.TableName; +import org.apache.doris.analysis.UserIdentity; import org.apache.doris.backup.BackupJobInfo.BackupIndexInfo; import org.apache.doris.backup.BackupJobInfo.BackupOlapTableInfo; import org.apache.doris.backup.BackupJobInfo.BackupPartitionInfo; import org.apache.doris.backup.BackupJobInfo.BackupTabletInfo; import org.apache.doris.backup.RestoreFileMapping.IdChain; import org.apache.doris.backup.Status.ErrCode; +import org.apache.doris.blockrule.SqlBlockRule; import org.apache.doris.catalog.BinlogConfig; import org.apache.doris.catalog.DataProperty; import org.apache.doris.catalog.Database; @@ -68,9 +77,18 @@ import org.apache.doris.common.util.TimeUtils; import org.apache.doris.datasource.InternalCatalog; import org.apache.doris.datasource.property.S3ClientBEProperties; +import org.apache.doris.mysql.privilege.PasswordPolicy; +import org.apache.doris.mysql.privilege.Role; +import org.apache.doris.mysql.privilege.User; +import org.apache.doris.mysql.privilege.UserProperty; import org.apache.doris.persist.gson.GsonPostProcessable; import org.apache.doris.persist.gson.GsonUtils; +import org.apache.doris.policy.Policy; +import org.apache.doris.policy.PolicyTypeEnum; +import org.apache.doris.policy.RowPolicy; import org.apache.doris.resource.Tag; +import org.apache.doris.resource.workloadgroup.WorkloadGroup; +import org.apache.doris.resource.workloadgroup.WorkloadGroupMgr; import org.apache.doris.task.AgentBatchTask; import org.apache.doris.task.AgentTask; import org.apache.doris.task.AgentTaskExecutor; @@ 
-111,6 +129,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.zip.GZIPInputStream; @@ -123,6 +142,9 @@ public class RestoreJob extends AbstractJob implements GsonPostProcessable { private static final String PROP_CLEAN_TABLES = RestoreStmt.PROP_CLEAN_TABLES; private static final String PROP_CLEAN_PARTITIONS = RestoreStmt.PROP_CLEAN_PARTITIONS; private static final String PROP_ATOMIC_RESTORE = RestoreStmt.PROP_ATOMIC_RESTORE; + private static final String PROP_RESERVE_PRIVILEGE = RestoreStmt.PROP_RESERVE_PRIVILEGE; + private static final String PROP_RESERVE_CATALOG = RestoreStmt.PROP_RESERVE_CATALOG; + private static final String PROP_RESERVE_WORKLOAD_GROUP = RestoreStmt.PROP_RESERVE_WORKLOAD_GROUP; private static final String ATOMIC_RESTORE_TABLE_PREFIX = "__doris_atomic_restore_prefix__"; private static final Logger LOG = LogManager.getLogger(RestoreJob.class); @@ -190,7 +212,18 @@ public enum RestoreJobState { // tablet id->(be id -> snapshot info) @SerializedName("si") private com.google.common.collect.Table snapshotInfos = HashBasedTable.create(); - + @SerializedName("users") + List restoredUsers = Lists.newArrayList(); + @SerializedName("roles") + List>, Role>> restoredRoles = Lists.newArrayList(); + @SerializedName("workloadgroups") + private List restoredWorkloadGroups = Lists.newArrayList(); + @SerializedName("catalogs") + private List restoredCatalogs = Lists.newArrayList(); + @SerializedName("rowpolicies") + private List restoredRowPolicies = Lists.newArrayList(); + @SerializedName("sqlBlockRules") + private List restoredSqlBlockRules = Lists.newArrayList(); private Map unfinishedSignatureToId = Maps.newConcurrentMap(); // the meta version is used when reading backup meta from file. 
@@ -210,6 +243,12 @@ public enum RestoreJobState { private boolean isCleanPartitions = false; // Whether to restore the data into a temp table, and then replace the origin one. private boolean isAtomicRestore = false; + // Whether to restore privileges + private boolean reservePrivilege = false; + // Whether to restore catalogs + private boolean reserveCatalog = false; + // Whether to restore workload group + private boolean reserveWorkloadGroup = false; // restore properties @SerializedName("prop") @@ -227,7 +266,8 @@ public RestoreJob(JobType jobType) { public RestoreJob(String label, String backupTs, long dbId, String dbName, BackupJobInfo jobInfo, boolean allowLoad, ReplicaAllocation replicaAlloc, long timeoutMs, int metaVersion, boolean reserveReplica, - boolean reserveDynamicPartitionEnable, boolean isBeingSynced, boolean isCleanTables, + boolean reserveDynamicPartitionEnable, boolean reservePrivilege, boolean reserveCatalog, + boolean reserveWorkloadGroup, boolean isBeingSynced, boolean isCleanTables, boolean isCleanPartitions, boolean isAtomicRestore, Env env, long repoId) { super(JobType.RESTORE, label, dbId, dbName, timeoutMs, env, repoId); this.backupTimestamp = backupTs; @@ -246,21 +286,28 @@ public RestoreJob(String label, String backupTs, long dbId, String dbName, Backu this.isCleanTables = isCleanTables; this.isCleanPartitions = isCleanPartitions; this.isAtomicRestore = isAtomicRestore; + this.reservePrivilege = reservePrivilege; + this.reserveCatalog = reserveCatalog; + this.reserveWorkloadGroup = reserveWorkloadGroup; properties.put(PROP_RESERVE_REPLICA, String.valueOf(reserveReplica)); properties.put(PROP_RESERVE_DYNAMIC_PARTITION_ENABLE, String.valueOf(reserveDynamicPartitionEnable)); properties.put(PROP_IS_BEING_SYNCED, String.valueOf(isBeingSynced)); properties.put(PROP_CLEAN_TABLES, String.valueOf(isCleanTables)); properties.put(PROP_CLEAN_PARTITIONS, String.valueOf(isCleanPartitions)); properties.put(PROP_ATOMIC_RESTORE, 
String.valueOf(isAtomicRestore)); + properties.put(PROP_RESERVE_PRIVILEGE, String.valueOf(reservePrivilege)); + properties.put(PROP_RESERVE_CATALOG, String.valueOf(reserveCatalog)); + properties.put(PROP_RESERVE_WORKLOAD_GROUP, String.valueOf(reserveWorkloadGroup)); } public RestoreJob(String label, String backupTs, long dbId, String dbName, BackupJobInfo jobInfo, boolean allowLoad, ReplicaAllocation replicaAlloc, long timeoutMs, int metaVersion, boolean reserveReplica, - boolean reserveDynamicPartitionEnable, boolean isBeingSynced, boolean isCleanTables, + boolean reserveDynamicPartitionEnable, boolean reservePrivilege, boolean reserveCatalog, + boolean reserveWorkloadGroup, boolean isBeingSynced, boolean isCleanTables, boolean isCleanPartitions, boolean isAtomicRestore, Env env, long repoId, BackupMeta backupMeta) { this(label, backupTs, dbId, dbName, jobInfo, allowLoad, replicaAlloc, timeoutMs, metaVersion, reserveReplica, - reserveDynamicPartitionEnable, isBeingSynced, isCleanTables, isCleanPartitions, isAtomicRestore, env, - repoId); + reserveDynamicPartitionEnable, reservePrivilege, reserveCatalog, reserveWorkloadGroup, isBeingSynced, + isCleanTables, isCleanPartitions, isAtomicRestore, env, repoId); this.backupMeta = backupMeta; } @@ -957,6 +1004,11 @@ private void checkAndPrepareMeta() { if (!status.ok()) { return; } + // check and restore global info + checkAndRestoreGlobalInfo(); + if (!status.ok()) { + return; + } if (LOG.isDebugEnabled()) { LOG.debug("finished to restore resources. 
{}", this.jobId); } @@ -1263,6 +1315,205 @@ private void checkAndRestoreResources() { } } + private void checkAndRestorePrivileges() { + List users = jobInfo.newBackupObjects.backupGlobalInfo.getUserList(); + List localUsers = Lists.newArrayList(); + + for (User user : users) { + if (Env.getCurrentEnv().getAuth().doesUserExist(user.getUserIdentity())) { + localUsers.add(user.getUserIdentity()); + continue; + } + try { + Env.getCurrentEnv().getAuth().createUserInternal(user.getUserIdentity(), null, + user.getPassword().getPassword(), true, PasswordOptions.UNSET_OPTION, + user.getComment(), UUID.randomUUID().toString(), false); + } catch (DdlException e) { + LOG.error("restore user fail should not happen", e); + status = new Status(ErrCode.COMMON_ERROR, "restore user " + + user.getUserIdentity().toString() + " failed:" + e.getMessage()); + return; + } + restoredUsers.add(user); + } + + List userProperties = jobInfo.newBackupObjects.backupGlobalInfo.getUserProperties(); + for (UserProperty userProperty : userProperties) { + for (UserIdentity localIdentity : localUsers) { + if (localIdentity.getUser().equals(userProperty.getQualifiedUser())) { + continue; + } + } + try { + List> properties = Lists.newArrayList(); + List> list = userProperty.fetchProperty(); + for (List row : list) { + String key = row.get(0); + String value = row.get(1); + if (key.equals(UserProperty.PROP_RESOURCE_TAGS)) { + continue; + } + + if (key.equals(UserProperty.PROP_WORKLOAD_GROUP) && !reserveWorkloadGroup) { + properties.add(Pair.of(key, WorkloadGroupMgr.DEFAULT_GROUP_NAME)); + continue; + } + + if (!value.equals("")) { + properties.add(Pair.of(key, value)); + } + + } + Env.getCurrentEnv().getAuth().updateUserPropertyInternal(userProperty.getQualifiedUser(), + properties, false /* is replay */); + } catch (Exception e) { + LOG.error("restore user property fail should not happen", e); + status = new Status(ErrCode.COMMON_ERROR, "restore user " + + userProperty.getQualifiedUser() + "'s property 
failed:" + e.getMessage()); + return; + } + } + + Map policyMap = jobInfo.newBackupObjects.backupGlobalInfo.getPolicyMap(); + for (Map.Entry entry : policyMap.entrySet()) { + UserIdentity identity = entry.getKey(); + PasswordPolicy passwordPolicy = entry.getValue(); + for (UserIdentity localIdentity : localUsers) { + if (localIdentity.equals(identity)) { + continue; + } + } + try { + PasswordOptions passwordOptions = new PasswordOptions(passwordPolicy.getExpirePolicy().expirationSecond, + passwordPolicy.getHistoryPolicy().historyNum, -2, + passwordPolicy.getFailedLoginPolicy().numFailedLogin, + passwordPolicy.getFailedLoginPolicy().passwordLockSeconds, -2); + Env.getCurrentEnv().getAuth().getPasswdPolicyManager().updatePolicy(identity, null, passwordOptions); + } catch (Exception e) { + LOG.error("restore user password policy fail should not happen", e); + status = new Status(ErrCode.COMMON_ERROR, "restore user " + + identity.toString() + "'s password policy failed:" + e.getMessage()); + return; + } + } + + List rowPolicies = jobInfo.newBackupObjects.backupGlobalInfo.getRowPolicies(); + for (Policy policy : rowPolicies) { + RowPolicy rowPolicy = (RowPolicy) policy; + if (Env.getCurrentEnv().getPolicyMgr().existPolicy(policy)) { + continue; + } + + try { + Env.getCurrentEnv().getPolicyMgr().createRowPolicy(rowPolicy); + } catch (Exception e) { + LOG.error("restore row policy fail should not happen", e); + status = new Status(ErrCode.COMMON_ERROR, "restore row policy " + + policy.getPolicyName() + " failed:" + e.getMessage()); + return; + } + restoredRowPolicies.add(rowPolicy); + } + + List sqlBlockRules = jobInfo.newBackupObjects.backupGlobalInfo.getSqlBlockRules(); + for (SqlBlockRule sqlBlockRule : sqlBlockRules) { + if (Env.getCurrentEnv().getSqlBlockRuleMgr().existRule(sqlBlockRule.getName())) { + continue; + } + + try { + Env.getCurrentEnv().getSqlBlockRuleMgr().createSqlBlockRule(sqlBlockRule, false); + } catch (Exception e) { + LOG.error("restore sqlBlockRule 
fail should not happen", e); + status = new Status(ErrCode.COMMON_ERROR, "restore sqlBlockRule " + + sqlBlockRule.getName() + " failed:" + e.getMessage()); + return; + } + restoredSqlBlockRules.add(sqlBlockRule); + } + + List roles = jobInfo.newBackupObjects.backupGlobalInfo.getRoleList(); + for (Role role : roles) { + Role oldRole = Env.getCurrentEnv().getAuth().getRoleByName(role.getRoleName()); + Set userIdentities = null; + if (oldRole != null) { + oldRole = oldRole.clone(); + userIdentities = Env.getCurrentEnv().getAuth().getRoleUsers(oldRole.getRoleName()); + } + try { + Env.getCurrentEnv().getAuth().createRoleInternal(role.getRoleName(), + true, role.getComment(), false); + Env.getCurrentEnv().getAuth().grant(role, jobInfo.newBackupObjects.backupGlobalInfo + .getUsersByRole(role.getRoleName())); + } catch (DdlException e) { + LOG.error("restore role fail should not happen", e); + status = new Status(ErrCode.COMMON_ERROR, "restore role " + + role.getRoleName() + " failed:" + e.getMessage()); + return; + } + Role newRole = Env.getCurrentEnv().getAuth().getRoleByName(role.getRoleName()); + restoredRoles.add(Pair.of(Pair.of(oldRole, userIdentities), newRole)); + } + } + + private void checkAndRestoreCatalogs() { + List catalogs = jobInfo.newBackupObjects.backupGlobalInfo.getCatalogs(); + for (BackupCatalogMeta catalog : catalogs) { + if (Env.getCurrentEnv().getCatalogMgr().getCatalog(catalog.getCatalogName()) != null) { + status = new Status(ErrCode.COMMON_ERROR, "catalog " + + catalog.getCatalogName() + " already exist!"); + return; + } + try { + CreateCatalogStmt stmt = new CreateCatalogStmt(false, catalog.getCatalogName(), catalog.getResource(), + catalog.getProperties(), catalog.getComment()); + Env.getCurrentEnv().getCatalogMgr().createCatalog(stmt); + } catch (Exception e) { + status = new Status(ErrCode.COMMON_ERROR, "restore catalog " + + catalog.getCatalogName() + " failed:" + e.getMessage()); + return; + } + restoredCatalogs.add(catalog); + } + } + + 
private void checkAndRestoreWorkloadGroups() { + List workloadGroups = jobInfo.newBackupObjects.backupGlobalInfo.getWorkloadGroups(); + for (WorkloadGroup workloadGroup : workloadGroups) { + if (Env.getCurrentEnv().getWorkloadGroupMgr().isWorkloadGroupExists(workloadGroup.getName())) { + status = new Status(ErrCode.COMMON_ERROR, "workload group " + + workloadGroup.getName() + " already exist!"); + return; + } + + try { + CreateWorkloadGroupStmt stmt = new CreateWorkloadGroupStmt(false, workloadGroup.getName(), + workloadGroup.getProperties()); + Env.getCurrentEnv().getWorkloadGroupMgr().createWorkloadGroup(stmt); + } catch (Exception e) { + status = new Status(ErrCode.COMMON_ERROR, "restore workload group " + + workloadGroup.getName() + " failed:" + e.getMessage()); + return; + } + restoredWorkloadGroups.add(workloadGroup); + } + } + + private void checkAndRestoreGlobalInfo() { + if (jobInfo.newBackupObjects.backupGlobalInfo == null) { + return; + } + if (reservePrivilege) { + checkAndRestorePrivileges(); + } + if (reserveCatalog) { + checkAndRestoreCatalogs(); + } + if (reserveWorkloadGroup) { + checkAndRestoreWorkloadGroups(); + } + } + private boolean genFileMappingWhenBackupReplicasEqual(PartitionInfo localPartInfo, Partition localPartition, Table localTbl, BackupPartitionInfo backupPartInfo, String partitionName, BackupOlapTableInfo tblInfo, ReplicaAllocation remoteReplicaAlloc) { @@ -2100,6 +2351,12 @@ private Status allTabletCommitted(boolean isReplay) { restoredPartitions.clear(); restoredTbls.clear(); restoredResources.clear(); + restoredCatalogs.clear(); + restoredWorkloadGroups.clear(); + restoredRowPolicies.clear(); + restoredSqlBlockRules.clear(); + restoredUsers.clear(); + restoredRoles.clear(); // release snapshot before clearing snapshotInfos releaseSnapshots(); @@ -2367,6 +2624,86 @@ private void cancelInternal(boolean isReplay) { LOG.info("remove restored resource when cancelled: {}", resource.getName()); resourceMgr.dropResource(resource); } + + 
// remove restored workloadgroups + for (WorkloadGroup workloadGroup : restoredWorkloadGroups) { + LOG.info("remove restored workloadGroup when cancelled: {}", workloadGroup.getName()); + try { + DropWorkloadGroupStmt stmt = new DropWorkloadGroupStmt(true, workloadGroup.getName()); + Env.getCurrentEnv().getWorkloadGroupMgr().dropWorkloadGroup(stmt); + } catch (Exception e) { + LOG.info("Drop restored workload group " + workloadGroup.getName() + + " failed when cancelled:", e.getMessage()); + } + } + + // remove restored catalogs + for (BackupCatalogMeta catalogMeta : restoredCatalogs) { + LOG.info("remove restored catalog when cancelled: {}", catalogMeta.getCatalogName()); + try { + DropCatalogStmt stmt = new DropCatalogStmt(true, catalogMeta.getCatalogName()); + Env.getCurrentEnv().getCatalogMgr().dropCatalog(stmt); + } catch (Exception e) { + LOG.info("Drop restored catalog " + catalogMeta.getCatalogName() + + " failed when cancelled:", e.getMessage()); + } + } + + // remove restored users + for (User user : restoredUsers) { + LOG.info("remove restored user when cancelled: {}", user.getUserIdentity()); + try { + Env.getCurrentEnv().getAuth().dropUser(user.getUserIdentity(), true); + } catch (Exception e) { + LOG.info("Drop restored user " + user.getUserIdentity() + + " failed when cancelled:", e.getMessage()); + } + } + + // remove restored roles + for (Pair>, Role> pair : restoredRoles) { + Role oldRule = pair.first.first; + Set userIdentities = pair.first.second; + Role newRule = pair.second; + LOG.info("remove restored role when cancelled: {}", newRule.getRoleName()); + + try { + Env.getCurrentEnv().getAuth().dropRole(newRule.getRoleName(), true); + if (oldRule != null) { + Env.getCurrentEnv().getAuth().createRoleInternal(oldRule.getRoleName(), + true, oldRule.getComment(), false); + Env.getCurrentEnv().getAuth().grant(oldRule, userIdentities); + } + } catch (Exception e) { + LOG.info("remove restored role " + newRule.getRoleName() + + " failed when 
cancelled:", e.getMessage()); + } + } + + // remove restored rowpolicies + for (RowPolicy rowPolicy : restoredRowPolicies) { + LOG.info("remove restored row policy when cancelled: {}", rowPolicy.getPolicyIdent()); + try { + DropPolicyStmt stmt = new DropPolicyStmt(PolicyTypeEnum.ROW, true, rowPolicy.getPolicyName(), + new TableName(rowPolicy.getCtlName(), rowPolicy.getDbName(), rowPolicy.getTableName()), + rowPolicy.getUser(), rowPolicy.getRoleName()); + Env.getCurrentEnv().getPolicyMgr().dropPolicy(stmt); + } catch (Exception e) { + LOG.info("Drop restored row policy " + rowPolicy.getPolicyName() + + " failed when cancelled:", e.getMessage()); + } + } + + // remove restored sqlBlockRules + List ruleNames = Lists.newArrayList(); + for (SqlBlockRule sqlBlockRule : restoredSqlBlockRules) { + ruleNames.add(sqlBlockRule.getName()); + } + try { + Env.getCurrentEnv().getSqlBlockRuleMgr().dropSqlBlockRule(ruleNames, true); + } catch (Exception e) { + LOG.info("Drop restored sql block rule failed when cancelled:", e.getMessage()); + } } if (!isReplay) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRule.java b/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRule.java index 41eecf9492533a..7b37fc72b1b301 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRule.java +++ b/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRule.java @@ -19,6 +19,8 @@ import org.apache.doris.analysis.AlterSqlBlockRuleStmt; import org.apache.doris.analysis.CreateSqlBlockRuleStmt; +import org.apache.doris.common.FeConstants; +import org.apache.doris.common.io.DeepCopy; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; import org.apache.doris.common.util.SqlBlockUtil; @@ -202,4 +204,13 @@ public void gsonPostProcess() { this.setSqlPattern(Pattern.compile(this.getSql())); } } + + @Override + public SqlBlockRule clone() { + SqlBlockRule copied = DeepCopy.copy(this, SqlBlockRule.class, 
FeConstants.meta_version); + if (copied == null) { + return null; + } + return copied; + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java b/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java index e4e288bc14b2bd..137201d438c5da 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java @@ -46,6 +46,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; @@ -68,6 +69,14 @@ private void writeUnlock() { lock.writeLock().unlock(); } + private void readLock() { + lock.readLock().lock(); + } + + private void readUnlock() { + lock.readLock().unlock(); + } + /** * Judge whether exist rule by ruleName. **/ @@ -267,6 +276,21 @@ public void matchSql(String originSql, String sqlHash, String user) throws Analy } } + public List getAllRulesCopied() { + List rules = Lists.newArrayList(); + readLock(); + try { + // get all rules + for (Entry entry : nameToSqlBlockRuleMap.entrySet()) { + SqlBlockRule rule = entry.getValue(); + rules.add(rule.clone()); + } + } finally { + readUnlock(); + } + return rules; + } + private void matchSql(SqlBlockRule rule, String originSql, String sqlHash) throws AnalysisException { if (rule.getEnable()) { if (StringUtils.isNotEmpty(rule.getSqlHash()) && !SqlBlockUtil.STRING_DEFAULT.equals(rule.getSqlHash()) diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java index f90a2a32fdc3ec..e2b7d43a2de944 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogMgr.java @@ -25,6 +25,7 @@ import org.apache.doris.analysis.ShowCatalogStmt; import 
org.apache.doris.analysis.ShowCreateCatalogStmt; import org.apache.doris.analysis.UserIdentity; +import org.apache.doris.backup.BackupCatalogMeta; import org.apache.doris.catalog.DatabaseIf; import org.apache.doris.catalog.Env; import org.apache.doris.catalog.EnvFactory; @@ -872,4 +873,24 @@ public Map>> getIdToCata public Set getCopyOfCatalog() { return new HashSet<>(idToCatalog.values()); } + + public List getAllCatalogsCopied() { + List catalogs = Lists.newArrayList(); + readLock(); + try { + // get all rules + for (Map.Entry entry : nameToCatalog.entrySet()) { + CatalogIf catalog = entry.getValue(); + + if (!catalog.isInternalCatalog()) { + catalogs.add(new BackupCatalogMeta(catalog.getName(), catalog.getResource(), + catalog.getProperties(), catalog.getComment())); + } + } + } finally { + readUnlock(); + } + return catalogs; + } + } diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java index 9d6f52d5a51d7d..a0ff3140bc4b48 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Auth.java @@ -493,7 +493,7 @@ public void replayCreateUser(PrivInfo privInfo) { } } - private void createUserInternal(UserIdentity userIdent, String roleName, byte[] password, + public void createUserInternal(UserIdentity userIdent, String roleName, byte[] password, boolean ignoreIfExists, PasswordOptions passwordOptions, String comment, String userId, boolean isReplay) throws DdlException { writeLock(); @@ -656,6 +656,41 @@ public void grant(GrantStmt stmt) throws DdlException { } } + // grant + public void grant(Role role, Set userIdentities) throws DdlException { + // table priv + for (Map.Entry entry : role.getTblPatternToPrivs().entrySet()) { + grantInternal(null, role.getRoleName(), entry.getKey(), entry.getValue(), role.getColPrivMap(), + true /* err on non exist */, false /* not replay */); + } + 
// resource priv + for (Map.Entry entry : role.getResourcePatternToPrivs().entrySet()) { + grantInternal(null, role.getRoleName(), entry.getKey(), entry.getValue(), + true /* err on non exist */, false /* not replay */); + } + for (Map.Entry entry : role.getClusterPatternToPrivs().entrySet()) { + grantInternal(null, role.getRoleName(), entry.getKey(), entry.getValue(), + true /* err on non exist */, false /* not replay */); + } + for (Map.Entry entry : role.getStagePatternToPrivs().entrySet()) { + grantInternal(null, role.getRoleName(), entry.getKey(), entry.getValue(), + true /* err on non exist */, false /* not replay */); + } + // workload group + for (Map.Entry entry : role.getWorkloadGroupPatternToPrivs().entrySet()) { + grantInternal(null, role.getRoleName(), entry.getKey(), entry.getValue(), + true /* err on non exist */, false /* not replay */); + } + //grant role + if (userIdentities != null && userIdentities.size() > 0) { + List roles = Lists.newArrayList(); + roles.add(role.getRoleName()); + for (UserIdentity userIdentity : userIdentities) { + grantInternal(userIdentity, roles, false); + } + } + } + public void replayGrant(PrivInfo privInfo) { try { if (privInfo.getTblPattern() != null) { @@ -1063,7 +1098,7 @@ private void alterRoleInternal(String roleName, String comment, boolean isReplay } } - private void createRoleInternal(String role, boolean ignoreIfExists, String comment, boolean isReplay) + public void createRoleInternal(String role, boolean ignoreIfExists, String comment, boolean isReplay) throws DdlException { Role emptyPrivsRole = new Role(role, comment); writeLock(); @@ -1319,6 +1354,80 @@ public List> getAuthInfo(UserIdentity specifiedUserIdent) { return userAuthInfos; } + public void getAuthInfoCopied(List users, List roles, List userProperties, + Map> roleToUsers, Map propertyMap, + Map policyMap) { + readLock(); + try { + // get all users' auth info + Map> nameToUsers = userManager.getNameToUsers(); + for (List users1 : 
nameToUsers.values()) { + for (User user : users1) { + if (user.getUserIdentity().isAdminUser() || user.getUserIdentity().isRootUser()) { + continue; + } + users.add(user.clone()); + } + } + + // get all roles + Map nameToRoles = roleManager.getRoles(); + for (Role role : nameToRoles.values()) { + if (Role.isOperator(role.getRoleName()) || Role.isAdmin(role.getRoleName()) + || Role.isDefaultAdmin(role.getRoleName()) || Role.isDefaultRoot(role.getRoleName())) { + continue; + } + roles.add(role.clone()); + } + + // get userProperties + for (Entry entry : propertyMgr.getPropertyMap().entrySet()) { + UserProperty userProperty = entry.getValue(); + String userName = userProperty.getQualifiedUser(); + if (userName.equals(Auth.ROOT_USER) || userName.equals(Auth.ADMIN_USER)) { + continue; + } + userProperties.add(userProperty.clone()); + } + + // get roleToUsers + for (Entry> entry : userRoleManager.getRoleToUsers().entrySet()) { + String roleName = entry.getKey(); + if (Role.isOperator(roleName) || Role.isAdmin(roleName) + || Role.isDefaultAdmin(roleName) || Role.isDefaultRoot(roleName)) { + continue; + } + Set newUserIdentities = Sets.newHashSet(); + for (UserIdentity identity : entry.getValue()) { + newUserIdentities.add(identity); + } + roleToUsers.put(roleName, newUserIdentities); + } + + // get propertyMap + for (Map.Entry entry : propertyMgr.getPropertyMap().entrySet()) { + String userName = entry.getKey(); + if (userName.equals(Auth.ROOT_USER) || userName.equals(Auth.ADMIN_USER)) { + continue; + } + UserProperty userProperty = entry.getValue(); + propertyMap.put(userName, userProperty.clone()); + } + + // get policyMap + for (Map.Entry entry : passwdPolicyManager.getPolicyMap().entrySet()) { + String userName = entry.getKey().getUser(); + if (userName.equals(Auth.ROOT_USER) || userName.equals(Auth.ADMIN_USER)) { + continue; + } + PasswordPolicy policy = entry.getValue(); + policyMap.put(entry.getKey(), policy.clone()); + } + } finally { + readUnlock(); + } + } + 
private void getUserAuthInfo(List> userAuthInfos, UserIdentity userIdent) { // AuthProcDir.TITLE_NAMES List userAuthInfo = Lists.newArrayList(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PasswordPolicy.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PasswordPolicy.java index 367eec89827274..8e3cc6ae183ffb 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PasswordPolicy.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PasswordPolicy.java @@ -21,6 +21,8 @@ import org.apache.doris.analysis.UserIdentity; import org.apache.doris.common.AuthenticationException; import org.apache.doris.common.ErrorCode; +import org.apache.doris.common.FeConstants; +import org.apache.doris.common.io.DeepCopy; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; import org.apache.doris.common.util.TimeUtils; @@ -56,6 +58,7 @@ public class PasswordPolicy implements Writable { private static final Logger LOG = LogManager.getLogger(PasswordPolicy.class); + @SerializedName(value = "lock") private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); private static final String EXPIRATION_SECONDS = "password_policy.expiration_seconds"; @@ -73,12 +76,15 @@ public class PasswordPolicy implements Writable { private HistoryPolicy historyPolicy = new HistoryPolicy(); @SerializedName(value = "failedLoginPolicy") private FailedLoginPolicy failedLoginPolicy = new FailedLoginPolicy(); + @SerializedName(value = "qualifiedUser") + private UserIdentity userIdent; - public PasswordPolicy() { + public PasswordPolicy(UserIdentity userIdent) { + this.userIdent = userIdent; } - public static PasswordPolicy createDefault() { - return new PasswordPolicy(); + public static PasswordPolicy createDefault(UserIdentity userIdent) { + return new PasswordPolicy(userIdent); } public void checkAccountLockedAndPasswordExpiration(UserIdentity curUser) throws AuthenticationException { @@ -143,6 +149,14 @@ 
public ExpirePolicy getExpirePolicy() { return expirePolicy; } + public HistoryPolicy getHistoryPolicy() { + return historyPolicy; + } + + public FailedLoginPolicy getFailedLoginPolicy() { + return failedLoginPolicy; + } + @Override public void write(DataOutput out) throws IOException { Text.writeString(out, GsonUtils.GSON.toJson(this)); @@ -484,4 +498,15 @@ public void getInfo(List> rows) { rows.add(row4); } } + + @Override + public PasswordPolicy clone() { + PasswordPolicy copied = DeepCopy.copy(this, PasswordPolicy.class, FeConstants.meta_version); + if (copied == null) { + LOG.warn("failed to clone PasswordPolicy: " + toString()); + return null; + } + return copied; + } + } diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PasswordPolicyManager.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PasswordPolicyManager.java index a8eb45dbd6d728..229133333dc075 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PasswordPolicyManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/PasswordPolicyManager.java @@ -49,7 +49,7 @@ private PasswordPolicy getOrCreatePolicy(UserIdentity userIdent) { // When upgrading from old Doris version(before v1.2), there may be not entry in this manager. // create and return a default one. // The following logic is just to handle some concurrent issue. 
- PasswordPolicy policy = PasswordPolicy.createDefault(); + PasswordPolicy policy = PasswordPolicy.createDefault(userIdent); passwordPolicy = policyMap.putIfAbsent(userIdent, policy); if (passwordPolicy == null) { passwordPolicy = policy; @@ -131,4 +131,7 @@ public static PasswordPolicyManager read(DataInput in) throws IOException { return GsonUtils.GSON.fromJson(Text.readString(in), PasswordPolicyManager.class); } + public Map getPolicyMap() { + return policyMap; + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Role.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Role.java index 0054579062fcc5..f4e314c4457ed1 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Role.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/Role.java @@ -27,8 +27,10 @@ import org.apache.doris.common.AnalysisException; import org.apache.doris.common.Config; import org.apache.doris.common.DdlException; +import org.apache.doris.common.FeConstants; import org.apache.doris.common.FeMetaVersion; import org.apache.doris.common.PatternMatcherException; +import org.apache.doris.common.io.DeepCopy; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; import org.apache.doris.mysql.privilege.Auth.PrivLevel; @@ -144,8 +146,10 @@ public Role(String roleName, TablePattern tablePattern, PrivBitSet privs) throws public Role(String roleName, TablePattern tablePattern, PrivBitSet privs, Map> colPrivileges) throws DdlException { this.roleName = roleName; - this.tblPatternToPrivs.put(tablePattern, privs); - grantPrivs(tablePattern, privs.copy()); + if (tablePattern != null) { + this.tblPatternToPrivs.put(tablePattern, privs); + grantPrivs(tablePattern, privs.copy()); + } grantCols(colPrivileges); } @@ -204,6 +208,22 @@ public static boolean isDefaultRoleName(String roleName) { return roleName.startsWith(RoleManager.DEFAULT_ROLE_PREFIX); } + public static boolean isOperator(String roleName) { + 
return roleName.equals(OPERATOR_ROLE); + } + + public static boolean isAdmin(String roleName) { + return roleName.equals(ADMIN_ROLE); + } + + public static boolean isDefaultRoot(String roleName) { + return roleName.equals(RoleManager.DEFAULT_ROLE_PREFIX + "root@%"); + } + + public static boolean isDefaultAdmin(String roleName) { + return roleName.equals(RoleManager.DEFAULT_ROLE_PREFIX + "admin@%"); + } + public String getRoleName() { return roleName; } @@ -232,6 +252,16 @@ public Map getWorkloadGroupPatternToPrivs() { return workloadGroupPatternToPrivs; } + @Override + public Role clone() { + Role copied = DeepCopy.copy(this, Role.class, FeConstants.meta_version); + if (copied == null) { + LOG.warn("failed to clone user: " + getRoleName()); + return null; + } + return copied; + } + // merge role not check role name. public void mergeNotCheck(Role other) throws DdlException { for (Map.Entry entry : other.getTblPatternToPrivs().entrySet()) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/User.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/User.java index 589e5b0ea3c666..1725bfaf5600e9 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/User.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/User.java @@ -19,8 +19,10 @@ import org.apache.doris.analysis.UserIdentity; import org.apache.doris.common.CaseSensibility; +import org.apache.doris.common.FeConstants; import org.apache.doris.common.PatternMatcher; import org.apache.doris.common.PatternMatcherException; +import org.apache.doris.common.io.DeepCopy; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; import org.apache.doris.persist.gson.GsonPostProcessable; @@ -156,6 +158,16 @@ public int compareTo(@NotNull User o) { return -userIdentity.getHost().compareTo(o.userIdentity.getHost()); } + @Override + public User clone() { + User copied = DeepCopy.copy(this, User.class, FeConstants.meta_version); + if 
(copied == null) { + LOG.warn("failed to clone user: " + getUserIdentity().getUser()); + return null; + } + return copied; + } + @Override public String toString() { StringBuilder sb = new StringBuilder(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java index 08f64cc006e3d9..6ee6f138e3ee9c 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserProperty.java @@ -26,10 +26,12 @@ import org.apache.doris.common.AnalysisException; import org.apache.doris.common.Config; import org.apache.doris.common.DdlException; +import org.apache.doris.common.FeConstants; import org.apache.doris.common.FeMetaVersion; import org.apache.doris.common.LoadException; import org.apache.doris.common.Pair; import org.apache.doris.common.UserException; +import org.apache.doris.common.io.DeepCopy; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; import org.apache.doris.load.DppConfig; @@ -689,4 +691,14 @@ public void readFields(DataInput in) throws IOException { this.commonProperties = CommonUserProperties.read(in); } } + + @Override + public UserProperty clone() { + UserProperty copied = DeepCopy.copy(this, UserProperty.class, FeConstants.meta_version); + if (copied == null) { + LOG.warn("failed to clone user: " + getQualifiedUser()); + return null; + } + return copied; + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserPropertyMgr.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserPropertyMgr.java index b40bb92fbfae7f..f3a066ca5b28bf 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserPropertyMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserPropertyMgr.java @@ -244,6 +244,10 @@ public String getWorkloadGroup(String qualifiedUser) { return 
existProperty.getWorkloadGroup(); } + public Map getPropertyMap() { + return propertyMap; + } + public Pair isWorkloadGroupInUse(String groupName) { for (Entry entry : propertyMap.entrySet()) { if (entry.getValue().getWorkloadGroup().equals(groupName)) { diff --git a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserRoleManager.java b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserRoleManager.java index b9783982e3b8c7..25089a88eb0b4a 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserRoleManager.java +++ b/fe/fe-core/src/main/java/org/apache/doris/mysql/privilege/UserRoleManager.java @@ -172,6 +172,10 @@ private void removeClusterPrefix() { userToRoles = newUserToRoles; } + public Map> getRoleToUsers() { + return roleToUsers; + } + @Override public void gsonPostProcess() throws IOException { removeClusterPrefix(); diff --git a/fe/fe-core/src/main/java/org/apache/doris/policy/PolicyMgr.java b/fe/fe-core/src/main/java/org/apache/doris/policy/PolicyMgr.java index 8e639b36a25427..120aa1a02d3c51 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/policy/PolicyMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/policy/PolicyMgr.java @@ -23,6 +23,7 @@ import org.apache.doris.analysis.DropPolicyStmt; import org.apache.doris.analysis.ShowPolicyStmt; import org.apache.doris.analysis.ShowStoragePolicyUsingStmt; +import org.apache.doris.analysis.TableName; import org.apache.doris.analysis.UserIdentity; import org.apache.doris.catalog.Database; import org.apache.doris.catalog.Env; @@ -37,6 +38,7 @@ import org.apache.doris.common.io.Writable; import org.apache.doris.datasource.InternalCatalog; import org.apache.doris.persist.gson.GsonUtils; +import org.apache.doris.qe.OriginStatement; import org.apache.doris.qe.ShowResultSet; import org.apache.doris.task.AgentBatchTask; import org.apache.doris.task.AgentTaskExecutor; @@ -117,6 +119,18 @@ public void createDefaultStoragePolicy() { LOG.info("Create default storage 
success."); } + /** + * Create policy through RowPolicy. + **/ + public void createRowPolicy(RowPolicy rowPolicy) throws UserException { + CreatePolicyStmt stmt = new CreatePolicyStmt(rowPolicy.getType(), true, rowPolicy.getPolicyName(), + new TableName(rowPolicy.getCtlName(), rowPolicy.getDbName(), rowPolicy.getTableName()), + rowPolicy.getFilterType().name(), rowPolicy.getUser(), + rowPolicy.getRoleName(), rowPolicy.getWherePredicate()); + stmt.setOrigStmt(new OriginStatement(rowPolicy.getOriginStmt(), 0)); + createPolicy(stmt); + } + /** * Create policy through stmt. **/ diff --git a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroup.java b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroup.java index 2bd4a394d84dac..46fc8f76a7c262 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroup.java +++ b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroup.java @@ -21,8 +21,10 @@ import org.apache.doris.common.AnalysisException; import org.apache.doris.common.Config; import org.apache.doris.common.DdlException; +import org.apache.doris.common.FeConstants; import org.apache.doris.common.FeNameFormat; import org.apache.doris.common.Pair; +import org.apache.doris.common.io.DeepCopy; import org.apache.doris.common.io.Text; import org.apache.doris.common.io.Writable; import org.apache.doris.common.proc.BaseProcResult; @@ -731,4 +733,14 @@ public void gsonPostProcess() throws IOException { this.resetQueueProperty(this.properties); } + + @Override + public WorkloadGroup clone() { + WorkloadGroup copied = DeepCopy.copy(this, WorkloadGroup.class, FeConstants.meta_version); + if (copied == null) { + LOG.warn("failed to clone workload: " + getName()); + return null; + } + return copied; + } } diff --git a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroupMgr.java 
b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroupMgr.java index 94695cc8d5b8ac..f7429a40c85bbd 100644 --- a/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroupMgr.java +++ b/fe/fe-core/src/main/java/org/apache/doris/resource/workloadgroup/WorkloadGroupMgr.java @@ -693,4 +693,22 @@ public ProcResult fetchResult(UserIdentity currentUserIdentity) { return result; } } + + public List getAllWorkloadGroupsCopied() { + List workloadGroups = Lists.newArrayList(); + readLock(); + try { + // get all workload groups + for (Map.Entry entry : nameToWorkloadGroup.entrySet()) { + if (DEFAULT_GROUP_NAME.equals(entry.getKey()) || INTERNAL_GROUP_NAME.equals(entry.getKey())) { + continue; + } + WorkloadGroup workloadGroup = entry.getValue(); + workloadGroups.add(workloadGroup.clone()); + } + } finally { + readUnlock(); + } + return workloadGroups; + } } diff --git a/fe/fe-core/src/test/java/org/apache/doris/backup/BackupHandlerTest.java b/fe/fe-core/src/test/java/org/apache/doris/backup/BackupHandlerTest.java index ba564599029994..e2e042902c7eee 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/backup/BackupHandlerTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/backup/BackupHandlerTest.java @@ -229,7 +229,7 @@ public Status getSnapshotInfoFile(String label, String backupTimestamp, List brokerAddrs) { tableRefs.add(new TableRef( new TableName(InternalCatalog.INTERNAL_CATALOG_NAME, UnitTestUtil.DB_NAME, UnitTestUtil.TABLE_NAME), null)); - job = new BackupJob("label", dbId, UnitTestUtil.DB_NAME, tableRefs, 13600 * 1000, BackupStmt.BackupContent.ALL, + job = new BackupJob("label", dbId, UnitTestUtil.DB_NAME, tableRefs, 13600 * 1000, + new BackupStmt(new LabelName(UnitTestUtil.DB_NAME, "label"), "repo", null, null), env, repo.getId(), 0); } @@ -350,7 +352,8 @@ public void testRunAbnormal() { tableRefs.add( new TableRef(new TableName(InternalCatalog.INTERNAL_CATALOG_NAME, UnitTestUtil.DB_NAME, "unknown_tbl"), null)); 
- job = new BackupJob("label", dbId, UnitTestUtil.DB_NAME, tableRefs, 13600 * 1000, BackupStmt.BackupContent.ALL, + job = new BackupJob("label", dbId, UnitTestUtil.DB_NAME, tableRefs, 13600 * 1000, + new BackupStmt(new LabelName(UnitTestUtil.DB_NAME, "label"), "repo", null, null), env, repo.getId(), 0); job.run(); Assert.assertEquals(Status.ErrCode.NOT_FOUND, job.getStatus().getErrCode()); @@ -367,8 +370,9 @@ public void testSerialization() throws IOException, AnalysisException { tableRefs.add( new TableRef(new TableName(InternalCatalog.INTERNAL_CATALOG_NAME, UnitTestUtil.DB_NAME, UnitTestUtil.TABLE_NAME), null)); - job = new BackupJob("label", dbId, UnitTestUtil.DB_NAME, tableRefs, 13600 * 1000, BackupStmt.BackupContent.ALL, - env, repo.getId(), 123); + job = new BackupJob("label", dbId, UnitTestUtil.DB_NAME, tableRefs, 13600 * 1000, + new BackupStmt(new LabelName(UnitTestUtil.DB_NAME, "label"), "repo", null, null), + env, repo.getId(), 123); job.write(out); out.flush(); diff --git a/fe/fe-core/src/test/java/org/apache/doris/backup/RestoreJobTest.java b/fe/fe-core/src/test/java/org/apache/doris/backup/RestoreJobTest.java index dadfdb632e394d..9320c574c5c7f4 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/backup/RestoreJobTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/backup/RestoreJobTest.java @@ -256,8 +256,8 @@ boolean await(long timeout, TimeUnit unit) { db.unregisterTable(expectedRestoreTbl.getName()); job = new RestoreJob(label, "2018-01-01 01:01:01", db.getId(), db.getFullName(), jobInfo, false, - new ReplicaAllocation((short) 3), 100000, -1, false, false, false, false, false, false, - env, repo.getId()); + new ReplicaAllocation((short) 3), 100000, -1, false, false, false, false, false, false, false, + false, false, env, repo.getId()); List tbls = Lists.newArrayList(); List resources = Lists.newArrayList(); diff --git a/regression-test/data/backup_restore/test_backup_restore_priv.out 
b/regression-test/data/backup_restore/test_backup_restore_priv.out new file mode 100644 index 00000000000000..0e4ccf121bf3ab --- /dev/null +++ b/regression-test/data/backup_restore/test_backup_restore_priv.out @@ -0,0 +1,36 @@ +-- This file is automatically generated. You should know what you did if you want to edit this +-- !order_before_restore -- +% user1 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 +% user2 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 +% user3 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 + +-- !order_after_restore1 -- +% user1 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 +% user2 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 +% user3 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 + +-- !order_after_restore2 -- +% user1 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 +% user2 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 +% user3 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 + +-- !order_after_restore3 -- +% user1 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 +% user2 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 +% user3 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 + +-- !order_after_restore4 -- + +-- !order_after_restore5 -- + +-- !order_after_restore6 -- +% user1 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 +% user2 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 +% user3 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 + +-- !order_after_restore7 -- +% user1 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 + +-- !order_after_restore8 -- +% user1 N N N N N N N N N N N N 0 0 0 100 864000 NO_RESTRICTION 3 86400 0 + diff --git a/regression-test/suites/backup_restore/test_backup_restore_priv.groovy 
b/regression-test/suites/backup_restore/test_backup_restore_priv.groovy new file mode 100644 index 00000000000000..e0fa79c6f958b7 --- /dev/null +++ b/regression-test/suites/backup_restore/test_backup_restore_priv.groovy @@ -0,0 +1,694 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +suite("test_backup_restore_priv", "backup_restore") { + String suiteName = "test_backup_restore_priv" + String repoName = "${suiteName}_repo" + String dbName = "${suiteName}_db" + String tableName = "${suiteName}_table" + String tableName1 = "${suiteName}_table_1" + String tableName2 = "${suiteName}_table_2" + String snapshotName = "${suiteName}_snapshot" + def tokens = context.config.jdbcUrl.split('/') + def url=tokens[0] + "//" + tokens[2] + "/" + dbName + "?" 
+ + def syncer = getSyncer() + syncer.createS3Repository(repoName) + + sql "DROP DATABASE IF EXISTS ${dbName}" + sql "CREATE DATABASE IF NOT EXISTS ${dbName}" + sql "DROP TABLE IF EXISTS ${dbName}.${tableName}" + sql """ + CREATE TABLE if NOT EXISTS ${dbName}.${tableName} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + sql """ + CREATE TABLE if NOT EXISTS ${dbName}.${tableName1} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + sql """ + CREATE TABLE if NOT EXISTS ${dbName}.${tableName2} + ( + `test` INT, + `id` INT + ) + ENGINE=OLAP + UNIQUE KEY(`test`, `id`) + DISTRIBUTED BY HASH(id) BUCKETS 1 + PROPERTIES ( + "replication_allocation" = "tag.location.default: 1" + ) + """ + + def insert_num = 5 + for (int i = 0; i < insert_num; ++i) { + sql """ + INSERT INTO ${dbName}.${tableName} VALUES (${i}, ${i}) + """ + } + + res = sql "SELECT * FROM ${dbName}.${tableName}" + assertEquals(res.size(), insert_num) + + sql "drop user if exists user1;" + sql "drop user if exists user2;" + sql "drop user if exists user3;" + + sql "drop role if exists role_select;" + sql "drop role if exists role_load;" + sql "drop row policy if exists test_row_policy_1 on ${dbName}.${tableName};" + sql "drop sql_block_rule if exists test_block_rule;" + sql "drop catalog if exists mysql;" + sql "drop workload group if exists wg1;" + sql "drop workload group if exists wg2;" + + + sql "CREATE USER 'user1' IDENTIFIED BY '12345' PASSWORD_EXPIRE INTERVAL 10 DAY FAILED_LOGIN_ATTEMPTS 3 PASSWORD_LOCK_TIME 1 DAY;" + sql "CREATE USER 'user2' IDENTIFIED BY '12345' PASSWORD_EXPIRE INTERVAL 10 DAY FAILED_LOGIN_ATTEMPTS 3 PASSWORD_LOCK_TIME 1 DAY;" + sql "CREATE USER 'user3' IDENTIFIED BY '12345' PASSWORD_EXPIRE INTERVAL 10 DAY 
FAILED_LOGIN_ATTEMPTS 3 PASSWORD_LOCK_TIME 1 DAY;" + + sql "create role role_select;" + sql "GRANT Select_priv ON *.* TO ROLE 'role_select';" + + sql "create role role_load;" + sql "GRANT Load_priv ON *.* TO ROLE 'role_load';" + + sql "grant 'role_select', 'role_load' to 'user1'@'%';" + sql "grant 'role_select' to 'user2'@'%';" + sql "grant 'role_load' to 'user3'@'%';" + sql "grant Create_priv on *.* to user3" + sql "GRANT Select_priv(id1) ON ${dbName}.${tableName1} TO user3;" + sql "GRANT Select_priv(test) ON ${dbName}.${tableName2} TO role 'role_select';" + + + sql "CREATE ROW POLICY test_row_policy_1 ON ${dbName}.${tableName} AS RESTRICTIVE TO user1 USING (id = 1);" + + sql """ + CREATE SQL_BLOCK_RULE test_block_rule + PROPERTIES( + "sql"="select \\* from order_analysis", + "global"="false", + "enable"="true" + ); + """ + + String jdbcUrl = context.config.jdbcUrl + "&sessionVariables=return_object_data_as_binary=true" + String jdbcUser = context.config.jdbcUser + String jdbcPassword = context.config.jdbcPassword + String s3_endpoint = getS3Endpoint() + String bucket = getS3BucketName() + String driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/mysql-connector-java-8.0.25.jar" + //String driver_url = "mysql-connector-j-8.0.31.jar" + + sql """ + CREATE CATALOG mysql PROPERTIES ( + "type"="jdbc", + "user" = "${jdbcUser}", + "password"="${jdbcPassword}", + "jdbc_url" = "${jdbcUrl}", + "driver_url" = "${driver_url}", + "driver_class" = "com.mysql.cj.jdbc.Driver" + ); + """ + + sql """ create workload group wg1 properties('tag'='cn1', "memory_limit"="45%"); """ + sql """ create workload group wg2 properties ("max_concurrency"="5","max_queue_size" = "50"); """ + + def commonAuth = { result, UserIdentity, Password, Roles, CatalogPrivs -> + assertEquals(UserIdentity as String, result.UserIdentity[0] as String) + assertEquals(Password as String, result.Password[0] as String) + assertEquals(Roles as String, result.Roles[0] as String) + 
// Tail of the grant-checking closure defined above this window:
// verify the first catalog-privilege entry matches the expected string.
assertEquals(CatalogPrivs as String, result.CatalogPrivs[0] as String)
}

// Each show* helper scans the corresponding SHOW output and returns the first
// row whose name column ends with `name`, or null when no such row exists.
def showRoles = { name ->
    def ret = sql_return_maparray """show roles"""
    ret.find {
        def matcher = it.Name =~ /.*${name}$/
        matcher.matches()
    }
}

def showRowPolicy = { name ->
    def ret = sql_return_maparray """show row policy"""
    ret.find {
        def matcher = it.PolicyName =~ /.*${name}$/
        matcher.matches()
    }
}

def showSqlBlockRule = { name ->
    def ret = sql_return_maparray """show SQL_BLOCK_RULE"""
    ret.find {
        def matcher = it.Name =~ /.*${name}$/
        matcher.matches()
    }
}

def showCatalogs = { name ->
    def ret = sql_return_maparray """show catalogs"""
    ret.find {
        def matcher = it.CatalogName =~ /.*${name}$/
        matcher.matches()
    }
}

def showWorkloadGroups = { name ->
    def ret = sql_return_maparray """show workload groups"""
    ret.find {
        def matcher = it.Name =~ /.*${name}$/
        matcher.matches()
    }
}

// Assert that the users, roles, row policy and sql-block rule created by this
// suite exist and carry exactly the expected grants.
def checkPrivileges = () -> {
    def result = sql_return_maparray """show grants for user1"""
    log.info(result as String)
    commonAuth result, "'user1'@'%'", "Yes", "role_select,role_load", "internal: Select_priv,Load_priv"
    assertEquals("internal.${dbName}.${tableName2}: Select_priv[test]" as String, result.ColPrivs[0] as String)

    result = sql_return_maparray """show grants for user2"""
    log.info(result as String)
    commonAuth result, "'user2'@'%'", "Yes", "role_select", "internal: Select_priv"
    assertEquals("internal.${dbName}.${tableName2}: Select_priv[test]" as String, result.ColPrivs[0] as String)

    result = sql_return_maparray """show grants for user3"""
    log.info(result as String)
    commonAuth result, "'user3'@'%'", "Yes", "role_load", "internal: Load_priv,Create_priv"
    assertEquals("internal.${dbName}.${tableName1}: Select_priv[id1]" as String, result.ColPrivs[0] as String)

    result = showRoles.call("role_select")
    log.info(result as String)
    //assertNull(result.CloudClusterPrivs)
    assertEquals(result.Users, "'user1'@'%', 'user2'@'%'")
    assertEquals(result.CatalogPrivs, "internal.*.*: Select_priv")

    result = showRoles.call("role_load")
    log.info(result as String)
    assertEquals(result.Users, "'user1'@'%', 'user3'@'%'")
    assertEquals(result.CatalogPrivs, "internal.*.*: Load_priv")

    result = showRowPolicy.call("test_row_policy_1")
    log.info(result as String)
    assertNotNull(result)

    // check row policy valid
    // NOTE(review): the try/catch is deliberate best-effort — in several restore
    // scenarios below the table does not exist after a GLOBAL restore, so the
    // SELECT may legitimately throw; only the row count is checked when it works.
    connect(user="user1", password="12345", url=url) {
        try {
            res = sql "SELECT * FROM ${dbName}.${tableName}"
            assertEquals(res.size(), 1)
        } catch (Exception e) {
            log.info(e.getMessage())
        }
    }

    result = showSqlBlockRule.call("test_block_rule")
    log.info(result as String)
    assertEquals(result.Name, "test_block_rule")
}

// Assert that none of the suite's users/roles/row policy/sql-block rule exist
// (used after dropping everything or after a restore without reserve flags).
def checkNonPrivileges = () -> {
    //except 'password_policy.password_creation_time'
    result = sql_return_maparray "select * except(`password_policy.password_creation_time`) from mysql.user where User in ('user1', 'user2', 'user3') order by host, user;"
    assertEquals(result.size(), 0)

    result = showRoles.call("role_select")
    log.info(result as String)
    assertNull(result)

    result = showRoles.call("role_load")
    log.info(result as String)
    assertNull(result)

    result = showRowPolicy.call("test_row_policy_1")
    log.info(result as String)
    assertNull(result)

    result = showSqlBlockRule.call("test_block_rule")
    log.info(result as String)
    assertNull(result)
}

// Presence/absence checks for the external catalog and the workload groups.
def checkCatalogs = () -> {
    result = showCatalogs.call("mysql")
    log.info(result as String)
    assertEquals(result.CatalogName, "mysql")
    assertEquals(result.Type, "jdbc")
}

// NOTE(review): defined but not invoked in this suite; checkPrivileges already
// performs the same assertion inline.
def checkSqlBlockRules = () -> {
    result = showSqlBlockRule.call("test_block_rule")
    log.info(result as String)
    assertEquals(result.Name, "test_block_rule")
}

def checkWorkloadGroups = () -> {
    result = showWorkloadGroups.call("wg1")
    log.info(result as String)
    assertNotNull(result)
    result = showWorkloadGroups.call("wg2")
    log.info(result as String)
    assertNotNull(result)
}

def checkNonCatalogs = () -> {
    result = showCatalogs.call("mysql")
    log.info(result as String)
    assertNull(result)
}

def checkNonWorkloadGroups = () -> {
    result = showWorkloadGroups.call("wg1")
    log.info(result as String)
    assertNull(result)
    result = showWorkloadGroups.call("wg2")
    log.info(result as String)
    assertNull(result)
}

// ---- baseline: everything created earlier in the suite must be present ----
checkPrivileges();
// qt_* presumably diffs against the recorded .out fixture — verify against framework.
qt_order_before_restore "select * except(`password_policy.password_creation_time`) from mysql.user where User in ('user1', 'user2', 'user3') order by host, user;"
checkCatalogs();
checkWorkloadGroups();

// Take a GLOBAL snapshot that includes privileges, catalogs and workload groups.
sql """
    BACKUP GLOBAL SNAPSHOT ${snapshotName}
    TO `${repoName}`
    PROPERTIES (
        "backup_privilege"="true",
        "backup_catalog"="true",
        "backup_workload_group"="true"
    )
"""

syncer.waitSnapshotFinish("__internal_schema")

// Drop every object covered by the snapshot so each restore starts clean.
sql "drop user if exists user1;"
sql "drop user if exists user2;"
sql "drop user if exists user3;"

sql "drop role if exists role_select;"
sql "drop role if exists role_load;"
sql "drop row policy if exists test_row_policy_1 on ${dbName}.${tableName};"
sql "drop sql_block_rule if exists test_block_rule;"
sql "DROP TABLE IF EXISTS ${dbName}.${tableName}"
sql "drop catalog if exists mysql;"
sql "drop workload group if exists wg1;"
sql "drop workload group if exists wg2;"

checkNonPrivileges();
checkNonCatalogs();
checkNonWorkloadGroups();

def snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName)
assertTrue(snapshot != null)

logger.info(""" ====================================== 1 "reserve_privilege"="true", "reserve_catalog"="true","reserve_workload_group"="true" ==================================== """)

// Scenario 1: restore with all three reserve flags enabled.
sql """
    RESTORE GLOBAL SNAPSHOT ${snapshotName}
    FROM `${repoName}`
    PROPERTIES
    (
        "backup_timestamp" = "${snapshot}",
        "reserve_replica" = "true",
        "reserve_privilege"="true",
        "reserve_catalog"="true",
        "reserve_workload_group"="true"
    )
"""
syncer.waitAllRestoreFinish("__internal_schema")

// The GLOBAL snapshot does not restore the table itself, so the SELECT must fail.
test {
    sql "SELECT * FROM ${dbName}.${tableName}"
    exception "does not exist in database"
}

checkPrivileges();
qt_order_after_restore1 "select * except(`password_policy.password_creation_time`) from mysql.user where User in ('user1', 'user2', 'user3') order by host, user;"
checkCatalogs();
checkWorkloadGroups();

// Scenario 2: no reserve_* flags — restore still brings back the backed-up
// global objects (privileges, catalogs, workload groups), per the checks below.
logger.info(" ====================================== 2 without reserve ==================================== ")
sql "drop user if exists user1;"
sql "drop user if exists user2;"
sql "drop user if exists user3;"

sql "drop role if exists role_select;"
sql "drop role if exists role_load;"
sql "drop row policy if exists test_row_policy_1 on ${dbName}.${tableName};"
sql "drop sql_block_rule if exists test_block_rule;"
sql "drop catalog if exists mysql;"
sql "drop workload group if exists wg1;"
sql "drop workload group if exists wg2;"

sql """
    RESTORE GLOBAL SNAPSHOT ${snapshotName}
    FROM `${repoName}`
    PROPERTIES
    (
        "backup_timestamp" = "${snapshot}",
        "reserve_replica" = "true"
    )
"""

syncer.waitAllRestoreFinish("__internal_schema")

test {
    sql "SELECT * FROM ${dbName}.${tableName}"
    exception "does not exist in database"
}

checkPrivileges();
qt_order_after_restore2 "select * except(`password_policy.password_creation_time`) from mysql.user where User in ('user1', 'user2', 'user3') order by host, user;"
checkCatalogs();
checkWorkloadGroups();

// Scenario 3: only privileges are kept; catalogs and workload groups are not.
logger.info(""" ====================================== 3 "reserve_privilege"="true" ==================================== """)
sql "drop user if exists user1;"
sql "drop user if exists user2;"
sql "drop user if exists user3;"

sql "drop role if exists role_select;"
sql "drop role if exists role_load;"
sql "drop row policy if exists test_row_policy_1 on ${dbName}.${tableName};"
sql "drop sql_block_rule if exists test_block_rule;"
sql "drop catalog if exists mysql;"
sql "drop workload group if exists wg1;"
sql "drop workload group if exists wg2;"

sql """
    RESTORE GLOBAL SNAPSHOT ${snapshotName}
    FROM `${repoName}`
    PROPERTIES
    (
        "backup_timestamp" = "${snapshot}",
        "reserve_replica" = "true",
        "reserve_privilege"="true"
    )
"""

syncer.waitAllRestoreFinish("__internal_schema")

test {
    sql "SELECT * FROM ${dbName}.${tableName}"
    exception "does not exist in database"
}

checkPrivileges();
qt_order_after_restore3 "select * except(`password_policy.password_creation_time`) from mysql.user where User in ('user1', 'user2', 'user3') order by host, user;"
checkNonCatalogs();
checkNonWorkloadGroups();

// Scenario 4: only catalogs are kept.
logger.info(""" ====================================== 4 "reserve_catalog"="true" ==================================== """)
sql "drop user if exists user1;"
sql "drop user if exists user2;"
sql "drop user if exists user3;"

sql "drop role if exists role_select;"
sql "drop role if exists role_load;"
sql "drop row policy if exists test_row_policy_1 on ${dbName}.${tableName};"
sql "drop sql_block_rule if exists test_block_rule;"
sql "drop catalog if exists mysql;"
sql "drop workload group if exists wg1;"
sql "drop workload group if exists wg2;"

sql """
    RESTORE GLOBAL SNAPSHOT ${snapshotName}
    FROM `${repoName}`
    PROPERTIES
    (
        "backup_timestamp" = "${snapshot}",
        "reserve_replica" = "true",
        "reserve_catalog"="true"
    )
"""

syncer.waitAllRestoreFinish("__internal_schema")

test {
    sql "SELECT * FROM ${dbName}.${tableName}"
    exception "does not exist in database"
}

checkNonPrivileges();
qt_order_after_restore4 "select * except(`password_policy.password_creation_time`) from mysql.user where User in ('user1', 'user2', 'user3') order by host, user;"
checkCatalogs();
checkNonWorkloadGroups();

// Scenario 5: only workload groups are kept.
logger.info(""" ====================================== 5 "reserve_workload_group"="true" ==================================== """)
sql "drop user if exists user1;"
sql "drop user if exists user2;"
sql "drop user if exists user3;"

sql "drop role if exists role_select;"
sql "drop role if exists role_load;"
sql "drop row policy if exists test_row_policy_1 on ${dbName}.${tableName};"
sql "drop sql_block_rule if exists test_block_rule;"
sql "drop catalog if exists mysql;"
sql "drop workload group if exists wg1;"
sql "drop workload group if exists wg2;"

sql """
    RESTORE GLOBAL SNAPSHOT ${snapshotName}
    FROM `${repoName}`
    PROPERTIES
    (
        "backup_timestamp" = "${snapshot}",
        "reserve_replica" = "true",
        "reserve_workload_group"="true"
    )
"""

syncer.waitAllRestoreFinish("__internal_schema")

test {
    sql "SELECT * FROM ${dbName}.${tableName}"
    exception "does not exist in database"
}

checkNonPrivileges();
qt_order_after_restore5 "select * except(`password_policy.password_creation_time`) from mysql.user where User in ('user1', 'user2', 'user3') order by host, user;"
checkNonCatalogs();
checkWorkloadGroups();

// Scenario 6: privileges + workload groups, catalogs excluded.
logger.info(""" ====================================== 6 "reserve_privilege"="true","reserve_workload_group"="true" ==================================== """)

sql "drop user if exists user1;"
sql "drop user if exists user2;"
sql "drop user if exists user3;"

sql "drop role if exists role_select;"
sql "drop role if exists role_load;"
sql "drop row policy if exists test_row_policy_1 on ${dbName}.${tableName};"
sql "drop sql_block_rule if exists test_block_rule;"
sql "drop catalog if exists mysql;"
sql "drop workload group if exists wg1;"
sql "drop workload group if exists wg2;"

sql """
    RESTORE GLOBAL SNAPSHOT ${snapshotName}
    FROM `${repoName}`
    PROPERTIES
    (
        "backup_timestamp" = "${snapshot}",
        "reserve_replica" = "true",
        "reserve_privilege"="true",
        "reserve_workload_group"="true"
    )
"""

syncer.waitAllRestoreFinish("__internal_schema")

test {
    sql "SELECT * FROM ${dbName}.${tableName}"
    exception "does not exist in database"
}

checkPrivileges();
qt_order_after_restore6 "select * except(`password_policy.password_creation_time`) from mysql.user where User in ('user1', 'user2', 'user3') order by host, user;"
checkNonCatalogs();
checkWorkloadGroups();

// Scenario 7: restore must FAIL because wg2 already exists, and the pre-existing
// objects created below must survive the cancelled job untouched.
logger.info(""" ====================================== 7 restore fail check cancel: wg2 exist ==================================== """)

sql "drop user if exists user1;"
sql "drop user if exists user2;"
sql "drop user if exists user3;"

sql "drop role if exists role_select;"
sql "drop role if exists role_load;"
sql "drop row policy if exists test_row_policy_1 on ${dbName}.${tableName};"
sql "drop sql_block_rule if exists test_block_rule;"
sql "drop catalog if exists mysql;"
sql "drop workload group if exists wg1;"
sql "drop workload group if exists wg2;"

sql "create role role_select;"
sql "GRANT Select_priv, Load_priv ON *.* TO ROLE 'role_select';"
sql "create user user1;"
sql "grant select_priv on *.* to user1;"
sql "GRANT Select_priv(id1) ON ${dbName}.${tableName1} TO user1;"
sql """ create workload group wg2 properties ("max_concurrency"="5","max_queue_size" = "50"); """


sql """
    RESTORE GLOBAL SNAPSHOT ${snapshotName}
    FROM `${repoName}`
    PROPERTIES
    (
        "backup_timestamp" = "${snapshot}",
        "reserve_replica" = "true",
        "reserve_privilege"="true",
        "reserve_workload_group"="true"
    )
"""

syncer.waitAllRestoreFinish("__internal_schema")

// restore failed
records = sql_return_maparray "SHOW global restore"
row = records[records.size() - 1]
assertTrue(row.Status.contains("workload group wg2 already exist"))

test {
    sql "SELECT * FROM ${dbName}.${tableName}"
    exception "does not exist in database"
}

qt_order_after_restore7 "select * except(`password_policy.password_creation_time`) from mysql.user where User in ('user1', 'user2', 'user3') order by host, user;"

// After the cancelled restore, user1 keeps only the manually granted privileges.
result = sql_return_maparray """show grants for user1"""
log.info(result as String)
commonAuth result, "'user1'@'%'", "No", "", "internal: Select_priv"

result = showRoles.call("role_select")
log.info(result as String)
assertEquals(result.Users, "")
assertEquals(result.CatalogPrivs, "internal.*.*: Select_priv,Load_priv")

// Scenario 8: restore must FAIL because a column privilege in the snapshot
// references tableName1, which no longer exists; pre-existing objects survive.
logger.info(""" ====================================== 8 restore fail check cancel : ${dbName}.${tableName1} not exist ==================================== """)

sql "drop user if exists user1;"
sql "drop user if exists user2;"
sql "drop user if exists user3;"

sql "drop role if exists role_select;"
sql "drop role if exists role_load;"
sql "drop row policy if exists test_row_policy_1 on ${dbName}.${tableName};"
sql "drop sql_block_rule if exists test_block_rule;"
sql "drop catalog if exists mysql;"
sql "drop workload group if exists wg1;"
sql "drop workload group if exists wg2;"
sql "DROP TABLE IF EXISTS ${dbName}.${tableName1}"


sql """
    CREATE TABLE if NOT EXISTS ${dbName}.${tableName}
    (
        `test` INT,
        `id` INT
    )
    ENGINE=OLAP
    UNIQUE KEY(`test`, `id`)
    DISTRIBUTED BY HASH(id) BUCKETS 1
    PROPERTIES (
        "replication_allocation" = "tag.location.default: 1"
    )
"""
sql "create role role_select;"
sql "GRANT Select_priv, Load_priv ON *.* TO ROLE 'role_select';"
sql "create user user1;"
sql "grant select_priv on *.* to user1;"
sql "GRANT Select_priv(id1) ON ${dbName}.${tableName} TO user1;"
sql """ create workload group wg2 properties ("max_concurrency"="5","max_queue_size" = "50"); """


sql """
    RESTORE GLOBAL SNAPSHOT ${snapshotName}
    FROM `${repoName}`
    PROPERTIES
    (
        "backup_timestamp" = "${snapshot}",
        "reserve_replica" = "true",
        "reserve_privilege"="true"
    )
"""

syncer.waitAllRestoreFinish("__internal_schema")

// restore failed
records = sql_return_maparray "SHOW global restore"
row = records[records.size() - 1]
assertTrue(row.Status.contains("table:${tableName1} does not exist"))

qt_order_after_restore8 "select * except(`password_policy.password_creation_time`) from mysql.user where User in ('user1', 'user2', 'user3') order by host, user;"

result = sql_return_maparray """show grants for user1"""
log.info(result as String)
commonAuth result, "'user1'@'%'", "No", "", "internal: Select_priv"
assertEquals("internal.${dbName}.${tableName}: Select_priv[id1]" as String, result.ColPrivs[0] as String)

result = showRoles.call("role_select")
log.info(result as String)
assertEquals(result.Users, "")
assertEquals(result.CatalogPrivs, "internal.*.*: Select_priv,Load_priv")

//cleanup
sql "drop user if exists user1;"
sql "drop user if exists user2;"
sql "drop user if exists user3;"

sql "drop role if exists role_select;"
sql "drop role if exists role_load;"
sql "drop row policy if exists test_row_policy_1 on ${dbName}.${tableName};"
sql "drop sql_block_rule if exists test_block_rule;"
sql "drop catalog if exists mysql;"
sql "drop workload group if exists wg1;"
sql "drop workload group if exists wg2;"

sql "DROP DATABASE ${dbName} FORCE"
sql "DROP REPOSITORY `${repoName}`"
}