[fix][dingo-executor] Fix unique index partitioning: always keep the index partition consistent with the primary table #1293

Merged: 1 commit, Nov 14, 2024
@@ -1295,6 +1295,7 @@ private static IndexDefinition getIndexDefinition(
         //TableDefinition indexTableDefinition = tableDefinition.copyWithName(dingoSqlKeyConstraint.getUniqueName());
         indexTableDefinition.setColumns(indexColumnDefinitions);
         indexTableDefinition.setProperties(properties);
+        indexTableDefinition.setPartDefinition(null);

         validatePartitionBy(
             indexTableDefinition.getKeyColumns().stream().map(ColumnDefinition::getName).collect(Collectors.toList()),
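
The single added line above clears the partition definition that the index table definition would otherwise carry, so the unique index falls back to the same partitioning as the primary table, which is what the PR title describes. A minimal, self-contained sketch of that fallback idea; every class and method name below is an illustrative stand-in, not a DingoDB API:

public class PartitionInheritanceSketch {

    record PartDefinition(String strategy, int partitions) { }

    static class TableDef {
        final String name;
        PartDefinition partDefinition;

        TableDef(String name, PartDefinition partDefinition) {
            this.name = name;
            this.partDefinition = partDefinition;
        }
    }

    // Effective partitioning of an index: its own definition if present, otherwise the primary table's.
    static PartDefinition effectivePartition(TableDef primaryTable, TableDef index) {
        return index.partDefinition != null ? index.partDefinition : primaryTable.partDefinition;
    }

    public static void main(String[] args) {
        TableDef primary = new TableDef("t", new PartDefinition("RANGE", 3));
        TableDef uniqueIndex = new TableDef("uk_t_col", new PartDefinition("HASH", 8));

        // Before the fix: the unique index can end up with partitioning that differs from the primary table.
        System.out.println(effectivePartition(primary, uniqueIndex));   // PartDefinition[strategy=HASH, partitions=8]

        // The added setPartDefinition(null) corresponds to this: clear the index's own
        // partition definition so it always follows the primary table.
        uniqueIndex.partDefinition = null;
        System.out.println(effectivePartition(primary, uniqueIndex));   // PartDefinition[strategy=RANGE, partitions=3]
    }
}
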
@@ -27,11 +27,14 @@

 public class DingoSqlKeyConstraint extends SqlKeyConstraint {
     private static final AtomicInteger ixNu = new AtomicInteger(1);

     @Getter
     private String uniqueName;

+    @Getter
+    @Setter
+    private boolean usePrimary;

     public DingoSqlKeyConstraint(SqlParserPos pos, @Nullable SqlIdentifier name, SqlNodeList columnList) {
         super(pos, name, columnList);
         if (name != null) {
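
The change in this file introduces a Lombok-backed usePrimary flag; this diff does not show where the parser sets it or who reads it. The sketch below only illustrates the accessors Lombok generates and one hypothetical consumer, assuming the flag marks a unique key constraint that should rely on the primary key instead of a separate index definition:

public class UsePrimarySketch {
    private boolean usePrimary;

    public boolean isUsePrimary() { return usePrimary; }                                // what @Getter generates
    public void setUsePrimary(boolean usePrimary) { this.usePrimary = usePrimary; }     // what @Setter generates

    public static void main(String[] args) {
        UsePrimarySketch constraint = new UsePrimarySketch();
        constraint.setUsePrimary(true);

        if (constraint.isUsePrimary()) {
            // Hypothetical consumer: skip building a separate index definition and
            // rely on the primary key instead. Not taken from this PR.
            System.out.println("unique constraint is backed by the primary key");
        }
    }
}
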
@@ -86,9 +86,7 @@ public static void delVerSchemaDiff() {
     }

     public static void watchDdlKey() {
-        //String resourceKey = String.format("tenantId:{%d}", TenantConstant.TENANT_ID);
         WatchService watchService = new WatchService(Configuration.coordinators());
-        //LockService lockService = new LockService(resourceKey, Configuration.coordinators(), 45000);
         Kv kv = Kv.builder().kv(KeyValue.builder()
             .key(DdlUtil.ADDING_DDL_JOB_CONCURRENT_KEY.getBytes()).build()).build();
         try {
@@ -43,6 +43,7 @@
 import io.dingodb.sdk.service.entity.meta.TableDefinitionWithId;
 import io.dingodb.store.proxy.mapper.Mapper;
 import io.dingodb.store.proxy.mapper.MapperImpl;
+import io.dingodb.tso.TsoService;
 import lombok.Getter;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.lang3.StringUtils;
@@ -210,7 +211,7 @@ public String handleJobDone(DdlJob job) {

     public Pair<Long, String> runDdlJob(DdlContext dc, DdlJob job) {
         if (job.getRealStartTs() == 0) {
-            job.setRealStartTs(System.currentTimeMillis());
+            job.setRealStartTs(TsoService.getDefault().tso());
         }
         if (job.isFinished()) {
             LogUtils.debug(log, "[ddl] finish DDL job, job:{}", job);
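
Taking realStartTs from TsoService.getDefault().tso() instead of System.currentTimeMillis() means the job's start timestamp comes from the cluster's timestamp oracle rather than a node-local wall clock, so it stays comparable with other TSO-based values in the DDL flow. A rough sketch of the difference with a toy oracle; only getDefault().tso() is visible in this diff, the rest below is assumed:

import java.util.concurrent.atomic.AtomicLong;

public class TsoSketch {

    // Toy timestamp oracle: physical millis in the high bits, a logical counter in the
    // low 18 bits, so values are unique and monotonically increasing per process.
    static final class ToyTso {
        private static final ToyTso INSTANCE = new ToyTso();
        private final AtomicLong logical = new AtomicLong();

        static ToyTso getDefault() {
            return INSTANCE;
        }

        long tso() {
            long physical = System.currentTimeMillis();
            return (physical << 18) | (logical.incrementAndGet() & 0x3FFFFL);
        }
    }

    public static void main(String[] args) {
        long wallClock = System.currentTimeMillis();   // old realStartTs: node-local millis
        long tso = ToyTso.getDefault().tso();          // new realStartTs: oracle-style timestamp

        System.out.println("wall clock = " + wallClock);
        System.out.println("tso        = " + tso);
    }
}
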
@@ -692,8 +693,8 @@ public Pair<Long, String> onDropColumn(DdlContext dc, DdlJob job) {
         tableWithId.getTableDefinition()
             .getColumns()
             .removeIf(columnDefinition -> columnDefinition.getName().equalsIgnoreCase(columnName));
-        tableWithId.getTableDefinition().setName(DdlUtil.ddlTmpTableName);
         String originTableName = tableWithId.getTableDefinition().getName();
+        tableWithId.getTableDefinition().setName(DdlUtil.ddlTmpTableName);
         MetaService.root().createReplicaTable(job.getSchemaId(), tableWithId, originTableName);
         IndexUtil.pickBackFillType(job);
         job.setSchemaState(SchemaState.SCHEMA_WRITE_ONLY);
@@ -815,8 +816,8 @@ public Pair<Long, String> onAddColumn(DdlContext dc, DdlJob job) {
             definitionWithId.getTableDefinition()
                 .getColumns().add(MapperImpl.MAPPER.columnTo(columnDefinition));
         }
-        definitionWithId.getTableDefinition().setName("replicaTable");
         String originTableName = definitionWithId.getTableDefinition().getName();
+        definitionWithId.getTableDefinition().setName("replicaTable");
         MetaService.root().createReplicaTable(job.getSchemaId(), definitionWithId, originTableName);
         job.setSchemaState(SchemaState.SCHEMA_DELETE_ONLY);
         return updateSchemaVersion(dc, job);
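
Both onDropColumn and onAddColumn had the same ordering bug: the table definition was renamed to the temporary or replica name before originTableName was read, so the name handed to createReplicaTable as the original was actually the temporary one. The two hunks above move the rename after the capture. A stripped-down reproduction, with SimpleTableDefinition standing in for the real TableDefinition:

public class RenameOrderSketch {

    // Stand-in for the real TableDefinition: only the getName/setName interaction matters here.
    static final class SimpleTableDefinition {
        private String name;

        SimpleTableDefinition(String name) { this.name = name; }
        String getName() { return name; }
        void setName(String name) { this.name = name; }
    }

    public static void main(String[] args) {
        // Before the fix: rename first, then read the "original" name.
        SimpleTableDefinition before = new SimpleTableDefinition("my_table");
        before.setName("replicaTable");
        String originBefore = before.getName();   // "replicaTable" -- the original name is already gone

        // After the fix: capture the original name, then rename.
        SimpleTableDefinition after = new SimpleTableDefinition("my_table");
        String originAfter = after.getName();     // "my_table"
        after.setName("replicaTable");

        System.out.println(originBefore + " vs " + originAfter);
    }
}
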
@@ -72,7 +72,8 @@ public static String addHistoryDDLJob2Table(Session session, DdlJob job, boolean
         String sql = "insert into mysql.dingo_ddl_history(job_id, job_meta, schema_name, table_name, schema_ids, "
             + "table_ids, create_time) values (%d, %s, %s, %s, %s, %s, %s)";
         try {
-            byte[] meta = job.encode(updateRawArgs);
+            job.setRawArgs(null);
+            byte[] meta = job.encode(false);
             String jobMeta = new String(meta);
             sql = String.format(sql, job.getId(), Utils.quoteForSql(jobMeta), Utils.quoteForSql(job.getSchemaName()),
                 Utils.quoteForSql(job.getTableName()), Utils.quoteForSql(job.getSchemaId()),
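
The history writer now always clears rawArgs and encodes with encode(false) instead of honouring the caller's updateRawArgs flag, so the job_meta stored in mysql.dingo_ddl_history never carries the raw DDL argument payload. A toy illustration of that effect; the encoder below is an assumption, not the real DdlJob.encode(boolean):

import java.nio.charset.StandardCharsets;

public class HistoryEncodeSketch {

    // Toy stand-in for DdlJob: only a rawArgs field and a fake encode(boolean) are modelled.
    static final class ToyDdlJob {
        long id = 42;
        String rawArgs = "[\"col_a\",\"BIGINT\",\"NOT NULL\"]";

        void setRawArgs(String rawArgs) {
            this.rawArgs = rawArgs;
        }

        // Includes rawArgs in the serialized form only when asked to.
        byte[] encode(boolean withRawArgs) {
            String json = "{\"id\":" + id
                + (withRawArgs && rawArgs != null ? ",\"rawArgs\":" + rawArgs : "")
                + "}";
            return json.getBytes(StandardCharsets.UTF_8);
        }
    }

    public static void main(String[] args) {
        ToyDdlJob job = new ToyDdlJob();
        byte[] oldMeta = job.encode(true);          // old behaviour when updateRawArgs was true

        // Mirrors the change in addHistoryDDLJob2Table: drop rawArgs, then encode without them.
        job.setRawArgs(null);
        byte[] newMeta = job.encode(false);

        System.out.println(oldMeta.length + " bytes with rawArgs vs " + newMeta.length + " bytes without");
    }
}
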