Optimize YAML loader options config: change the maxAliasesForCollections default from 50 to 1000 #33993

Closed · wants to merge 13 commits
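The loader-options change named in the title is not among the hunks shown below, so for orientation only: SnakeYAML, the library ShardingSphere's YamlEngine is built on, caps alias expansion at 50 per collection by default to blunt "billion laughs"-style documents, and maxAliasesForCollections is the LoaderOptions property being raised. A minimal, self-contained sketch (hypothetical class name) of loading YAML with that limit set to 1000:

import org.yaml.snakeyaml.LoaderOptions;
import org.yaml.snakeyaml.Yaml;

public final class MaxAliasesExample {
    
    public static void main(final String[] args) {
        // Raise SnakeYAML's alias-expansion cap (default 50) so large but
        // legitimate configurations with many anchors/aliases still load.
        LoaderOptions loaderOptions = new LoaderOptions();
        loaderOptions.setMaxAliasesForCollections(1000);
        Yaml yaml = new Yaml(loaderOptions);
        // Three aliases of the &ds anchor; a document with more than 50 such
        // aliases would be rejected under the default limit.
        Object config = yaml.load("defaults: &ds {user: root}\nds_0: *ds\nds_1: *ds\nds_2: *ds");
        System.out.println(config);
    }
}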
RELEASE-NOTES.md (2 changes: 1 addition & 1 deletion)

@@ -28,7 +28,7 @@
 1. Proxy Native: Change the Base Docker Image of ShardingSphere Proxy Native - [#33263](https://github.com/apache/shardingsphere/issues/33263)
 1. Proxy Native: Support connecting to HiveServer2 with ZooKeeper Service Discovery enabled in GraalVM Native Image - [#33768](https://github.com/apache/shardingsphere/pull/33768)
 1. Proxy Native: Support local transactions of ClickHouse under GraalVM Native Image - [#33801](https://github.com/apache/shardingsphere/pull/33801)
-1. Sharding: Support MYSQL GroupConcat function for aggregating multiple shards - [#33808](https://github.com/apache/shardingsphere/pull/33808)
+1. Sharding: Support GroupConcat function for aggregating multiple shards in MySQL, OpenGauss, Doris - [#33808](https://github.com/apache/shardingsphere/pull/33808)
 1. Proxy Native: Support Seata AT integration under Proxy Native in GraalVM Native Image - [#33889](https://github.com/apache/shardingsphere/pull/33889)
 1. Agent: Simplify the use of Agent's Docker Image - [#33356](https://github.com/apache/shardingsphere/pull/33356)

@@ -56,7 +56,9 @@ public YamlReadwriteSplittingRuleConfiguration swapToYamlConfiguration(final Rea
     private YamlReadwriteSplittingDataSourceGroupRuleConfiguration swapToYamlConfiguration(final ReadwriteSplittingDataSourceGroupRuleConfiguration dataSourceGroupRuleConfig) {
         YamlReadwriteSplittingDataSourceGroupRuleConfiguration result = new YamlReadwriteSplittingDataSourceGroupRuleConfiguration();
         result.setWriteDataSourceName(dataSourceGroupRuleConfig.getWriteDataSourceName());
-        result.setReadDataSourceNames(dataSourceGroupRuleConfig.getReadDataSourceNames());
+        if (null != dataSourceGroupRuleConfig.getReadDataSourceNames() && !dataSourceGroupRuleConfig.getReadDataSourceNames().isEmpty()) {
+            result.setReadDataSourceNames(dataSourceGroupRuleConfig.getReadDataSourceNames());
+        }
         result.setTransactionalReadQueryStrategy(dataSourceGroupRuleConfig.getTransactionalReadQueryStrategy().name());
         result.setLoadBalancerName(dataSourceGroupRuleConfig.getLoadBalancerName());
         return result;
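The new guard keeps an unset or empty read data source list out of the YAML that configuration export produces. A self-contained illustration (hypothetical class name, a plain map standing in for the swapper's configuration object) of what the dump would otherwise contain:

import org.yaml.snakeyaml.Yaml;

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public final class EmptyListDumpExample {
    
    public static void main(final String[] args) {
        // Without the guard, an empty list still serializes as an explicit
        // entry; skipping the setter omits the key entirely.
        Map<String, Object> dataSourceGroup = new LinkedHashMap<>();
        dataSourceGroup.put("writeDataSourceName", "write_ds");
        dataSourceGroup.put("readDataSourceNames", Collections.emptyList());
        // Prints something like:
        //   writeDataSourceName: write_ds
        //   readDataSourceNames: []
        System.out.print(new Yaml().dump(dataSourceGroup));
    }
}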
@@ -102,8 +102,8 @@ public static ShardingSphereDatabase create(final String name, final DatabaseTyp
                                                        final ConfigurationProperties props, final ComputeNodeInstanceContext computeNodeInstanceContext) throws SQLException {
         ResourceMetaData resourceMetaData = new ResourceMetaData(databaseConfig.getDataSources(), databaseConfig.getStorageUnits());
         Collection<ShardingSphereRule> databaseRules = DatabaseRulesBuilder.build(name, protocolType, databaseConfig, computeNodeInstanceContext, resourceMetaData);
-        Map<String, ShardingSphereSchema> schemas = new ConcurrentHashMap<>(GenericSchemaBuilder.build(new GenericSchemaBuilderMaterial(
-                protocolType, resourceMetaData.getStorageUnits(), databaseRules, props, new DatabaseTypeRegistry(protocolType).getDefaultSchemaName(name))));
+        Map<String, ShardingSphereSchema> schemas = new ConcurrentHashMap<>(GenericSchemaBuilder.build(protocolType,
+                new GenericSchemaBuilderMaterial(resourceMetaData.getStorageUnits(), databaseRules, props, new DatabaseTypeRegistry(protocolType).getDefaultSchemaName(name))));
         SystemSchemaBuilder.build(name, protocolType, props).forEach(schemas::putIfAbsent);
         return new ShardingSphereDatabase(name, protocolType, resourceMetaData, new RuleMetaData(databaseRules), schemas.values());
     }
@@ -27,6 +27,7 @@
 import org.apache.shardingsphere.infra.database.core.metadata.data.model.IndexMetaData;
 import org.apache.shardingsphere.infra.database.core.metadata.data.model.SchemaMetaData;
 import org.apache.shardingsphere.infra.database.core.metadata.data.model.TableMetaData;
+import org.apache.shardingsphere.infra.database.core.type.DatabaseType;
 import org.apache.shardingsphere.infra.database.core.type.DatabaseTypeRegistry;
 import org.apache.shardingsphere.infra.metadata.database.resource.unit.StorageUnit;
 import org.apache.shardingsphere.infra.metadata.database.schema.model.ShardingSphereColumn;

@@ -60,26 +61,28 @@ public final class GenericSchemaBuilder {
     /**
      * Build generic schema.
      *
+     * @param protocolType database type
      * @param material generic schema builder material
      * @return generic schema map
      * @throws SQLException SQL exception
      */
-    public static Map<String, ShardingSphereSchema> build(final GenericSchemaBuilderMaterial material) throws SQLException {
-        return build(getAllTableNames(material.getRules()), material);
+    public static Map<String, ShardingSphereSchema> build(final DatabaseType protocolType, final GenericSchemaBuilderMaterial material) throws SQLException {
+        return build(getAllTableNames(material.getRules()), protocolType, material);
     }

     /**
      * Build generic schema.
      *
      * @param tableNames table names
+     * @param protocolType database type
      * @param material generic schema builder material
      * @return generic schema map
      * @throws SQLException SQL exception
      */
-    public static Map<String, ShardingSphereSchema> build(final Collection<String> tableNames, final GenericSchemaBuilderMaterial material) throws SQLException {
+    public static Map<String, ShardingSphereSchema> build(final Collection<String> tableNames, final DatabaseType protocolType, final GenericSchemaBuilderMaterial material) throws SQLException {
         Map<String, SchemaMetaData> result = loadSchemas(tableNames, material);
-        if (!material.isSameProtocolAndStorageTypes()) {
-            result = translate(result, material);
+        if (!isSameProtocolAndStorageTypes(protocolType, material.getStorageUnits())) {
+            result = translate(result, protocolType, material);
         }
         return revise(result, material);
     }
@@ -98,13 +101,17 @@ private static Map<String, SchemaMetaData> loadSchemas(final Collection<String>
         return materials.isEmpty() ? Collections.emptyMap() : MetaDataLoader.load(materials);
     }

-    private static Map<String, SchemaMetaData> translate(final Map<String, SchemaMetaData> schemaMetaDataMap, final GenericSchemaBuilderMaterial material) {
+    private static boolean isSameProtocolAndStorageTypes(final DatabaseType protocolType, final Map<String, StorageUnit> storageUnits) {
+        return storageUnits.values().stream().map(StorageUnit::getStorageType).allMatch(protocolType::equals);
+    }
+
+    private static Map<String, SchemaMetaData> translate(final Map<String, SchemaMetaData> schemaMetaDataMap, final DatabaseType protocolType, final GenericSchemaBuilderMaterial material) {
         Collection<TableMetaData> tableMetaDataList = new LinkedList<>();
         for (StorageUnit each : material.getStorageUnits().values()) {
             String defaultSchemaName = new DatabaseTypeRegistry(each.getStorageType()).getDefaultSchemaName(material.getDefaultSchemaName());
             tableMetaDataList.addAll(Optional.ofNullable(schemaMetaDataMap.get(defaultSchemaName)).map(SchemaMetaData::getTables).orElseGet(Collections::emptyList));
         }
-        String frontendSchemaName = new DatabaseTypeRegistry(material.getProtocolType()).getDefaultSchemaName(material.getDefaultSchemaName());
+        String frontendSchemaName = new DatabaseTypeRegistry(protocolType).getDefaultSchemaName(material.getDefaultSchemaName());
         Map<String, SchemaMetaData> result = new LinkedHashMap<>();
         result.put(frontendSchemaName, new SchemaMetaData(frontendSchemaName, tableMetaDataList));
         return result;
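One behavioral note on the relocated check: Stream.allMatch is vacuously true on an empty stream, so a database with no storage units is treated as having matching protocol and storage types and skips the translate step, exactly as the old material.isSameProtocolAndStorageTypes() did. A tiny runnable reminder (hypothetical class name):

import java.util.stream.Stream;

public final class AllMatchVacuousTruth {
    
    public static void main(final String[] args) {
        // allMatch over an empty stream returns true, so zero storage units
        // count as "same protocol and storage types".
        System.out.println(Stream.<String>empty().allMatch("MySQL"::equals)); // true
    }
}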
@@ -20,7 +20,6 @@
 import lombok.Getter;
 import lombok.RequiredArgsConstructor;
 import org.apache.shardingsphere.infra.config.props.ConfigurationProperties;
-import org.apache.shardingsphere.infra.database.core.type.DatabaseType;
 import org.apache.shardingsphere.infra.metadata.database.resource.unit.StorageUnit;
 import org.apache.shardingsphere.infra.rule.ShardingSphereRule;

@@ -34,22 +33,11 @@
 @Getter
 public final class GenericSchemaBuilderMaterial {
 
-    private final DatabaseType protocolType;
-
     private final Map<String, StorageUnit> storageUnits;
 
     private final Collection<ShardingSphereRule> rules;
 
     private final ConfigurationProperties props;
 
     private final String defaultSchemaName;
-
-    /**
-     * Judge whether same protocol and storage database types.
-     *
-     * @return is same or not
-     */
-    public boolean isSameProtocolAndStorageTypes() {
-        return storageUnits.values().stream().map(StorageUnit::getStorageType).allMatch(protocolType::equals);
-    }
 }
@@ -18,14 +18,11 @@
 package org.apache.shardingsphere.infra.metadata.database.schema.reviser;
 
 import lombok.RequiredArgsConstructor;
-import org.apache.shardingsphere.infra.database.core.type.DatabaseType;
-import org.apache.shardingsphere.infra.metadata.database.resource.unit.StorageUnit;
-import org.apache.shardingsphere.infra.metadata.database.schema.builder.GenericSchemaBuilderMaterial;
 import org.apache.shardingsphere.infra.database.core.metadata.data.model.SchemaMetaData;
+import org.apache.shardingsphere.infra.metadata.database.schema.builder.GenericSchemaBuilderMaterial;
 import org.apache.shardingsphere.infra.metadata.database.schema.reviser.schema.SchemaMetaDataReviseEngine;
 import org.apache.shardingsphere.infra.rule.ShardingSphereRule;
 
-import javax.sql.DataSource;
 import java.util.Collection;
 import java.util.LinkedHashMap;
 import java.util.Map;

@@ -49,10 +46,7 @@ public final class MetaDataReviseEngine {
     public Map<String, SchemaMetaData> revise(final Map<String, SchemaMetaData> schemaMetaDataMap, final GenericSchemaBuilderMaterial material) {
         Map<String, SchemaMetaData> result = new LinkedHashMap<>(schemaMetaDataMap.size(), 1F);
         for (Entry<String, SchemaMetaData> entry : schemaMetaDataMap.entrySet()) {
-            // TODO establish a corresponding relationship between tables and data sources
-            DatabaseType databaseType = material.getStorageUnits().values().stream().map(StorageUnit::getStorageType).findFirst().orElse(null);
-            DataSource dataSource = material.getStorageUnits().values().stream().map(StorageUnit::getDataSource).findFirst().orElse(null);
-            result.put(entry.getKey(), new SchemaMetaDataReviseEngine(rules, material.getProps(), databaseType, dataSource).revise(entry.getValue()));
+            result.put(entry.getKey(), new SchemaMetaDataReviseEngine(rules, material.getProps()).revise(entry.getValue()));
         }
         return result;
     }
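For context on the deleted TODO block: deriving a single databaseType and dataSource via findFirst() on the storage units is only sound when every unit shares one storage type; with heterogeneous units the pick is arbitrary, which is presumably why the unused parameters are dropped rather than kept. A toy illustration (hypothetical unit names, plain strings standing in for DatabaseType):

import java.util.LinkedHashMap;
import java.util.Map;

public final class FirstStorageUnitGuess {
    
    public static void main(final String[] args) {
        // Mirrors the deleted heuristic: take whatever storage type the
        // first registered unit happens to have.
        Map<String, String> storageTypes = new LinkedHashMap<>();
        storageTypes.put("ds_0", "MySQL");
        storageTypes.put("ds_1", "PostgreSQL");
        String guessed = storageTypes.values().stream().findFirst().orElse(null);
        System.out.println(guessed); // MySQL - ds_1's PostgreSQL is ignored
    }
}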
@@ -19,11 +19,9 @@

 import lombok.RequiredArgsConstructor;
 import org.apache.shardingsphere.infra.database.core.metadata.data.model.ColumnMetaData;
-import org.apache.shardingsphere.infra.database.core.type.DatabaseType;
 import org.apache.shardingsphere.infra.metadata.database.schema.reviser.MetaDataReviseEntry;
 import org.apache.shardingsphere.infra.rule.ShardingSphereRule;
 
-import javax.sql.DataSource;
 import java.util.Collection;
 import java.util.LinkedHashSet;
 import java.util.Optional;

@@ -38,10 +36,6 @@ public final class ColumnReviseEngine<T extends ShardingSphereRule> {

     private final T rule;
 
-    private final DatabaseType databaseType;
-
-    private final DataSource dataSource;
-
     private final MetaDataReviseEntry<T> reviseEntry;
 
     /**
@@ -19,15 +19,13 @@

 import lombok.RequiredArgsConstructor;
 import org.apache.shardingsphere.infra.config.props.ConfigurationProperties;
-import org.apache.shardingsphere.infra.database.core.type.DatabaseType;
-import org.apache.shardingsphere.infra.metadata.database.schema.reviser.table.TableMetaDataReviseEngine;
-import org.apache.shardingsphere.infra.metadata.database.schema.reviser.MetaDataReviseEntry;
 import org.apache.shardingsphere.infra.database.core.metadata.data.model.SchemaMetaData;
 import org.apache.shardingsphere.infra.database.core.metadata.data.model.TableMetaData;
+import org.apache.shardingsphere.infra.metadata.database.schema.reviser.MetaDataReviseEntry;
+import org.apache.shardingsphere.infra.metadata.database.schema.reviser.table.TableMetaDataReviseEngine;
 import org.apache.shardingsphere.infra.rule.ShardingSphereRule;
 import org.apache.shardingsphere.infra.spi.type.ordered.OrderedSPILoader;
 
-import javax.sql.DataSource;
 import java.util.Collection;
 import java.util.Map.Entry;
 import java.util.Optional;

@@ -43,10 +41,6 @@ public final class SchemaMetaDataReviseEngine {

     private final ConfigurationProperties props;
 
-    private final DatabaseType databaseType;
-
-    private final DataSource dataSource;
-
     /**
      * Revise schema meta data.
      *

@@ -63,7 +57,7 @@ public SchemaMetaData revise(final SchemaMetaData originalMetaData) {
     }
 
     private <T extends ShardingSphereRule> SchemaMetaData revise(final SchemaMetaData originalMetaData, final T rule, final MetaDataReviseEntry<T> reviseEntry) {
-        TableMetaDataReviseEngine<T> tableMetaDataReviseEngine = new TableMetaDataReviseEngine<>(rule, databaseType, dataSource, reviseEntry);
+        TableMetaDataReviseEngine<T> tableMetaDataReviseEngine = new TableMetaDataReviseEngine<>(rule, reviseEntry);
         Optional<? extends SchemaTableAggregationReviser<T>> aggregationReviser = reviseEntry.getSchemaTableAggregationReviser(props);
         if (!aggregationReviser.isPresent()) {
             return new SchemaMetaData(originalMetaData.getName(), originalMetaData.getTables().stream().map(tableMetaDataReviseEngine::revise).collect(Collectors.toList()));
@@ -18,15 +18,13 @@
 package org.apache.shardingsphere.infra.metadata.database.schema.reviser.table;
 
 import lombok.RequiredArgsConstructor;
-import org.apache.shardingsphere.infra.database.core.type.DatabaseType;
 import org.apache.shardingsphere.infra.database.core.metadata.data.model.TableMetaData;
 import org.apache.shardingsphere.infra.metadata.database.schema.reviser.MetaDataReviseEntry;
 import org.apache.shardingsphere.infra.metadata.database.schema.reviser.column.ColumnReviseEngine;
 import org.apache.shardingsphere.infra.metadata.database.schema.reviser.constraint.ConstraintReviseEngine;
 import org.apache.shardingsphere.infra.metadata.database.schema.reviser.index.IndexReviseEngine;
 import org.apache.shardingsphere.infra.rule.ShardingSphereRule;
 
-import javax.sql.DataSource;
 import java.util.Optional;

 /**

@@ -39,10 +37,6 @@ public final class TableMetaDataReviseEngine<T extends ShardingSphereRule> {

     private final T rule;
 
-    private final DatabaseType databaseType;
-
-    private final DataSource dataSource;
-
     private final MetaDataReviseEntry<T> reviseEntry;
 
     /**

@@ -54,7 +48,7 @@ public TableMetaData revise(final TableMetaData originalMetaData) {
     public TableMetaData revise(final TableMetaData originalMetaData) {
         Optional<? extends TableNameReviser<T>> tableNameReviser = reviseEntry.getTableNameReviser();
         String revisedTableName = tableNameReviser.map(optional -> optional.revise(originalMetaData.getName(), rule)).orElse(originalMetaData.getName());
-        return new TableMetaData(revisedTableName, new ColumnReviseEngine<>(rule, databaseType, dataSource, reviseEntry).revise(originalMetaData.getName(), originalMetaData.getColumns()),
+        return new TableMetaData(revisedTableName, new ColumnReviseEngine<>(rule, reviseEntry).revise(originalMetaData.getName(), originalMetaData.getColumns()),
                 new IndexReviseEngine<>(rule, reviseEntry).revise(originalMetaData.getName(), originalMetaData.getIndexes()),
                 new ConstraintReviseEngine<>(rule, reviseEntry).revise(originalMetaData.getName(), originalMetaData.getConstraints()), originalMetaData.getType());
     }

This file was deleted.
