refactor: improve SQL Server support (#63)
1. Improve pagination support
2. Improve table-structure parsing
3. Add batch upsert support
4. Add index support
zhou-hao authored Dec 20, 2023
1 parent 0a2c7e6 commit e2805d4
Showing 8 changed files with 467 additions and 17 deletions.
@@ -322,6 +322,7 @@ protected void applyColumnInfo(RDBColumnMetadata column, Record record) {
record.getInteger("data_length").ifPresent(column::setLength);
record.getInteger("data_precision").ifPresent(column::setPrecision);
record.getInteger("data_scale").ifPresent(column::setScale);
+ record.getBoolean("primary_key").ifPresent(column::setPrimaryKey);

record.getString("data_type")
.map(String::toLowerCase)
@@ -18,7 +18,7 @@ public SqlFragments doPaging(SqlFragments fragments, int pageIndex, int pageSize
BlockSqlFragments block = ((BlockSqlFragments) fragments);
LinkedList<SqlFragments> orderBy = block.getBlock(FragmentBlock.orderBy);
if (orderBy.isEmpty()) {
orderBy.add(SqlFragments.single("order by 1"));
orderBy.add(SqlFragments.single("order by (select null)"));
}
block.addBlock(FragmentBlock.after, of("offset ? rows fetch next ? rows only", pageIndex * pageSize, pageSize));

@@ -27,7 +27,7 @@ public SqlFragments doPaging(SqlFragments fragments, int pageIndex, int pageSize
PrepareSqlFragments sqlFragments = ((PrepareSqlFragments) fragments);
if (!sqlFragments.getSql().contains("order by")
&& !sqlFragments.getSql().contains("ORDER BY")) {
sqlFragments.addSql("order", "by", "1");
sqlFragments.addSql("order", "by", "(select null)");
}
sqlFragments.addSql("offset ? rows fetch next ? rows only")
.addParameter(pageIndex * pageSize, pageSize);
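
SQL Server only accepts OFFSET ... FETCH after an ORDER BY clause, so the pager injects a dummy ordering when the query has none. The old ORDER BY 1 sorts by the first select-list column, which adds a real sort and can fail for non-sortable types; ORDER BY (SELECT NULL) satisfies the syntax without imposing any order. A minimal sketch of the generated paging SQL, assuming a hypothetical s_user table with pageIndex = 1 and pageSize = 20:

    SELECT id, name
    FROM s_user
    ORDER BY (SELECT NULL)                   -- no-op ordering required by OFFSET/FETCH
    OFFSET 20 ROWS FETCH NEXT 20 ROWS ONLY;  -- offset = pageIndex * pageSize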
@@ -1,7 +1,11 @@
package org.hswebframework.ezorm.rdb.supports.mssql;

import org.hswebframework.ezorm.rdb.metadata.RDBSchemaMetadata;
+ import org.hswebframework.ezorm.rdb.metadata.RDBTableMetadata;
import org.hswebframework.ezorm.rdb.supports.commons.RDBTableMetadataParser;
+ import reactor.core.publisher.Flux;

+ import java.util.List;

/**
* @author zhouhao
@@ -12,19 +16,35 @@ public SqlServer2012TableMetadataParser(RDBSchemaMetadata schema) {
super(schema);
}

- private static final String TABLE_META_SQL = String.join(" ",
- "SELECT ",
- "c.name as name,",
- "t.name as data_type,",
- "c.length as data_length,",
- "c.xscale as data_scale,",
- "c.xprec as data_precision,",
- "case when c.isnullable=1 then 0 else 1 end as [not_null],",
- "cast(p.value as varchar(500)) as comment ",
- "FROM syscolumns c ",
- "inner join systypes t on c.xusertype = t.xusertype ",
- "left join sys.extended_properties p on c.id=p.major_id and c.colid=p.minor_id ",
- "WHERE c.id = object_id(#{table})");
+ private static final String TABLE_META_SQL = String.join(
+ " ",
+ "SELECT",
+ " cols.TABLE_NAME as [table_name],",
+ " cols.COLUMN_NAME as [name],",
+ " cols.DATA_TYPE as [data_type],",
+ " cols.CHARACTER_MAXIMUM_LENGTH as [data_length],",
+ " cols.NUMERIC_PRECISION as [data_precision],",
+ " cols.NUMERIC_SCALE as [data_scale],",
+ " IIF(cols.IS_NULLABLE = 'NO', 1, 0) as [not_null],",
+ " IIF(cols.COLUMN_NAME IN (",
+ " SELECT COLUMN_NAME",
+ " FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE",
+ " WHERE TABLE_SCHEMA = cols.TABLE_SCHEMA",
+ " AND CONSTRAINT_NAME LIKE 'PK%'), 1, 0) as [primary_key],",
+ " cm.comment as [comment]",
+ "FROM INFORMATION_SCHEMA.COLUMNS cols",
+ " LEFT JOIN (",
+ " SELECT OBJECT_NAME(ep.major_id) as [table_name],",
+ " col.name as [column_name],",
+ " cast(ep.value as nvarchar(500)) as [comment]",
+ " FROM sys.extended_properties ep",
+ " JOIN sys.columns col ON col.object_id = ep.major_id",
+ " AND col.column_id = ep.minor_id",
+ " WHERE ep.class = 1",
+ " ) cm ON cols.TABLE_NAME = cm.table_name",
+ " AND cols.COLUMN_NAME = cm.column_name",
+ "WHERE cols.TABLE_SCHEMA = #{schema}",
+ " AND cols.TABLE_NAME LIKE #{table}");


@Override
@@ -48,4 +68,14 @@ protected String getAllTableSql() {
protected String getTableExistsSql() {
return "select count(1) as total from sysobjects where xtype='U' and name = #{table}";
}

+ @Override
+ public List<RDBTableMetadata> parseAll() {
+ return super.fastParseAll();
+ }
+
+ @Override
+ public Flux<RDBTableMetadata> parseAllReactive() {
+ return super.fastParseAllReactive();
+ }
}
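
The rewritten metadata query reads INFORMATION_SCHEMA.COLUMNS instead of the legacy syscolumns catalog and now also reports a primary_key flag, which the new record.getBoolean("primary_key") line in applyColumnInfo (first hunk above) copies into RDBColumnMetadata. The flag is a heuristic: a column is marked when its name appears in a 'PK%'-named constraint of the same schema, relying on SQL Server's default primary-key naming (note the subquery matches on column name and schema only, not on the table). For a hypothetical s_user table the query would return one row per column, with illustrative values:

    name | data_type | data_length | data_precision | data_scale | not_null | primary_key | comment
    id   | varchar   | 32          | NULL           | NULL       | 1        | 1           | user id
    name | nvarchar  | 128         | NULL           | NULL       | 0        | 0           | user name

parseAll and parseAllReactive now route through the fastParseAll variants inherited from RDBTableMetadataParser; judging by the name, these resolve the schema's tables in bulk rather than issuing one query per table, though the implementation lives in the commons parser.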
@@ -0,0 +1,279 @@
package org.hswebframework.ezorm.rdb.supports.mssql;

import lombok.AllArgsConstructor;
import org.hswebframework.ezorm.core.RuntimeDefaultValue;
import org.hswebframework.ezorm.core.param.Term;
import org.hswebframework.ezorm.rdb.executor.NullValue;
import org.hswebframework.ezorm.rdb.executor.SqlRequest;
import org.hswebframework.ezorm.rdb.executor.SyncSqlExecutor;
import org.hswebframework.ezorm.rdb.executor.reactive.ReactiveSqlExecutor;
import org.hswebframework.ezorm.rdb.mapping.defaults.SaveResult;
import org.hswebframework.ezorm.rdb.metadata.RDBColumnMetadata;
import org.hswebframework.ezorm.rdb.metadata.RDBTableMetadata;
import org.hswebframework.ezorm.rdb.operator.builder.fragments.NativeSql;
import org.hswebframework.ezorm.rdb.operator.builder.fragments.PrepareSqlFragments;
import org.hswebframework.ezorm.rdb.operator.builder.fragments.insert.InsertSqlBuilder;
import org.hswebframework.ezorm.rdb.operator.dml.insert.InsertColumn;
import org.hswebframework.ezorm.rdb.operator.dml.insert.InsertOperatorParameter;
import org.hswebframework.ezorm.rdb.operator.dml.upsert.*;
import org.hswebframework.ezorm.rdb.utils.ExceptionUtils;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.Collectors;

@SuppressWarnings("all")
public class SqlServerBatchUpsertOperator implements SaveOrUpdateOperator {

private final RDBTableMetadata table;

private final UpsertBatchInsertSqlBuilder builder;

private RDBColumnMetadata idColumn;

private final SaveOrUpdateOperator fallback;

public SqlServerBatchUpsertOperator(RDBTableMetadata table) {
this.table = table;
this.builder = new UpsertBatchInsertSqlBuilder(table);
this.idColumn = table.getColumns()
.stream().filter(RDBColumnMetadata::isPrimaryKey)
.findFirst().orElse(null);
this.fallback = new DefaultSaveOrUpdateOperator(table);
}

@Override
public SaveResultOperator execute(org.hswebframework.ezorm.rdb.operator.dml.upsert.UpsertOperatorParameter parameter) {
if (idColumn == null) {
this.idColumn = table
.getColumns()
.stream()
.filter(RDBColumnMetadata::isPrimaryKey)
.findFirst()
.orElse(null);

if (this.idColumn == null) {
return fallback.execute(parameter);
}
}

return new SaveResultOperatorImpl(() -> builder.build(new UpsertOperatorParameter(parameter)));
}

class UpsertOperatorParameter extends InsertOperatorParameter {

private boolean doNoThingOnConflict;

private List<Term> where;

public UpsertOperatorParameter(org.hswebframework.ezorm.rdb.operator.dml.upsert.UpsertOperatorParameter parameter) {
doNoThingOnConflict = parameter.isDoNothingOnConflict();
setColumns(parameter.toInsertColumns());
setValues(parameter.getValues());
where = parameter.getWhere();
}

}

@AllArgsConstructor
private class SaveResultOperatorImpl implements SaveResultOperator {

Supplier<SqlRequest> sqlRequest;

@Override
public SaveResult sync() {
return ExceptionUtils.translation(() -> {
SyncSqlExecutor sqlExecutor = table.findFeatureNow(SyncSqlExecutor.ID);
int updated = sqlExecutor.update(sqlRequest.get());
return SaveResult.of(0, updated);
}, table);
}

@Override
public Mono<SaveResult> reactive() {
return Mono
.fromSupplier(sqlRequest)
.as(table.findFeatureNow(ReactiveSqlExecutor.ID)::update)
.map(i -> SaveResult.of(0, i))
.as(ExceptionUtils.translation(table));
}
}

private class UpsertBatchInsertSqlBuilder implements InsertSqlBuilder {

private final RDBTableMetadata table;

public UpsertBatchInsertSqlBuilder(RDBTableMetadata table) {
this.table = table;
}

private Map<Integer, Tuple2<RDBColumnMetadata, UpsertColumn>> createColumnIndex(Set<InsertColumn> columns) {
Map<Integer, Tuple2<RDBColumnMetadata, UpsertColumn>> columnMapping = new LinkedHashMap<>(columns.size());
int index = 0;
for (InsertColumn column : columns) {

RDBColumnMetadata metadata = table.getColumn(column.getColumn()).orElse(null);
if (metadata == null) {
index++;
continue;
}
columnMapping.put(index++, Tuples.of(metadata, ((UpsertColumn) column)));
}
return columnMapping;
}


@Override
public SqlRequest build(InsertOperatorParameter parameter) {
UpsertOperatorParameter upsertParameter = (UpsertOperatorParameter) parameter;
PrepareSqlFragments fragments = PrepareSqlFragments.of();
fragments.addSql("merge into", table.getFullName(), "with(rowlock) as t using ( values");

Map<Integer, Tuple2<RDBColumnMetadata, UpsertColumn>> columnMapping = createColumnIndex(parameter.getColumns());
boolean notContainsId = true;
int rowIndex = 0;
for (List<Object> values : parameter.getValues()) {
int valueIndex = 0;
if (rowIndex > 0) {
fragments.addSql(",");
}
fragments.addSql("(");

for (Map.Entry<Integer, Tuple2<RDBColumnMetadata, UpsertColumn>> entry : columnMapping.entrySet()) {
int index = entry.getKey();
RDBColumnMetadata column = entry.getValue().getT1();
Object value = values.size() > index ? values.get(index) : null;
if (column.isPrimaryKey()) {
notContainsId = false;
}
if (valueIndex > 0) {
fragments.addSql(",");
}

if ((value == null || value instanceof NullValue)
&& column.getDefaultValue() instanceof RuntimeDefaultValue) {
value = column.getDefaultValue().get();
}

if (value instanceof NativeSql) {
throw new UnsupportedOperationException("upsert unsupported NativeSql");
} else {
if (value == null) {
value = NullValue.of(column.getType());
}
}

fragments.addSql("?").addParameter(column.encode(value));
valueIndex++;
}

if (notContainsId) {
if (idColumn.getDefaultValue() == null) {
throw new UnsupportedOperationException("column " + idColumn.getFullName() + " unsupported default value");
}
Object value = idColumn.getDefaultValue().get();
fragments.addSql(",");

if (value instanceof NativeSql) {
fragments
.addSql(((NativeSql) value).getSql())
.addParameter(((NativeSql) value).getParameters());
} else {
fragments.addSql("?").addParameter(value);
}
}
fragments.addSql(")");
rowIndex++;
}

String columnStr = columnMapping.values()
.stream()
.map(tp2 -> tp2.getT1().getQuoteName())
.collect(Collectors.joining(","));

fragments.addSql(") as t2 (", columnStr, ") on (", idColumn.getFullName("t"), "=", idColumn.getFullName("t2"), ")");

PrepareSqlFragments insertColumns = PrepareSqlFragments.of();
PrepareSqlFragments insertValues = PrepareSqlFragments.of();
PrepareSqlFragments update = PrepareSqlFragments.of();


boolean ignoreNullColumn = parameter.getValues().size() == 1;
List<Object> firstValues = parameter.getValues().get(0);
int insertIndex = 0, insertValueIndex = 0, updateIndex = 0;

if (notContainsId) {
insertIndex = 1;
insertColumns.addSql(idColumn.getQuoteName());
insertValues.addSql(idColumn.getFullName("t2"));
}

for (Tuple2<RDBColumnMetadata, UpsertColumn> columnBind : columnMapping.values()) {
RDBColumnMetadata column = columnBind.getT1();

String t2Column = column.getFullName("t2");
String tColumn = column.getFullName("t");

//insert
{
boolean canInsert = column.isInsertable();
if (canInsert && ignoreNullColumn) {
Object value = firstValues.size() > insertValueIndex ? firstValues.get(insertValueIndex) : null;
if (value == null || value instanceof NullValue) {
canInsert = false;
}
}
insertValueIndex++;

if (canInsert) {
if (insertIndex > 0) {
insertColumns.addSql(",");
insertValues.addSql(",");
}
insertColumns.addSql(column.getQuoteName());
insertValues.addSql(t2Column);
insertIndex++;
}
}

//update
{
if (column.isPrimaryKey()
|| !column.isUpdatable()
|| !column.isSaveable()
|| columnBind.getT2().isUpdateIgnore()) {

continue;
}
if (updateIndex > 0) {
update.addSql(",");
}
update.addSql(tColumn,
"=", "coalesce(" + t2Column + "," + tColumn + ")");

updateIndex++;
}
}

if (update.isNotEmpty() || upsertParameter.doNoThingOnConflict) {
fragments.addSql("when matched then update set");
fragments.addFragments(update);
}


fragments.addSql("when not matched then insert (");
fragments.addFragments(insertColumns);
fragments.addSql(") values (");
fragments.addFragments(insertValues);
fragments.addSql(");");

return fragments.toRequest();
}
}
}
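
The builder above assembles a single multi-row MERGE statement: incoming rows become a VALUES table source aliased t2, matched against the target on the primary key, with non-key columns updated via COALESCE so that null incoming values keep the stored value. A sketch of the output for a hypothetical s_user(id, name, age) table and two rows, formatted for readability (the ? markers stand for the encoded parameter values):

    MERGE INTO s_user WITH(ROWLOCK) AS t
    USING ( VALUES (?, ?, ?), (?, ?, ?) ) AS t2 (id, name, age)
    ON (t.id = t2.id)
    WHEN MATCHED THEN UPDATE SET
        t.name = COALESCE(t2.name, t.name),   -- null incoming values keep the stored value
        t.age  = COALESCE(t2.age,  t.age)
    WHEN NOT MATCHED THEN INSERT (id, name, age)
    VALUES (t2.id, t2.name, t2.age);

When the incoming columns do not include the primary key, the builder appends the id column's default value to each VALUES row so the ON clause still has a key to match on.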
@@ -18,7 +18,7 @@ public SqlServerDialect() {
super();
addDataTypeBuilder(JDBCType.CHAR, (meta) -> StringUtils.concat("char(", meta.getLength(), ")"));
addDataTypeBuilder(JDBCType.NCHAR, (meta) -> StringUtils.concat("nchar(", meta.getLength(), ")"));
- addDataTypeBuilder(JDBCType.VARCHAR, (meta) -> StringUtils.concat("varchar(", meta.getLength(), ")"));
+ addDataTypeBuilder(JDBCType.VARCHAR, (meta) -> StringUtils.concat("nvarchar(", meta.getLength(), ")"));
addDataTypeBuilder(JDBCType.NVARCHAR, (meta) -> StringUtils.concat("nvarchar(", meta.getLength(), ")"));
addDataTypeBuilder(JDBCType.TIMESTAMP, (meta) -> "datetime2");
addDataTypeBuilder(JDBCType.TIME, (meta) -> "time");
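
With JDBCType.VARCHAR now rendered as nvarchar, string columns are created as Unicode from the start, so CJK text survives storage regardless of the database collation. Illustrative DDL under the new mapping (table and column names are hypothetical):

    CREATE TABLE s_user (
        name nvarchar(255),      -- JDBCType.VARCHAR now renders as nvarchar
        created_time datetime2   -- JDBCType.TIMESTAMP renders as datetime2
    );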