[3.2] DeltaCatalog#createTable should respect write options (#3674) #3698

Merged 1 commit on Sep 20, 2024
@@ -341,12 +341,24 @@ class DeltaCatalog extends DelegatingCatalogExtension
       properties: util.Map[String, String]) : Table =
     recordFrameProfile("DeltaCatalog", "createTable") {
       if (DeltaSourceUtils.isDeltaDataSourceName(getProvider(properties))) {
+        // TODO: we should extract write options from table properties for all the cases. We
+        // can remove the UC check when we have confidence.
+        val respectOptions = isUnityCatalog || properties.containsKey("test.simulateUC")
+        val (props, writeOptions) = if (respectOptions) {
+          val (props, writeOptions) = getTablePropsAndWriteOptions(properties)
+          expandTableProps(props, writeOptions, spark.sessionState.conf)
+          props.remove("test.simulateUC")
+          (props, writeOptions)
+        } else {
+          (properties, Map.empty[String, String])
+        }
+
         createDeltaTable(
           ident,
           schema,
           partitions,
-          properties,
-          Map.empty,
+          props,
+          writeOptions,
           sourceQuery = None,
           TableCreationModes.Create
         )
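A quick illustration of what this hunk enables (not part of the diff; the table name and `s3a` keys are illustrative): on a Unity Catalog session, `respectOptions` is true, so an `OPTIONS` clause is forwarded to the file system layer instead of being persisted as table properties.

```scala
// Sketch of the user-visible effect, assuming a Unity Catalog session.
// The OPTIONS entries become write options: they reach the Hadoop configuration
// used to access the table location and are not stored in the metastore.
spark.sql("""
  CREATE TABLE events USING delta LOCATION 's3a://bucket/events'
  OPTIONS (
    fs.s3a.endpoint='https://storage.example.com',
    fs.s3a.path.style.access='true')
""")
```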
@@ -515,6 +527,44 @@ class DeltaCatalog extends DelegatingCatalogExtension
     }
   }
 
+  private def getTablePropsAndWriteOptions(properties: util.Map[String, String])
+      : (util.Map[String, String], Map[String, String]) = {
+    val props = new util.HashMap[String, String]()
+    // Options passed in through the SQL API will show up both with an "option." prefix and
+    // without in Spark 3.1, so we need to remove those from the properties
+    val optionsThroughProperties = properties.asScala.collect {
+      case (k, _) if k.startsWith(TableCatalog.OPTION_PREFIX) =>
+        k.stripPrefix(TableCatalog.OPTION_PREFIX)
+    }.toSet
+    val writeOptions = new util.HashMap[String, String]()
+    properties.asScala.foreach { case (k, v) =>
+      if (!k.startsWith(TableCatalog.OPTION_PREFIX) && !optionsThroughProperties.contains(k)) {
+        // Add to properties
+        props.put(k, v)
+      } else if (optionsThroughProperties.contains(k)) {
+        writeOptions.put(k, v)
+      }
+    }
+    (props, writeOptions.asScala.toMap)
+  }
+
+  private def expandTableProps(
+      props: util.Map[String, String],
+      options: Map[String, String],
+      conf: SQLConf): Unit = {
+    if (conf.getConf(DeltaSQLConf.DELTA_LEGACY_STORE_WRITER_OPTIONS_AS_PROPS)) {
+      // Legacy behavior
+      options.foreach { case (k, v) => props.put(k, v) }
+    } else {
+      options.foreach { case (k, v) =>
+        // Continue putting in Delta prefixed options to avoid breaking workloads
+        if (k.toLowerCase(Locale.ROOT).startsWith("delta.")) {
+          props.put(k, v)
+        }
+      }
+    }
+  }
+
   /**
    * A staged delta table, which creates a HiveMetaStore entry and appends data if this was a
    * CTAS/RTAS command. We have an ugly way of using this API right now, but it's the best way to
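To make the two new helpers concrete, a hedged worked example (option names and values are illustrative; `TableCatalog.OPTION_PREFIX` is the `"option."` prefix the old inline code used):

```scala
// On Spark 3.1+, an OPTIONS clause surfaces each entry twice in the property map:
// once with the "option." prefix and once without.
val properties = new java.util.HashMap[String, String]()
properties.put("option.fs.fake.impl", "com.example.Fake") // prefixed copy from OPTIONS
properties.put("fs.fake.impl", "com.example.Fake")        // unprefixed copy of the same
properties.put("delta.appendOnly", "true")                // a genuine table property

// getTablePropsAndWriteOptions(properties) yields:
//   props        == {"delta.appendOnly" -> "true"}
//   writeOptions == Map("fs.fake.impl" -> "com.example.Fake")
//
// expandTableProps(props, writeOptions, conf) then copies a write option back into
// props only if it is "delta."-prefixed (or copies all of them when the legacy flag
// DELTA_LEGACY_STORE_WRITER_OPTIONS_AS_PROPS is set), so "fs.fake.impl" is never
// persisted as a table property.
```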
@@ -536,35 +586,11 @@ class DeltaCatalog extends DelegatingCatalogExtension
     override def commitStagedChanges(): Unit = recordFrameProfile(
         "DeltaCatalog", "commitStagedChanges") {
       val conf = spark.sessionState.conf
-      val props = new util.HashMap[String, String]()
-      // Options passed in through the SQL API will show up both with an "option." prefix and
-      // without in Spark 3.1, so we need to remove those from the properties
-      val optionsThroughProperties = properties.asScala.collect {
-        case (k, _) if k.startsWith("option.") => k.stripPrefix("option.")
-      }.toSet
-      val sqlWriteOptions = new util.HashMap[String, String]()
-      properties.asScala.foreach { case (k, v) =>
-        if (!k.startsWith("option.") && !optionsThroughProperties.contains(k)) {
-          // Do not add to properties
-          props.put(k, v)
-        } else if (optionsThroughProperties.contains(k)) {
-          sqlWriteOptions.put(k, v)
-        }
-      }
-      if (writeOptions.isEmpty && !sqlWriteOptions.isEmpty) {
-        writeOptions = sqlWriteOptions.asScala.toMap
-      }
-      if (conf.getConf(DeltaSQLConf.DELTA_LEGACY_STORE_WRITER_OPTIONS_AS_PROPS)) {
-        // Legacy behavior
-        writeOptions.foreach { case (k, v) => props.put(k, v) }
-      } else {
-        writeOptions.foreach { case (k, v) =>
-          // Continue putting in Delta prefixed options to avoid breaking workloads
-          if (k.toLowerCase(Locale.ROOT).startsWith("delta.")) {
-            props.put(k, v)
-          }
-        }
+      val (props, sqlWriteOptions) = getTablePropsAndWriteOptions(properties)
+      if (writeOptions.isEmpty && sqlWriteOptions.nonEmpty) {
+        writeOptions = sqlWriteOptions
       }
+      expandTableProps(props, writeOptions, conf)
       createDeltaTable(
         ident,
         schema,
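One subtlety in the staged path above: options passed explicitly by the writer still win, and SQL-derived options apply only as a fallback. A reduced, standalone sketch of that precedence rule (values illustrative):

```scala
// DataFrame writer options (writeOptions) take precedence; options recovered from
// the SQL property map (sqlWriteOptions) are used only when the writer set none.
var writeOptions: Map[String, String] = Map.empty               // none from the writer
val sqlWriteOptions = Map("fs.fake.impl" -> "com.example.Fake") // recovered from SQL
if (writeOptions.isEmpty && sqlWriteOptions.nonEmpty) {
  writeOptions = sqlWriteOptions                                // fallback kicks in
}
```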
@@ -94,7 +94,7 @@ case class DeltaTableV2(
     // as Unity Catalog may add more table storage properties on the fly. We should respect it
     // and merge the table storage properties and Delta options.
     val dataSourceOptions = if (catalogTable.isDefined) {
-      // To be safe, here we only extra file system options from table storage properties and
+      // To be safe, here we only extract file system options from table storage properties and
       // the original `options` has higher priority than the table storage properties.
       val fileSystemOptions = catalogTable.get.storage.properties.filter { case (k, _) =>
         DeltaTableUtils.validDeltaTableHadoopPrefixes.exists(k.startsWith)
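The prefix filter is the safety valve here: only Hadoop file-system keys cross over from the catalog's storage properties. A standalone sketch, treating `List("fs.", "dfs.")` as the assumed contents of `validDeltaTableHadoopPrefixes` (see `DeltaTableUtils` for the authoritative list):

```scala
val validDeltaTableHadoopPrefixes = List("fs.", "dfs.") // assumption, see lead-in
val storageProperties = Map(
  "fs.fake.impl" -> "com.example.Fake", // file-system key: kept
  "comment"      -> "my table")         // unrelated property: dropped
val fileSystemOptions = storageProperties.filter { case (k, _) =>
  validDeltaTableHadoopPrefixes.exists(k.startsWith)
}
assert(fileSystemOptions == Map("fs.fake.impl" -> "com.example.Fake"))
```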
@@ -107,7 +107,12 @@ case class CreateDeltaTableCommand(
     }
 
     val tableLocation = getDeltaTablePath(tableWithLocation)
-    val deltaLog = DeltaLog.forTable(sparkSession, tableLocation)
+    // To be safe, here we only extract file system options from table storage properties, to create
+    // the DeltaLog.
+    val fileSystemOptions = table.storage.properties.filter { case (k, _) =>
+      DeltaTableUtils.validDeltaTableHadoopPrefixes.exists(k.startsWith)
+    }
+    val deltaLog = DeltaLog.forTable(sparkSession, tableLocation, fileSystemOptions)
 
     recordDeltaOperation(deltaLog, "delta.ddl.createTable") {
       handleCommit(sparkSession, deltaLog, tableWithLocation)
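Why threading `fileSystemOptions` into `DeltaLog.forTable` matters: resolving the table path goes through Hadoop's `FileSystem` factory, which fails for any scheme it cannot map to an implementation. A hedged illustration (the `fake` scheme mirrors the test below; the fully qualified class name is an assumption):

```scala
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

val conf = new Configuration()
// Without fs.fake.impl in the configuration, scheme resolution fails:
//   FileSystem.get(new Path("fake:///tmp/t").toUri, conf)
//   => UnsupportedFileSystemException: No FileSystem for scheme "fake"
conf.set("fs.fake.impl", "org.apache.spark.sql.delta.FakeFileSystem") // assumed FQCN
val fs = FileSystem.get(new Path("fake:///tmp/t").toUri, conf)        // now resolves
```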
@@ -23,7 +23,7 @@ import org.apache.spark.sql.delta.schema.InvariantViolationException
 import org.apache.spark.sql.delta.sources.DeltaSQLConf
 import org.apache.spark.sql.delta.test.DeltaSQLCommandTest
 import org.apache.spark.sql.delta.test.DeltaSQLTestUtils
-import org.apache.hadoop.fs.Path
+import org.apache.hadoop.fs.{Path, UnsupportedFileSystemException}
 
 import org.apache.spark.SparkEnv
 import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest, Row}
@@ -84,6 +84,26 @@ class DeltaDDLSuite extends DeltaDDLTestBase with SharedSparkSession
       assert(spark.table("t").collect().isEmpty)
     }
   }
+
+  test("CREATE TABLE with OPTIONS") {
+    withTempPath { path =>
+      spark.range(10).write.format("delta").save(path.getCanonicalPath)
+      withTable("t") {
+        def createTableWithOptions(simulateUC: Boolean): Unit = {
+          sql(
+            s"""
+               |CREATE TABLE t USING delta LOCATION 'fake://${path.getCanonicalPath}'
+               |${if (simulateUC) "TBLPROPERTIES (test.simulateUC=true)" else ""}
+               |OPTIONS (
+               |  fs.fake.impl='${classOf[FakeFileSystem].getName}',
+               |  fs.fake.impl.disable.cache=true)
+               |""".stripMargin)
+        }
+        intercept[UnsupportedFileSystemException](createTableWithOptions(false))
+        createTableWithOptions(true)
+      }
+    }
+  }
 }
 
 
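The test relies on a `FakeFileSystem` helper that this diff does not show. A minimal sketch of what such a helper plausibly looks like (an assumption; the real class lives elsewhere in the Delta test sources): a local file system registered under the `fake` scheme, so the table is only reachable when `fs.fake.impl` is supplied as an option.

```scala
import java.net.URI
import org.apache.hadoop.fs.RawLocalFileSystem

// Hypothetical stand-in for the FakeFileSystem the test references: delegates all
// I/O to the local file system and only changes the advertised scheme/URI, so the
// test exercises scheme resolution without needing real remote storage.
class FakeFileSystem extends RawLocalFileSystem {
  override def getScheme: String = "fake"
  override def getUri: URI = URI.create("fake:///")
}
```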