diff --git a/Cargo.lock b/Cargo.lock index 4f9b96a51c63c..5ee420c698eed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2733,9 +2733,8 @@ dependencies = [ name = "databend-common-ast" version = "0.1.0" dependencies = [ + "codespan-reporting", "criterion", - "databend-common-exception", - "databend-common-io", "derive-visitor", "enum-as-inner 0.5.1", "ethnum", @@ -2753,6 +2752,7 @@ dependencies = [ "pretty", "pretty_assertions", "regex", + "serde", "serde_json", "strsim 0.10.0", "strum 0.24.1", @@ -2967,8 +2967,8 @@ dependencies = [ "arrow-schema", "backtrace 0.3.69 (git+https://github.com/rust-lang/backtrace-rs.git?rev=6145fe6bac65c38375f1216a565a6cc7deb89a2d)", "bincode 2.0.0-rc.3", - "codespan-reporting", "databend-common-arrow", + "databend-common-ast", "geos", "geozero", "http 1.1.0", @@ -4839,7 +4839,6 @@ dependencies = [ "clap", "databend-client", "databend-common-ast", - "databend-common-exception", "databend-common-expression", "databend-common-formats", "databend-common-functions", @@ -4916,6 +4915,7 @@ dependencies = [ "crc32fast", "criterion", "databend-common-arrow", + "databend-common-ast", "databend-common-exception", "databend-common-expression", "databend-common-functions", @@ -5147,8 +5147,7 @@ dependencies = [ [[package]] name = "derive-visitor" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21045832f19977a1bec46f3d826609794661c0d23370f3ee35b8ef6537861cd6" +source = "git+https://github.com/andylokandy/derive-visitor.git?rev=c07c6b6#c07c6b6095f2f137223cbe4b25ff4c19ecde4234" dependencies = [ "derive-visitor-macros", ] @@ -5156,8 +5155,7 @@ dependencies = [ [[package]] name = "derive-visitor-macros" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22c6a42ab7b480fb19a029c5e54cb8cdf912c7798cf0192441d2d92eb4af8012" +source = "git+https://github.com/andylokandy/derive-visitor.git?rev=c07c6b6#c07c6b6095f2f137223cbe4b25ff4c19ecde4234" dependencies = [ 
"convert_case 0.4.0", "itertools 0.10.5", diff --git a/Cargo.toml b/Cargo.toml index 494826b7e8445..d0fa3dfce6efa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -294,7 +294,6 @@ debug-assertions = true overflow-checks = true rpath = false -# If there are dependencies that need patching, they can be listed below. [patch.crates-io] arrow-format = { git = "https://github.com/Xuanwo/arrow-format", rev = "be633a0" } icelake = { git = "https://github.com/icelake-io/icelake", rev = "be8b2c2" } @@ -303,3 +302,4 @@ async-backtrace = { git = "https://github.com/zhang2014/async-backtrace.git", re z3 = { git = "https://github.com/prove-rs/z3.rs", rev = "247d308" } z3-sys = { git = "https://github.com/prove-rs/z3.rs", rev = "247d308" } # proj = { git = "https://github.com/ariesdevil/proj", rev = "51e1c60" } +derive-visitor = { git = 'https://github.com/andylokandy/derive-visitor.git', rev = "c07c6b6" } diff --git a/src/common/cloud_control/proto/task.proto b/src/common/cloud_control/proto/task.proto index b980acb7447ac..9e7b8fdb603a2 100644 --- a/src/common/cloud_control/proto/task.proto +++ b/src/common/cloud_control/proto/task.proto @@ -12,6 +12,7 @@ message ScheduleOptions { optional string cron = 2; // CRON = '0 2 * * *' means Every night at 2 AM. UTC time zone. optional string time_zone = 3; // "UTC..." 
ScheduleType schedule_type = 4; + optional uint64 milliseconds_interval = 5; // milliseconds level interval } message WarehouseOptions { diff --git a/src/common/cloud_control/src/task_utils.rs b/src/common/cloud_control/src/task_utils.rs index 8f837c2f2cc08..80c15f1fa90f2 100644 --- a/src/common/cloud_control/src/task_utils.rs +++ b/src/common/cloud_control/src/task_utils.rs @@ -93,10 +93,19 @@ pub fn format_schedule_options(s: &ScheduleOptions) -> Result { } }; return match schedule_type { - ScheduleType::IntervalType => Ok(format!( - "INTERVAL {} SECOND", - s.interval.unwrap_or_default(), - )), + ScheduleType::IntervalType => { + if s.milliseconds_interval.is_some() { + return Ok(format!( + "INTERVAL {} SECOND {} MILLISECOND", + s.interval.unwrap_or_default(), + s.milliseconds_interval.unwrap_or_default(), + )); + } + Ok(format!( + "INTERVAL {} SECOND", + s.interval.unwrap_or_default(), + )) + } ScheduleType::CronType => { if s.cron.is_none() { return Err(ErrorCode::IllegalCloudControlMessageFormat( diff --git a/src/common/exception/Cargo.toml b/src/common/exception/Cargo.toml index 8178f934f9fa6..26cd10c020a71 100644 --- a/src/common/exception/Cargo.toml +++ b/src/common/exception/Cargo.toml @@ -12,11 +12,7 @@ test = true [dependencies] # In alphabetical order databend-common-arrow = { path = "../arrow" } - -# GitHub dependencies -# TODO: Use the version from crates.io once -# https://github.com/brendanzab/codespan/pull/331 is released. 
-codespan-reporting = { git = "https://github.com/brendanzab/codespan", rev = "c84116f5" } +databend-common-ast = { path = "../../query/ast" } anyhow = { workspace = true } arrow-schema = { workspace = true } diff --git a/src/common/exception/src/exception.rs b/src/common/exception/src/exception.rs index 2ccb3be02c58f..7077035a3a3b6 100644 --- a/src/common/exception/src/exception.rs +++ b/src/common/exception/src/exception.rs @@ -20,11 +20,11 @@ use std::fmt::Formatter; use std::sync::Arc; use backtrace::Backtrace; +use databend_common_ast::span::pretty_print_error; +use databend_common_ast::Span; use thiserror::Error; use crate::exception_backtrace::capture; -use crate::span::pretty_print_error; -use crate::Span; #[derive(Clone)] pub enum ErrorCodeBacktrace { diff --git a/src/common/exception/src/exception_into.rs b/src/common/exception/src/exception_into.rs index c3e7b576d2537..0a3f68ac01b5c 100644 --- a/src/common/exception/src/exception_into.rs +++ b/src/common/exception/src/exception_into.rs @@ -18,12 +18,12 @@ use std::fmt::Display; use std::fmt::Formatter; use std::sync::Arc; +use databend_common_ast::Span; use geozero::error::GeozeroError; use crate::exception::ErrorCodeBacktrace; use crate::exception_backtrace::capture; use crate::ErrorCode; -use crate::Span; #[derive(thiserror::Error)] enum OtherErrors { @@ -231,6 +231,12 @@ impl From for ErrorCode { } } +impl From for ErrorCode { + fn from(error: databend_common_ast::ParseError) -> Self { + ErrorCode::SyntaxException(error.1).set_span(error.0) + } +} + impl From for ErrorCode { fn from(value: GeozeroError) -> Self { ErrorCode::GeometryError(value.to_string()) diff --git a/src/common/exception/src/lib.rs b/src/common/exception/src/lib.rs index 537f2fd433fd8..9aa1d8c9733d7 100644 --- a/src/common/exception/src/lib.rs +++ b/src/common/exception/src/lib.rs @@ -18,17 +18,11 @@ mod exception_backtrace; mod exception_code; mod exception_flight; mod exception_into; -mod span; mod with_context; pub use 
exception::ErrorCode; pub use exception::Result; pub use exception::ToErrorCode; pub use exception_into::SerializedError; -pub use span::merge_span; -pub use span::offset_span; -pub use span::pretty_print_error; -pub use span::Range; -pub use span::Span; pub use with_context::ErrorWithContext; pub use with_context::WithContext; diff --git a/src/meta/api/src/schema_api_impl.rs b/src/meta/api/src/schema_api_impl.rs index 006ac1a33eb14..8191c2e7c5ba8 100644 --- a/src/meta/api/src/schema_api_impl.rs +++ b/src/meta/api/src/schema_api_impl.rs @@ -190,6 +190,7 @@ use databend_common_meta_kvapi::kvapi; use databend_common_meta_kvapi::kvapi::DirName; use databend_common_meta_kvapi::kvapi::Key; use databend_common_meta_kvapi::kvapi::UpsertKVReq; +use databend_common_meta_types::anyerror::AnyError; use databend_common_meta_types::protobuf as pb; use databend_common_meta_types::txn_op::Request; use databend_common_meta_types::txn_op_response::Response; @@ -197,6 +198,9 @@ use databend_common_meta_types::ConditionResult; use databend_common_meta_types::InvalidReply; use databend_common_meta_types::MatchSeq; use databend_common_meta_types::MatchSeqExt; +use databend_common_meta_types::MetaAPIError; +use databend_common_meta_types::MetaDataError; +use databend_common_meta_types::MetaDataReadError; use databend_common_meta_types::MetaError; use databend_common_meta_types::MetaId; use databend_common_meta_types::MetaNetworkError; @@ -2282,7 +2286,7 @@ impl + ?Sized> SchemaApi for KV { // Batch get all table-name by id let seq_names = self.mget_kv(&id_name_kv_keys).await?; - let mut table_names = Vec::with_capacity(id_name_kv_keys.len()); + let mut table_names = Vec::with_capacity(table_ids.len()); // None means table_name not found, maybe immutable table id. 
Ignore it for seq_name in seq_names.into_iter().flatten() { @@ -2297,6 +2301,15 @@ impl + ?Sized> SchemaApi for KV { } let seq_metas = self.mget_kv(&meta_kv_keys).await?; + if seq_metas.len() != table_names.len() { + return Err(KVAppError::MetaError(MetaError::APIError( + MetaAPIError::DataError(MetaDataError::ReadError(MetaDataReadError::new( + "mget_table_names_by_ids", + "", + &AnyError::error("The system is experiencing high load, please retry later"), + ))), + ))); + } for (i, seq_meta_opt) in seq_metas.iter().enumerate() { if let Some(seq_meta) = seq_meta_opt { let table_meta: TableMeta = deserialize_struct(&seq_meta.data)?; @@ -2346,7 +2359,8 @@ impl + ?Sized> SchemaApi for KV { // Batch get all table-name by id let seq_names = self.mget_kv(&kv_keys).await?; - let mut db_names = Vec::with_capacity(kv_keys.len()); + // If multi drop/create db the capacity may not same + let mut db_names = Vec::with_capacity(db_ids.len()); // None means db_name not found, maybe immutable database id. 
Ignore it for seq_name in seq_names.into_iter().flatten() { @@ -2361,6 +2375,15 @@ impl + ?Sized> SchemaApi for KV { } let seq_metas = self.mget_kv(&meta_kv_keys).await?; + if seq_metas.len() != db_names.len() { + return Err(KVAppError::MetaError(MetaError::APIError( + MetaAPIError::DataError(MetaDataError::ReadError(MetaDataReadError::new( + "mget_database_names_by_ids", + "", + &AnyError::error("The system is experiencing high load, please retry later"), + ))), + ))); + } for (i, seq_meta_opt) in seq_metas.iter().enumerate() { if let Some(seq_meta) = seq_meta_opt { let db_meta: DatabaseMeta = deserialize_struct(&seq_meta.data)?; diff --git a/src/query/ast/Cargo.toml b/src/query/ast/Cargo.toml index dc43ea56af71d..3b5b9307c411b 100644 --- a/src/query/ast/Cargo.toml +++ b/src/query/ast/Cargo.toml @@ -15,10 +15,6 @@ doctest = false ignored = ["geos"] [dependencies] # In alphabetical order -# Workspace dependencies -databend-common-exception = { path = "../../common/exception" } -databend-common-io = { path = "../../common/io" } - # Crates.io dependencies derive-visitor = { workspace = true } enum-as-inner = "0.5.1" @@ -34,6 +30,7 @@ nom-rule = "0.3.0" ordered-float = { workspace = true } pratt = "0.4.0" pretty = "0.11.3" +serde = { workspace = true } serde_json = { workspace = true } strsim = "0.10" strum = "0.24" @@ -41,6 +38,12 @@ strum_macros = "0.24" unindent = "0.2.3" url = "2.3.1" +# TODO: Use the version from crates.io once +# https://github.com/brendanzab/codespan/pull/331 is released. 
+[dependencies.codespan-reporting] +git = "https://github.com/brendanzab/codespan" +rev = "c84116f5" + [dev-dependencies] criterion = { workspace = true } goldenfile = "1.4" diff --git a/src/query/ast/src/ast/common.rs b/src/query/ast/src/ast/common.rs index 1179b8b9e0246..ab29458479e6a 100644 --- a/src/query/ast/src/ast/common.rs +++ b/src/query/ast/src/ast/common.rs @@ -14,12 +14,14 @@ use std::fmt::Display; use std::fmt::Formatter; +use std::fmt::Write as _; -use databend_common_exception::Span; use derive_visitor::Drive; use derive_visitor::DriveMut; +use ethnum::i256; use crate::ast::quote::QuotedIdent; +use crate::Span; // Identifier of table name or column name. #[derive(Debug, Clone, PartialEq, Eq, Drive, DriveMut)] @@ -269,3 +271,33 @@ pub(crate) fn write_space_separated_string_map( } Ok(()) } + +pub fn display_decimal_256(num: i256, scale: u8) -> String { + let mut buf = String::new(); + if scale == 0 { + write!(buf, "{}", num).unwrap(); + } else { + let pow_scale = i256::from(10).pow(scale as u32); + // -1/10 = 0 + if num >= 0 { + write!( + buf, + "{}.{:0>width$}", + num / pow_scale, + (num % pow_scale).abs(), + width = scale as usize + ) + .unwrap(); + } else { + write!( + buf, + "-{}.{:0>width$}", + -num / pow_scale, + (num % pow_scale).abs(), + width = scale as usize + ) + .unwrap(); + } + } + buf +} diff --git a/src/query/ast/src/ast/expr.rs b/src/query/ast/src/ast/expr.rs index a45fa5154c1b6..a8ba2757df2b9 100644 --- a/src/query/ast/src/ast/expr.rs +++ b/src/query/ast/src/ast/expr.rs @@ -15,12 +15,6 @@ use std::fmt::Display; use std::fmt::Formatter; -use databend_common_exception::merge_span; -use databend_common_exception::ErrorCode; -use databend_common_exception::Result; -use databend_common_exception::Span; -use databend_common_io::display_decimal_256; -use databend_common_io::escape_string_with_quote; use derive_visitor::Drive; use derive_visitor::DriveMut; use enum_as_inner::EnumAsInner; @@ -30,10 +24,16 @@ use pratt::Precedence; use 
super::ColumnRef; use super::OrderByExpr; +use crate::ast::display_decimal_256; +use crate::ast::quote::QuotedString; use crate::ast::write_comma_separated_list; use crate::ast::Identifier; use crate::ast::Query; use crate::parser::expr::ExprElement; +use crate::span::merge_span; +use crate::ParseError; +use crate::Result; +use crate::Span; #[derive(Debug, Clone, PartialEq, Drive, DriveMut)] pub enum Expr { @@ -819,7 +819,7 @@ impl Display for Literal { write!(f, "{val}") } Literal::String(val) => { - write!(f, "\'{}\'", escape_string_with_quote(val, Some('\''))) + write!(f, "{}", QuotedString(val, '\'')) } Literal::Boolean(val) => { if *val { @@ -1265,9 +1265,10 @@ impl BinaryOperator { BinaryOperator::Lte => Ok(BinaryOperator::Gt), BinaryOperator::Eq => Ok(BinaryOperator::NotEq), BinaryOperator::NotEq => Ok(BinaryOperator::Eq), - _ => Err(ErrorCode::Unimplemented(format!( - "Converting {self} to its contrary is not currently supported" - ))), + _ => Err(ParseError( + None, + format!("Converting {self} to its contrary is not currently supported"), + )), } } diff --git a/src/query/ast/src/ast/format/ast_format.rs b/src/query/ast/src/ast/format/ast_format.rs index a9fa605c95855..e9117b9a8aab3 100644 --- a/src/query/ast/src/ast/format/ast_format.rs +++ b/src/query/ast/src/ast/format/ast_format.rs @@ -16,11 +16,11 @@ use std::fmt::Display; -use databend_common_exception::Result; -use databend_common_exception::Span; use itertools::Itertools; use crate::ast::*; +use crate::Result; +use crate::Span; pub fn format_statement(stmt: Statement) -> Result { let mut visitor = AstFormatVisitor::new(); @@ -2236,7 +2236,11 @@ impl<'ast> Visitor<'ast> for AstFormatVisitor { self.children.push(node); } - fn visit_show_grant(&mut self, principal: &'ast Option) { + fn visit_show_grant( + &mut self, + principal: &'ast Option, + show_options: &'ast Option, + ) { let mut children = Vec::new(); if let Some(principal) = &principal { let principal_name = match principal { @@ -2246,6 
+2250,18 @@ impl<'ast> Visitor<'ast> for AstFormatVisitor { let principal_format_ctx = AstFormatContext::new(principal_name); children.push(FormatTreeNode::new(principal_format_ctx)); } + if let Some(show_options) = show_options { + if let Some(show_limit) = &show_options.show_limit { + self.visit_show_limit(show_limit); + children.push(self.children.pop().unwrap()); + } + if let Some(limit) = show_options.limit { + let name = format!("Limit {}", limit); + let limit_format_ctx = AstFormatContext::new(name); + let node = FormatTreeNode::new(limit_format_ctx); + children.push(node); + } + } let name = "ShowGrant".to_string(); let format_ctx = AstFormatContext::with_children(name, children.len()); let node = FormatTreeNode::with_children(format_ctx, children); diff --git a/src/query/ast/src/ast/format/indent_format.rs b/src/query/ast/src/ast/format/indent_format.rs index 84c6edf046fe1..e0aa10e4dce8a 100644 --- a/src/query/ast/src/ast/format/indent_format.rs +++ b/src/query/ast/src/ast/format/indent_format.rs @@ -15,9 +15,8 @@ use std::fmt::Display; use std::fmt::Write; -use databend_common_exception::Result; - use super::FormatTreeNode; +use crate::Result; static INDENT_SIZE: usize = 4; diff --git a/src/query/ast/src/ast/format/pretty_format.rs b/src/query/ast/src/ast/format/pretty_format.rs index c56d42137273b..947673946e165 100644 --- a/src/query/ast/src/ast/format/pretty_format.rs +++ b/src/query/ast/src/ast/format/pretty_format.rs @@ -15,9 +15,8 @@ use std::fmt::Display; use std::fmt::Write; -use databend_common_exception::Result; - use super::FormatTreeNode; +use crate::Result; static REGULAR_PREFIX: &str = "├── "; static CHILD_PREFIX: &str = "│ "; diff --git a/src/query/ast/src/ast/format/syntax/mod.rs b/src/query/ast/src/ast/format/syntax/mod.rs index 06eb3e8f8a7f1..d657e9f49f484 100644 --- a/src/query/ast/src/ast/format/syntax/mod.rs +++ b/src/query/ast/src/ast/format/syntax/mod.rs @@ -17,13 +17,14 @@ mod dml; mod expr; mod query; -use 
databend_common_exception::Result; use ddl::*; use dml::*; use pretty::RcDoc; use query::*; use crate::ast::Statement; +use crate::ParseError; +use crate::Result; pub fn pretty_statement(stmt: Statement, max_width: usize) -> Result { let pretty_stmt = match stmt { @@ -44,8 +45,10 @@ pub fn pretty_statement(stmt: Statement, max_width: usize) -> Result { }; let mut bs = Vec::new(); - pretty_stmt.render(max_width, &mut bs)?; - Ok(String::from_utf8(bs)?) + pretty_stmt + .render(max_width, &mut bs) + .map_err(|err| ParseError(None, err.to_string()))?; + String::from_utf8(bs).map_err(|err| ParseError(None, err.to_string())) } pub(crate) const NEST_FACTOR: isize = 4; diff --git a/src/query/ast/src/ast/query.rs b/src/query/ast/src/ast/query.rs index 0d4d25cd5c663..4ec4aa267d9c3 100644 --- a/src/query/ast/src/ast/query.rs +++ b/src/query/ast/src/ast/query.rs @@ -15,7 +15,6 @@ use std::fmt::Display; use std::fmt::Formatter; -use databend_common_exception::Span; use derive_visitor::Drive; use derive_visitor::DriveMut; @@ -28,6 +27,7 @@ use crate::ast::Hint; use crate::ast::Identifier; use crate::ast::SelectStageOptions; use crate::ast::WindowDefinition; +use crate::Span; /// Root node of a query tree #[derive(Debug, Clone, PartialEq, Drive, DriveMut)] diff --git a/src/query/ast/src/ast/statements/copy.rs b/src/query/ast/src/ast/statements/copy.rs index 977559b47eea1..d528547af3f8d 100644 --- a/src/query/ast/src/ast/statements/copy.rs +++ b/src/query/ast/src/ast/statements/copy.rs @@ -16,17 +16,13 @@ use std::collections::BTreeMap; use std::collections::HashSet; use std::fmt::Display; use std::fmt::Formatter; -use std::io::Error; -use std::io::ErrorKind; -use std::io::Result; -use databend_common_exception::ErrorCode; -use databend_common_io::escape_string_with_quote; use derive_visitor::Drive; use derive_visitor::DriveMut; use itertools::Itertools; use url::Url; +use crate::ast::quote::QuotedString; use crate::ast::write_comma_separated_map; use 
crate::ast::write_comma_separated_string_list; use crate::ast::write_comma_separated_string_map; @@ -35,6 +31,8 @@ use crate::ast::Identifier; use crate::ast::Query; use crate::ast::TableRef; use crate::ast::With; +use crate::ParseError; +use crate::Result; /// CopyIntoTableStmt is the parsed statement of `COPY into from `. /// @@ -285,8 +283,8 @@ impl Connection { .collect(); if !diffs.is_empty() { - return Err(Error::new( - ErrorKind::InvalidInput, + return Err(ParseError( + None, format!( "connection params invalid: expected [{}], got [{}]", self.visited_keys @@ -361,14 +359,14 @@ impl UriLocation { uri: String, part_prefix: String, conns: BTreeMap, - ) -> databend_common_exception::Result { + ) -> Result { // fs location is not a valid url, let's check it in advance. if let Some(path) = uri.strip_prefix("fs://") { if !path.starts_with('/') { - return Err(ErrorCode::BadArguments(format!( - "Invalid uri: {}. fs location must start with 'fs:///'", - uri - ))); + return Err(ParseError( + None, + format!("Invalid uri: {}. 
fs location must start with 'fs:///'", uri), + )); } return Ok(UriLocation::new( "fs".to_string(), @@ -379,9 +377,8 @@ impl UriLocation { )); } - let parsed = Url::parse(&uri).map_err(|e| { - databend_common_exception::ErrorCode::BadArguments(format!("invalid uri {}", e)) - })?; + let parsed = + Url::parse(&uri).map_err(|e| ParseError(None, format!("invalid uri {}", e)))?; let protocol = parsed.scheme().to_string(); @@ -394,7 +391,7 @@ impl UriLocation { hostname.to_string() } }) - .ok_or_else(|| databend_common_exception::ErrorCode::BadArguments("invalid uri"))?; + .ok_or_else(|| ParseError(None, "invalid uri".to_string()))?; let path = if parsed.path().is_empty() { "/".to_string() @@ -527,7 +524,7 @@ impl Display for FileFormatValue { FileFormatValue::Bool(v) => write!(f, "{v}"), FileFormatValue::U64(v) => write!(f, "{v}"), FileFormatValue::String(v) => { - write!(f, "'{}'", escape_string_with_quote(v, Some('\''))) + write!(f, "{}", QuotedString(v, '\'')) } FileFormatValue::StringList(v) => { write!(f, "(")?; @@ -535,7 +532,7 @@ impl Display for FileFormatValue { if i > 0 { write!(f, ", ")?; } - write!(f, "'{}'", escape_string_with_quote(s, Some('\'')))?; + write!(f, "{}", QuotedString(s, '\''))?; } write!(f, ")") } diff --git a/src/query/ast/src/ast/statements/merge_into.rs b/src/query/ast/src/ast/statements/merge_into.rs index 0bd464ec26068..b60c244e6240f 100644 --- a/src/query/ast/src/ast/statements/merge_into.rs +++ b/src/query/ast/src/ast/statements/merge_into.rs @@ -16,8 +16,6 @@ use std::collections::BTreeMap; use std::fmt::Display; use std::fmt::Formatter; -use databend_common_exception::ErrorCode; -use databend_common_exception::Result; use derive_visitor::Drive; use derive_visitor::DriveMut; @@ -30,6 +28,8 @@ use crate::ast::Identifier; use crate::ast::Query; use crate::ast::TableAlias; use crate::ast::TableReference; +use crate::ParseError; +use crate::Result; #[derive(Debug, Clone, PartialEq, Drive, DriveMut)] pub struct MergeUpdateExpr { @@ -190,7 
+190,7 @@ impl MergeIntoStmt { if clauses.len() > 1 { for (idx, clause) in clauses.iter().enumerate() { if clause.selection.is_none() && idx < clauses.len() - 1 { - return Err(ErrorCode::SemanticError( + return Err(ParseError(None, "when there are multi matched clauses, we must have a condition for every one except the last one".to_string(), )); } @@ -204,7 +204,7 @@ impl MergeIntoStmt { if clauses.len() > 1 { for (idx, clause) in clauses.iter().enumerate() { if clause.selection.is_none() && idx < clauses.len() - 1 { - return Err(ErrorCode::SemanticError( + return Err(ParseError(None, "when there are multi unmatched clauses, we must have a condition for every one except the last one".to_string(), )); } diff --git a/src/query/ast/src/ast/statements/script.rs b/src/query/ast/src/ast/statements/script.rs index 9a28a02e5f4df..214fc1a80be7e 100644 --- a/src/query/ast/src/ast/statements/script.rs +++ b/src/query/ast/src/ast/statements/script.rs @@ -15,11 +15,10 @@ use std::fmt::Display; use std::fmt::Formatter; -use databend_common_exception::Span; - use crate::ast::Expr; use crate::ast::Identifier; use crate::ast::Statement; +use crate::Span; const INDENT_DEPTH: usize = 4; diff --git a/src/query/ast/src/ast/statements/statement.rs b/src/query/ast/src/ast/statements/statement.rs index 027147d92cbf4..5d4d0ab37f364 100644 --- a/src/query/ast/src/ast/statements/statement.rs +++ b/src/query/ast/src/ast/statements/statement.rs @@ -15,13 +15,13 @@ use std::fmt::Display; use std::fmt::Formatter; -use databend_common_io::escape_string_with_quote; use derive_visitor::Drive; use derive_visitor::DriveMut; use itertools::Itertools; use super::merge_into::MergeIntoStmt; use super::*; +use crate::ast::quote::QuotedString; use crate::ast::statements::connection::CreateConnectionStmt; use crate::ast::statements::pipe::CreatePipeStmt; use crate::ast::statements::task::CreateTaskStmt; @@ -202,7 +202,9 @@ pub enum Statement { Grant(GrantStmt), ShowGrants { principal: Option, + 
show_options: Option, }, + ShowObjectPrivileges(ShowObjectPrivilegesStmt), Revoke(RevokeStmt), // UDF @@ -592,7 +594,7 @@ impl Display for Statement { if *if_not_exists { write!(f, " IF NOT EXISTS")?; } - write!(f, " '{}'", escape_string_with_quote(role, Some('\'')))?; + write!(f, " {}", QuotedString(role, '\''))?; } Statement::DropRole { if_exists, @@ -605,13 +607,20 @@ impl Display for Statement { write!(f, " '{role}'")?; } Statement::Grant(stmt) => write!(f, "{stmt}")?, - Statement::ShowGrants { principal } => { + Statement::ShowGrants { + principal, + show_options, + } => { write!(f, "SHOW GRANTS")?; if let Some(principal) = principal { write!(f, " FOR")?; write!(f, "{principal}")?; } + if let Some(show_options) = show_options { + write!(f, " {show_options}")?; + } } + Statement::ShowObjectPrivileges(stmt) => write!(f, "{stmt}")?, Statement::Revoke(stmt) => write!(f, "{stmt}")?, Statement::CreateUDF(stmt) => write!(f, "{stmt}")?, Statement::DropUDF { diff --git a/src/query/ast/src/ast/statements/task.rs b/src/query/ast/src/ast/statements/task.rs index 705608f48c465..259578f07603b 100644 --- a/src/query/ast/src/ast/statements/task.rs +++ b/src/query/ast/src/ast/statements/task.rs @@ -136,15 +136,21 @@ impl Display for WarehouseOptions { #[derive(Debug, Clone, PartialEq, Drive, DriveMut)] pub enum ScheduleOptions { - IntervalSecs(#[drive(skip)] u64), + IntervalSecs(#[drive(skip)] u64, #[drive(skip)] u64), CronExpression(#[drive(skip)] String, #[drive(skip)] Option), } impl Display for ScheduleOptions { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { match self { - ScheduleOptions::IntervalSecs(secs) => { - write!(f, "{} SECOND", secs) + ScheduleOptions::IntervalSecs(secs, ms) => { + if *ms > 0 { + write!(f, "{} MILLISECOND", ms)?; + Ok(()) + } else { + write!(f, "{} SECOND", secs)?; + Ok(()) + } } ScheduleOptions::CronExpression(expr, tz) => { write!(f, "USING CRON '{}'", expr)?; diff --git a/src/query/ast/src/ast/statements/user.rs 
b/src/query/ast/src/ast/statements/user.rs index 6e844eaa4d46f..a6ab5d4d4e95f 100644 --- a/src/query/ast/src/ast/statements/user.rs +++ b/src/query/ast/src/ast/statements/user.rs @@ -22,6 +22,7 @@ use crate::ast::write_comma_separated_list; use crate::ast::AuthType; use crate::ast::CreateOption; use crate::ast::PrincipalIdentity; +use crate::ast::ShowOptions; use crate::ast::UserIdentity; use crate::ast::UserPrivilegeType; @@ -135,6 +136,50 @@ impl Display for RevokeStmt { } } +#[derive(Debug, Clone, PartialEq, Drive, DriveMut)] +pub struct ShowObjectPrivilegesStmt { + pub object: GrantObjectName, + pub show_option: Option, +} + +#[derive(Debug, Clone, PartialEq, Drive, DriveMut)] +pub enum GrantObjectName { + Database(#[drive(skip)] String), + Table(#[drive(skip)] Option, #[drive(skip)] String), + UDF(#[drive(skip)] String), + Stage(#[drive(skip)] String), +} + +impl Display for GrantObjectName { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + match self { + GrantObjectName::Database(database_name) => { + write!(f, "DATABASE {database_name}") + } + GrantObjectName::Table(database_name, table_name) => { + if let Some(database_name) = database_name { + write!(f, "TABLE {database_name}.{table_name}") + } else { + write!(f, "TABLE {table_name}") + } + } + GrantObjectName::UDF(udf) => write!(f, " UDF {udf}"), + GrantObjectName::Stage(stage) => write!(f, " STAGE {stage}"), + } + } +} + +impl Display for ShowObjectPrivilegesStmt { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "SHOW GRANTS ON {}", self.object)?; + + if let Some(show_option) = &self.show_option { + write!(f, " {show_option}")?; + } + Ok(()) + } +} + #[derive(Debug, Clone, PartialEq, Eq, Drive, DriveMut)] pub enum AccountMgrSource { Role { @@ -158,48 +203,12 @@ impl Display for AccountMgrSource { write!(f, " ")?; write_comma_separated_list(f, privileges.iter().map(|p| p.to_string()))?; write!(f, " ON")?; - match level { - AccountMgrLevel::Global => write!(f, " *.*")?, - 
AccountMgrLevel::Database(database_name) => { - if let Some(database_name) = database_name { - write!(f, " {database_name}.*")?; - } else { - write!(f, " *")?; - } - } - AccountMgrLevel::Table(database_name, table_name) => { - if let Some(database_name) = database_name { - write!(f, " {database_name}.{table_name}")?; - } else { - write!(f, " {table_name}")?; - } - } - AccountMgrLevel::UDF(udf) => write!(f, " UDF {udf}")?, - AccountMgrLevel::Stage(stage) => write!(f, " STAGE {stage}")?, - } + write!(f, " {}", level)?; } AccountMgrSource::ALL { level, .. } => { write!(f, " ALL PRIVILEGES")?; write!(f, " ON")?; - match level { - AccountMgrLevel::Global => write!(f, " *.*")?, - AccountMgrLevel::Database(database_name) => { - if let Some(database_name) = database_name { - write!(f, " {database_name}.*")?; - } else { - write!(f, " *")?; - } - } - AccountMgrLevel::Table(database_name, table_name) => { - if let Some(database_name) = database_name { - write!(f, " {database_name}.{table_name}")?; - } else { - write!(f, " {table_name}")?; - } - } - AccountMgrLevel::UDF(udf) => write!(f, " UDF {udf}")?, - AccountMgrLevel::Stage(stage) => write!(f, " STAGE {stage}")?, - } + write!(f, " {}", level)?; } } Ok(()) @@ -215,6 +224,30 @@ pub enum AccountMgrLevel { Stage(#[drive(skip)] String), } +impl Display for AccountMgrLevel { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + match self { + AccountMgrLevel::Global => write!(f, " *.*"), + AccountMgrLevel::Database(database_name) => { + if let Some(database_name) = database_name { + write!(f, " {database_name}.*") + } else { + write!(f, " *") + } + } + AccountMgrLevel::Table(database_name, table_name) => { + if let Some(database_name) = database_name { + write!(f, " {database_name}.{table_name}") + } else { + write!(f, " {table_name}") + } + } + AccountMgrLevel::UDF(udf) => write!(f, " UDF {udf}"), + AccountMgrLevel::Stage(stage) => write!(f, " STAGE {stage}"), + } + } +} + #[derive(Debug, Clone, PartialEq, Eq, Drive, 
DriveMut)] pub enum SecondaryRolesOption { None, diff --git a/src/query/ast/src/ast/visitors/visitor.rs b/src/query/ast/src/ast/visitors/visitor.rs index 61babccb613c5..59ea6a6aeed29 100644 --- a/src/query/ast/src/ast/visitors/visitor.rs +++ b/src/query/ast/src/ast/visitors/visitor.rs @@ -12,9 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use databend_common_exception::Span; - use crate::ast::*; +use crate::Span; #[deprecated = "Use derive_visitor::Visitor instead"] pub trait Visitor<'ast>: Sized { @@ -612,7 +611,14 @@ pub trait Visitor<'ast>: Sized { fn visit_grant(&mut self, _grant: &'ast GrantStmt) {} - fn visit_show_grant(&mut self, _principal: &'ast Option) {} + fn visit_show_grant( + &mut self, + _principal: &'ast Option, + _show_options: &'ast Option, + ) { + } + + fn visit_show_object_priv(&mut self, _show: &'ast ShowObjectPrivilegesStmt) {} fn visit_revoke(&mut self, _revoke: &'ast RevokeStmt) {} diff --git a/src/query/ast/src/ast/visitors/visitor_mut.rs b/src/query/ast/src/ast/visitors/visitor_mut.rs index 1418958f19b0a..738fd76d884b9 100644 --- a/src/query/ast/src/ast/visitors/visitor_mut.rs +++ b/src/query/ast/src/ast/visitors/visitor_mut.rs @@ -12,9 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use databend_common_exception::Span; - use crate::ast::*; +use crate::Span; #[deprecated = "Use derive_visitor::VisitorMut instead"] pub trait VisitorMut: Sized { @@ -624,7 +623,14 @@ pub trait VisitorMut: Sized { fn visit_grant(&mut self, _grant: &mut GrantStmt) {} - fn visit_show_grant(&mut self, _principal: &mut Option) {} + fn visit_show_grant( + &mut self, + _principal: &mut Option, + _show_options: &mut Option, + ) { + } + + fn visit_show_object_priv(&mut self, _show: &mut ShowObjectPrivilegesStmt) {} fn visit_revoke(&mut self, _revoke: &mut RevokeStmt) {} diff --git a/src/query/ast/src/ast/visitors/walk.rs b/src/query/ast/src/ast/visitors/walk.rs index 2beb82fc08f15..663cbeebc91de 100644 --- a/src/query/ast/src/ast/visitors/walk.rs +++ b/src/query/ast/src/ast/visitors/walk.rs @@ -503,7 +503,11 @@ pub fn walk_statement<'a, V: Visitor<'a>>(visitor: &mut V, statement: &'a Statem role_name, } => visitor.visit_drop_role(*if_exists, role_name), Statement::Grant(stmt) => visitor.visit_grant(stmt), - Statement::ShowGrants { principal } => visitor.visit_show_grant(principal), + Statement::ShowGrants { + principal, + show_options, + } => visitor.visit_show_grant(principal, show_options), + Statement::ShowObjectPrivileges(stmt) => visitor.visit_show_object_priv(stmt), Statement::Revoke(stmt) => visitor.visit_revoke(stmt), Statement::CreateUDF(stmt) => visitor.visit_create_udf(stmt), Statement::DropUDF { diff --git a/src/query/ast/src/ast/visitors/walk_mut.rs b/src/query/ast/src/ast/visitors/walk_mut.rs index 7768e8d05e39d..baae6a066906f 100644 --- a/src/query/ast/src/ast/visitors/walk_mut.rs +++ b/src/query/ast/src/ast/visitors/walk_mut.rs @@ -498,7 +498,11 @@ pub fn walk_statement_mut(visitor: &mut V, statement: &mut Statem role_name, } => visitor.visit_drop_role(*if_exists, role_name), Statement::Grant(stmt) => visitor.visit_grant(stmt), - Statement::ShowGrants { principal } => visitor.visit_show_grant(principal), + Statement::ShowGrants { + principal, + 
show_options, + } => visitor.visit_show_grant(principal, show_options), + Statement::ShowObjectPrivileges(stmt) => visitor.visit_show_object_priv(stmt), Statement::Revoke(stmt) => visitor.visit_revoke(stmt), Statement::CreateUDF(stmt) => visitor.visit_create_udf(stmt), Statement::DropUDF { diff --git a/src/query/ast/src/error.rs b/src/query/ast/src/error.rs new file mode 100644 index 0000000000000..781961c820bec --- /dev/null +++ b/src/query/ast/src/error.rs @@ -0,0 +1,40 @@ +// Copyright 2021 Datafuse Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::fmt::Display; +use std::fmt::Formatter; + +use crate::span::pretty_print_error; +use crate::Span; + +pub type Result = std::result::Result; + +#[derive(Debug)] +pub struct ParseError(pub Span, pub String); + +impl ParseError { + /// Pretty display the error message onto source if span is available. 
+ pub fn display_with_source(mut self, source: &str) -> Self { + if let Some(span) = self.0.take() { + self.1 = pretty_print_error(source, vec![(span, self.1.to_string())]); + } + self + } +} + +impl Display for ParseError { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "{}", self.1) + } +} diff --git a/src/query/ast/src/lib.rs b/src/query/ast/src/lib.rs index 23e2e2d6f76cb..0104459768995 100644 --- a/src/query/ast/src/lib.rs +++ b/src/query/ast/src/lib.rs @@ -19,4 +19,11 @@ #![allow(clippy::type_complexity)] pub mod ast; +mod error; pub mod parser; +pub mod span; + +pub use error::ParseError; +pub use error::Result; +pub use span::Range; +pub use span::Span; diff --git a/src/query/ast/src/parser/common.rs b/src/query/ast/src/parser/common.rs index d7ea1baa0b12f..a70d5cb0c3afc 100644 --- a/src/query/ast/src/parser/common.rs +++ b/src/query/ast/src/parser/common.rs @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use databend_common_exception::Range; -use databend_common_exception::Span; use nom::branch::alt; use nom::combinator::consumed; use nom::combinator::map; @@ -36,6 +34,8 @@ use crate::parser::token::*; use crate::parser::Error; use crate::parser::ErrorKind; use crate::rule; +use crate::Range; +use crate::Span; pub type IResult<'a, Output> = nom::IResult, Output, Error<'a>>; diff --git a/src/query/ast/src/parser/error.rs b/src/query/ast/src/parser/error.rs index afbbd46ab2f0b..47c5d53507838 100644 --- a/src/query/ast/src/parser/error.rs +++ b/src/query/ast/src/parser/error.rs @@ -18,14 +18,14 @@ use std::fmt::Write; use std::num::IntErrorKind; use std::num::ParseIntError; -use databend_common_exception::pretty_print_error; -use databend_common_exception::Range; use itertools::Itertools; use ordered_float::OrderedFloat; use crate::parser::common::transform_span; use crate::parser::input::Input; use crate::parser::token::*; +use crate::span::pretty_print_error; +use crate::Range; const MAX_DISPLAY_ERROR_COUNT: usize = 60; diff --git a/src/query/ast/src/parser/parser.rs b/src/query/ast/src/parser/parser.rs index 324e22942c7b2..bdfc8cc573a37 100644 --- a/src/query/ast/src/parser/parser.rs +++ b/src/query/ast/src/parser/parser.rs @@ -12,9 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use databend_common_exception::ErrorCode; -use databend_common_exception::Result; - use super::input::ParseMode; use super::statement::insert_stmt; use super::statement::replace_stmt; @@ -36,6 +33,8 @@ use crate::parser::token::Token; use crate::parser::token::TokenKind; use crate::parser::token::Tokenizer; use crate::parser::Backtrace; +use crate::ParseError; +use crate::Result; pub fn tokenize_sql(sql: &str) -> Result> { Tokenizer::new(sql).collect::>>() @@ -49,7 +48,7 @@ pub fn parse_sql(tokens: &[Token], dialect: Dialect) -> Result<(Statement, Optio #[cfg(debug_assertions)] { // Check that the statement can be displayed and reparsed without loss - let res: Result<(), ErrorCode> = try { + let res: Result<()> = try { let reparse_sql = stmt.stmt.to_string(); let reparse_tokens = crate::parser::tokenize_sql(&reparse_sql)?; let reparsed = run_parser( @@ -144,17 +143,15 @@ pub fn run_parser( if is_complete || allow_partial { Ok(res) } else { - Err( - ErrorCode::SyntaxException("unable to parse rest of the sql".to_string()) - .set_span(transform_span(&rest[..1])), - ) + Err(ParseError( + transform_span(&rest[..1]), + "unable to parse rest of the sql".to_string(), + )) } } Err(nom::Err::Error(err) | nom::Err::Failure(err)) => { let source = tokens[0].source; - Err(ErrorCode::SyntaxException(display_parser_error( - err, source, - ))) + Err(ParseError(None, display_parser_error(err, source))) } Err(nom::Err::Incomplete(_)) => unreachable!(), } diff --git a/src/query/ast/src/parser/statement.rs b/src/query/ast/src/parser/statement.rs index f143fb536f0e7..84d31959fb20b 100644 --- a/src/query/ast/src/parser/statement.rs +++ b/src/query/ast/src/parser/statement.rs @@ -43,6 +43,7 @@ use crate::rule; pub enum ShowGrantOption { PrincipalIdentity(PrincipalIdentity), + GrantObjectName(GrantObjectName), ShareGrantObjectName(ShareGrantObjectName), ShareName(String), } @@ -1278,11 +1279,12 @@ pub fn statement_body(i: Input) -> IResult { ); let show_grants = map( rule! 
{ - SHOW ~ GRANTS ~ #show_grant_option? + SHOW ~ GRANTS ~ #show_grant_option? ~ ^#show_options? }, - |(_, _, show_grant_option)| match show_grant_option { + |(_, _, show_grant_option, opt_limit)| match show_grant_option { Some(ShowGrantOption::PrincipalIdentity(principal)) => Statement::ShowGrants { principal: Some(principal), + show_options: opt_limit, }, Some(ShowGrantOption::ShareGrantObjectName(object)) => { Statement::ShowObjectGrantPrivileges(ShowObjectGrantPrivilegesStmt { object }) @@ -1290,7 +1292,16 @@ pub fn statement_body(i: Input) -> IResult { Some(ShowGrantOption::ShareName(share_name)) => { Statement::ShowGrantsOfShare(ShowGrantsOfShareStmt { share_name }) } - None => Statement::ShowGrants { principal: None }, + None => Statement::ShowGrants { + principal: None, + show_options: opt_limit, + }, + Some(ShowGrantOption::GrantObjectName(object)) => { + Statement::ShowObjectPrivileges(ShowObjectPrivilegesStmt { + object, + show_option: opt_limit, + }) + } }, ); let revoke = map( @@ -2905,6 +2916,40 @@ pub fn grant_share_object_name(i: Input) -> IResult { )(i) } +pub fn on_object_name(i: Input) -> IResult { + let database = map( + rule! { + DATABASE ~ #ident + }, + |(_, database)| GrantObjectName::Database(database.to_string()), + ); + + // `db01`.'tb1' or `db01`.`tb1` or `db01`.tb1 + let table = map( + rule! { + TABLE ~ #dot_separated_idents_1_to_2 + }, + |(_, (database, table))| { + GrantObjectName::Table(database.map(|db| db.to_string()), table.to_string()) + }, + ); + + let stage = map(rule! { STAGE ~ #ident}, |(_, stage_name)| { + GrantObjectName::Stage(stage_name.to_string()) + }); + + let udf = map(rule! { UDF ~ #ident}, |(_, udf_name)| { + GrantObjectName::UDF(udf_name.to_string()) + }); + + rule!( + #database : "DATABASE " + | #table : "TABLE .
" + | #stage : "STAGE " + | #udf : "UDF " + )(i) +} + pub fn grant_level(i: Input) -> IResult { // *.* let global = map(rule! { "*" ~ "." ~ "*" }, |_| AccountMgrLevel::Global); @@ -3023,9 +3068,9 @@ pub fn show_grant_option(i: Input) -> IResult { let share_object_name = map( rule! { - ON ~ #grant_share_object_name + ON ~ #on_object_name }, - |(_, object_name)| ShowGrantOption::ShareGrantObjectName(object_name), + |(_, object_name)| ShowGrantOption::GrantObjectName(object_name), ); let share_name = map( @@ -3037,7 +3082,7 @@ pub fn show_grant_option(i: Input) -> IResult { rule!( #grant_role: "FOR { ROLE | [USER] }" - | #share_object_name: "ON {DATABASE | TABLE .}" + | #share_object_name: "ON {DATABASE | TABLE . | UDF | STAGE }" | #share_name: "OF SHARE " )(i) } @@ -3656,7 +3701,7 @@ pub fn task_schedule_option(i: Input) -> IResult { rule! { #literal_u64 ~ MINUTE }, - |(mins, _)| ScheduleOptions::IntervalSecs(mins * 60), + |(mins, _)| ScheduleOptions::IntervalSecs(mins * 60, 0), ); let cron_expr = map( rule! { @@ -3668,12 +3713,19 @@ pub fn task_schedule_option(i: Input) -> IResult { rule! { #literal_u64 ~ SECOND }, - |(secs, _)| ScheduleOptions::IntervalSecs(secs), + |(secs, _)| ScheduleOptions::IntervalSecs(secs, 0), + ); + let interval_millis = map( + rule! { + #literal_u64 ~ MILLISECOND + }, + |(millis, _)| ScheduleOptions::IntervalSecs(0, millis), ); rule!( #interval | #cron_expr | #interval_sec + | #interval_millis )(i) } diff --git a/src/query/ast/src/parser/token.rs b/src/query/ast/src/parser/token.rs index 933c4656a205c..1e335491e6a48 100644 --- a/src/query/ast/src/parser/token.rs +++ b/src/query/ast/src/parser/token.rs @@ -12,15 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use databend_common_exception::ErrorCode; -use databend_common_exception::Range; -use databend_common_exception::Result; use logos::Lexer; use logos::Logos; use strum::IntoEnumIterator; use strum_macros::EnumIter; pub use self::TokenKind::*; +use crate::ParseError; +use crate::Range; +use crate::Result; #[derive(Clone, PartialEq, Eq)] pub struct Token<'a> { @@ -72,10 +72,13 @@ impl<'a> Iterator for Tokenizer<'a> { fn next(&mut self) -> Option { match self.lexer.next() { - Some(TokenKind::Error) => Some(Err(ErrorCode::SyntaxException( - "unable to recognize the rest tokens".to_string(), - ) - .set_span(Some((self.lexer.span().start..self.source.len()).into())))), + Some(TokenKind::Error) => { + let span = Some((self.lexer.span().start..self.source.len()).into()); + Some(Err(ParseError( + span, + "unable to recognize the rest tokens".to_string(), + ))) + } Some(kind) => { // Skip hint-like comment that is in the invalid position. if !matches!( @@ -964,6 +967,8 @@ pub enum TokenKind { SCHEMAS, #[token("SECOND", ignore(ascii_case))] SECOND, + #[token("MILLISECOND", ignore(ascii_case))] + MILLISECOND, #[token("SELECT", ignore(ascii_case))] SELECT, #[token("PIVOT", ignore(ascii_case))] diff --git a/src/common/exception/src/span.rs b/src/query/ast/src/span.rs similarity index 100% rename from src/common/exception/src/span.rs rename to src/query/ast/src/span.rs diff --git a/src/query/ast/tests/it/parser.rs b/src/query/ast/tests/it/parser.rs index aadde37734a4d..d168541ced794 100644 --- a/src/query/ast/tests/it/parser.rs +++ b/src/query/ast/tests/it/parser.rs @@ -310,6 +310,7 @@ fn test_statement() { r#"SHOW GRANTS FOR USER 'test-grant';"#, r#"SHOW GRANTS FOR ROLE role1;"#, r#"SHOW GRANTS FOR ROLE 'role1';"#, + r#"SHOW GRANTS ON TABLE t;"#, r#"REVOKE SELECT, CREATE ON * FROM 'test-grant';"#, r#"REVOKE SELECT ON tb1 FROM ROLE role1;"#, r#"REVOKE SELECT ON tb1 FROM ROLE 'role1';"#, @@ -911,6 +912,7 @@ fn test_statement_error() { r#"drop table :a"#, r#"drop table 
IDENTIFIER(a)"#, r#"drop table IDENTIFIER(:a)"#, + r#"SHOW GRANTS ON task t1;"#, ]; for case in cases { @@ -919,7 +921,7 @@ fn test_statement_error() { writeln!(file, "---------- Input ----------").unwrap(); writeln!(file, "{}", case).unwrap(); writeln!(file, "---------- Output ---------").unwrap(); - writeln!(file, "{}", err.message()).unwrap(); + writeln!(file, "{}", err.1).unwrap(); } } diff --git a/src/query/ast/tests/it/testdata/statement-error.txt b/src/query/ast/tests/it/testdata/statement-error.txt index 7f4946109ebbf..3844dd90deca2 100644 --- a/src/query/ast/tests/it/testdata/statement-error.txt +++ b/src/query/ast/tests/it/testdata/statement-error.txt @@ -916,3 +916,13 @@ error: | while parsing `DROP TABLE [IF EXISTS] [.]
` +---------- Input ---------- +SHOW GRANTS ON task t1; +---------- Output --------- +error: + --> SQL:1:16 + | +1 | SHOW GRANTS ON task t1; + | ^^^^ unexpected `task`, expecting `STAGE`, `TABLE`, `DATABASE`, or `UDF` + + diff --git a/src/query/ast/tests/it/testdata/statement.txt b/src/query/ast/tests/it/testdata/statement.txt index 3322650aa244a..5ba37a6b12adf 100644 --- a/src/query/ast/tests/it/testdata/statement.txt +++ b/src/query/ast/tests/it/testdata/statement.txt @@ -11681,7 +11681,7 @@ CreateTable( ---------- Input ---------- GRANT CREATE, CREATE USER ON * TO 'test-grant'; ---------- Output --------- -GRANT CREATE, CREATE USER ON * TO USER 'test-grant'@'%' +GRANT CREATE, CREATE USER ON * TO USER 'test-grant'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -11707,7 +11707,7 @@ Grant( ---------- Input ---------- GRANT SELECT, CREATE ON * TO 'test-grant'; ---------- Output --------- -GRANT SELECT, CREATE ON * TO USER 'test-grant'@'%' +GRANT SELECT, CREATE ON * TO USER 'test-grant'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -11733,7 +11733,7 @@ Grant( ---------- Input ---------- GRANT SELECT, CREATE ON *.* TO 'test-grant'; ---------- Output --------- -GRANT SELECT, CREATE ON *.* TO USER 'test-grant'@'%' +GRANT SELECT, CREATE ON *.* TO USER 'test-grant'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -11757,7 +11757,7 @@ Grant( ---------- Input ---------- GRANT SELECT, CREATE ON * TO USER 'test-grant'; ---------- Output --------- -GRANT SELECT, CREATE ON * TO USER 'test-grant'@'%' +GRANT SELECT, CREATE ON * TO USER 'test-grant'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -11783,7 +11783,7 @@ Grant( ---------- Input ---------- GRANT SELECT, CREATE ON * TO ROLE role1; ---------- Output --------- -GRANT SELECT, CREATE ON * TO ROLE 'role1' +GRANT SELECT, CREATE ON * TO ROLE 'role1' ---------- AST ------------ Grant( GrantStmt { @@ -11806,7 +11806,7 @@ Grant( ---------- Input ---------- GRANT ALL ON *.* TO 'test-grant'; ---------- 
Output --------- -GRANT ALL PRIVILEGES ON *.* TO USER 'test-grant'@'%' +GRANT ALL PRIVILEGES ON *.* TO USER 'test-grant'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -11826,7 +11826,7 @@ Grant( ---------- Input ---------- GRANT ALL ON *.* TO ROLE role2; ---------- Output --------- -GRANT ALL PRIVILEGES ON *.* TO ROLE 'role2' +GRANT ALL PRIVILEGES ON *.* TO ROLE 'role2' ---------- AST ------------ Grant( GrantStmt { @@ -11843,7 +11843,7 @@ Grant( ---------- Input ---------- GRANT ALL PRIVILEGES ON * TO 'test-grant'; ---------- Output --------- -GRANT ALL PRIVILEGES ON * TO USER 'test-grant'@'%' +GRANT ALL PRIVILEGES ON * TO USER 'test-grant'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -11865,7 +11865,7 @@ Grant( ---------- Input ---------- GRANT ALL PRIVILEGES ON * TO ROLE role3; ---------- Output --------- -GRANT ALL PRIVILEGES ON * TO ROLE 'role3' +GRANT ALL PRIVILEGES ON * TO ROLE 'role3' ---------- AST ------------ Grant( GrantStmt { @@ -11941,7 +11941,7 @@ Grant( ---------- Input ---------- GRANT SELECT ON db01.* TO 'test-grant'; ---------- Output --------- -GRANT SELECT ON db01.* TO USER 'test-grant'@'%' +GRANT SELECT ON db01.* TO USER 'test-grant'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -11968,7 +11968,7 @@ Grant( ---------- Input ---------- GRANT SELECT ON db01.* TO USER 'test-grant'; ---------- Output --------- -GRANT SELECT ON db01.* TO USER 'test-grant'@'%' +GRANT SELECT ON db01.* TO USER 'test-grant'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -11995,7 +11995,7 @@ Grant( ---------- Input ---------- GRANT SELECT ON db01.* TO ROLE role1 ---------- Output --------- -GRANT SELECT ON db01.* TO ROLE 'role1' +GRANT SELECT ON db01.* TO ROLE 'role1' ---------- AST ------------ Grant( GrantStmt { @@ -12019,7 +12019,7 @@ Grant( ---------- Input ---------- GRANT SELECT ON db01.tb1 TO 'test-grant'; ---------- Output --------- -GRANT SELECT ON db01.tb1 TO USER 'test-grant'@'%' +GRANT SELECT ON db01.tb1 TO USER 
'test-grant'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -12047,7 +12047,7 @@ Grant( ---------- Input ---------- GRANT SELECT ON db01.tb1 TO USER 'test-grant'; ---------- Output --------- -GRANT SELECT ON db01.tb1 TO USER 'test-grant'@'%' +GRANT SELECT ON db01.tb1 TO USER 'test-grant'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -12075,7 +12075,7 @@ Grant( ---------- Input ---------- GRANT SELECT ON db01.tb1 TO ROLE role1; ---------- Output --------- -GRANT SELECT ON db01.tb1 TO ROLE 'role1' +GRANT SELECT ON db01.tb1 TO ROLE 'role1' ---------- AST ------------ Grant( GrantStmt { @@ -12100,7 +12100,7 @@ Grant( ---------- Input ---------- GRANT SELECT ON tb1 TO ROLE role1; ---------- Output --------- -GRANT SELECT ON tb1 TO ROLE 'role1' +GRANT SELECT ON tb1 TO ROLE 'role1' ---------- AST ------------ Grant( GrantStmt { @@ -12123,7 +12123,7 @@ Grant( ---------- Input ---------- GRANT ALL ON tb1 TO 'u1'; ---------- Output --------- -GRANT ALL PRIVILEGES ON tb1 TO USER 'u1'@'%' +GRANT ALL PRIVILEGES ON tb1 TO USER 'u1'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -12146,17 +12146,23 @@ Grant( ---------- Input ---------- SHOW GRANTS; ---------- Output --------- -SHOW GRANTS +SHOW GRANTS ---------- AST ------------ ShowGrants { principal: None, + show_options: Some( + ShowOptions { + show_limit: None, + limit: None, + }, + ), } ---------- Input ---------- SHOW GRANTS FOR 'test-grant'; ---------- Output --------- -SHOW GRANTS FOR USER 'test-grant'@'%' +SHOW GRANTS FOR USER 'test-grant'@'%' ---------- AST ------------ ShowGrants { principal: Some( @@ -12167,13 +12173,19 @@ ShowGrants { }, ), ), + show_options: Some( + ShowOptions { + show_limit: None, + limit: None, + }, + ), } ---------- Input ---------- SHOW GRANTS FOR USER 'test-grant'; ---------- Output --------- -SHOW GRANTS FOR USER 'test-grant'@'%' +SHOW GRANTS FOR USER 'test-grant'@'%' ---------- AST ------------ ShowGrants { principal: Some( @@ -12184,13 +12196,19 @@ ShowGrants { }, ), 
), + show_options: Some( + ShowOptions { + show_limit: None, + limit: None, + }, + ), } ---------- Input ---------- SHOW GRANTS FOR ROLE role1; ---------- Output --------- -SHOW GRANTS FOR ROLE 'role1' +SHOW GRANTS FOR ROLE 'role1' ---------- AST ------------ ShowGrants { principal: Some( @@ -12198,13 +12216,19 @@ ShowGrants { "role1", ), ), + show_options: Some( + ShowOptions { + show_limit: None, + limit: None, + }, + ), } ---------- Input ---------- SHOW GRANTS FOR ROLE 'role1'; ---------- Output --------- -SHOW GRANTS FOR ROLE 'role1' +SHOW GRANTS FOR ROLE 'role1' ---------- AST ------------ ShowGrants { principal: Some( @@ -12212,13 +12236,40 @@ ShowGrants { "role1", ), ), + show_options: Some( + ShowOptions { + show_limit: None, + limit: None, + }, + ), } +---------- Input ---------- +SHOW GRANTS ON TABLE t; +---------- Output --------- +SHOW GRANTS ON TABLE t +---------- AST ------------ +ShowObjectPrivileges( + ShowObjectPrivilegesStmt { + object: Table( + None, + "t", + ), + show_option: Some( + ShowOptions { + show_limit: None, + limit: None, + }, + ), + }, +) + + ---------- Input ---------- REVOKE SELECT, CREATE ON * FROM 'test-grant'; ---------- Output --------- -REVOKE SELECT, CREATE ON * FROM USER 'test-grant'@'%' +REVOKE SELECT, CREATE ON * FROM USER 'test-grant'@'%' ---------- AST ------------ Revoke( RevokeStmt { @@ -12244,7 +12295,7 @@ Revoke( ---------- Input ---------- REVOKE SELECT ON tb1 FROM ROLE role1; ---------- Output --------- -REVOKE SELECT ON tb1 FROM ROLE 'role1' +REVOKE SELECT ON tb1 FROM ROLE 'role1' ---------- AST ------------ Revoke( RevokeStmt { @@ -12267,7 +12318,7 @@ Revoke( ---------- Input ---------- REVOKE SELECT ON tb1 FROM ROLE 'role1'; ---------- Output --------- -REVOKE SELECT ON tb1 FROM ROLE 'role1' +REVOKE SELECT ON tb1 FROM ROLE 'role1' ---------- AST ------------ Revoke( RevokeStmt { @@ -12368,7 +12419,7 @@ SetRole { ---------- Input ---------- REVOKE ALL ON tb1 FROM 'u1'; ---------- Output --------- -REVOKE ALL 
PRIVILEGES ON tb1 FROM USER 'u1'@'%' +REVOKE ALL PRIVILEGES ON tb1 FROM USER 'u1'@'%' ---------- AST ------------ Revoke( RevokeStmt { @@ -14487,7 +14538,7 @@ GrantShareObject( ---------- Input ---------- GRANT all ON stage s1 TO a; ---------- Output --------- -GRANT ALL PRIVILEGES ON STAGE s1 TO USER 'a'@'%' +GRANT ALL PRIVILEGES ON STAGE s1 TO USER 'a'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -14509,7 +14560,7 @@ Grant( ---------- Input ---------- GRANT read ON stage s1 TO a; ---------- Output --------- -GRANT Read ON STAGE s1 TO USER 'a'@'%' +GRANT Read ON STAGE s1 TO USER 'a'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -14534,7 +14585,7 @@ Grant( ---------- Input ---------- GRANT write ON stage s1 TO a; ---------- Output --------- -GRANT Write ON STAGE s1 TO USER 'a'@'%' +GRANT Write ON STAGE s1 TO USER 'a'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -14559,7 +14610,7 @@ Grant( ---------- Input ---------- REVOKE write ON stage s1 FROM a; ---------- Output --------- -REVOKE Write ON STAGE s1 FROM USER 'a'@'%' +REVOKE Write ON STAGE s1 FROM USER 'a'@'%' ---------- AST ------------ Revoke( RevokeStmt { @@ -14584,7 +14635,7 @@ Revoke( ---------- Input ---------- GRANT all ON UDF a TO 'test-grant'; ---------- Output --------- -GRANT USAGE ON UDF a TO USER 'test-grant'@'%' +GRANT USAGE ON UDF a TO USER 'test-grant'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -14609,7 +14660,7 @@ Grant( ---------- Input ---------- GRANT usage ON UDF a TO 'test-grant'; ---------- Output --------- -GRANT USAGE ON UDF a TO USER 'test-grant'@'%' +GRANT USAGE ON UDF a TO USER 'test-grant'@'%' ---------- AST ------------ Grant( GrantStmt { @@ -14634,7 +14685,7 @@ Grant( ---------- Input ---------- REVOKE usage ON UDF a FROM 'test-grant'; ---------- Output --------- -REVOKE USAGE ON UDF a FROM USER 'test-grant'@'%' +REVOKE USAGE ON UDF a FROM USER 'test-grant'@'%' ---------- AST ------------ Revoke( RevokeStmt { @@ -14659,7 +14710,7 @@ Revoke( 
---------- Input ---------- REVOKE all ON UDF a FROM 'test-grant'; ---------- Output --------- -REVOKE USAGE ON UDF a FROM USER 'test-grant'@'%' +REVOKE USAGE ON UDF a FROM USER 'test-grant'@'%' ---------- AST ------------ Revoke( RevokeStmt { @@ -14917,26 +14968,20 @@ ShowShares( ---------- Input ---------- SHOW GRANTS ON TABLE db1.tb1; ---------- Output --------- -SHOW GRANTS ON TABLE db1.tb1 +SHOW GRANTS ON TABLE db1.tb1 ---------- AST ------------ -ShowObjectGrantPrivileges( - ShowObjectGrantPrivilegesStmt { +ShowObjectPrivileges( + ShowObjectPrivilegesStmt { object: Table( - Identifier { - span: Some( - 21..24, - ), - name: "db1", - quote: None, - is_hole: false, - }, - Identifier { - span: Some( - 25..28, - ), - name: "tb1", - quote: None, - is_hole: false, + Some( + "db1", + ), + "tb1", + ), + show_option: Some( + ShowOptions { + show_limit: None, + limit: None, }, ), }, @@ -14946,18 +14991,17 @@ ShowObjectGrantPrivileges( ---------- Input ---------- SHOW GRANTS ON DATABASE db; ---------- Output --------- -SHOW GRANTS ON DATABASE db +SHOW GRANTS ON DATABASE db ---------- AST ------------ -ShowObjectGrantPrivileges( - ShowObjectGrantPrivilegesStmt { +ShowObjectPrivileges( + ShowObjectPrivilegesStmt { object: Database( - Identifier { - span: Some( - 24..26, - ), - name: "db", - quote: None, - is_hole: false, + "db", + ), + show_option: Some( + ShowOptions { + show_limit: None, + limit: None, }, ), }, @@ -18367,6 +18411,7 @@ CreateTask( schedule_opts: Some( IntervalSecs( 900, + 0, ), ), session_parameters: { @@ -18408,6 +18453,7 @@ CreateTask( schedule_opts: Some( IntervalSecs( 15, + 0, ), ), session_parameters: {}, @@ -18444,6 +18490,7 @@ CreateTask( schedule_opts: Some( IntervalSecs( 1215, + 0, ), ), session_parameters: {}, @@ -18741,6 +18788,7 @@ CreateTask( schedule_opts: Some( IntervalSecs( 1, + 0, ), ), session_parameters: {}, @@ -18785,6 +18833,7 @@ CreateTask( schedule_opts: Some( IntervalSecs( 1, + 0, ), ), session_parameters: {}, @@ -18915,6 +18964,7 
@@ AlterTask( schedule: Some( IntervalSecs( 780, + 0, ), ), suspend_task_after_num_failures: Some( @@ -18946,6 +18996,7 @@ AlterTask( schedule: Some( IntervalSecs( 5, + 0, ), ), suspend_task_after_num_failures: Some( @@ -20133,7 +20184,7 @@ Query( ---------- Input ---------- GRANT OWNERSHIP ON d20_0014.* TO ROLE 'd20_0015_owner'; ---------- Output --------- -GRANT OWNERSHIP ON d20_0014.* TO ROLE 'd20_0015_owner' +GRANT OWNERSHIP ON d20_0014.* TO ROLE 'd20_0015_owner' ---------- AST ------------ Grant( GrantStmt { @@ -20157,7 +20208,7 @@ Grant( ---------- Input ---------- GRANT OWNERSHIP ON d20_0014.t TO ROLE 'd20_0015_owner'; ---------- Output --------- -GRANT OWNERSHIP ON d20_0014.t TO ROLE 'd20_0015_owner' +GRANT OWNERSHIP ON d20_0014.t TO ROLE 'd20_0015_owner' ---------- AST ------------ Grant( GrantStmt { @@ -20182,7 +20233,7 @@ Grant( ---------- Input ---------- GRANT OWNERSHIP ON STAGE s1 TO ROLE 'd20_0015_owner'; ---------- Output --------- -GRANT OWNERSHIP ON STAGE s1 TO ROLE 'd20_0015_owner' +GRANT OWNERSHIP ON STAGE s1 TO ROLE 'd20_0015_owner' ---------- AST ------------ Grant( GrantStmt { @@ -20204,7 +20255,7 @@ Grant( ---------- Input ---------- GRANT OWNERSHIP ON UDF f1 TO ROLE 'd20_0015_owner'; ---------- Output --------- -GRANT OWNERSHIP ON UDF f1 TO ROLE 'd20_0015_owner' +GRANT OWNERSHIP ON UDF f1 TO ROLE 'd20_0015_owner' ---------- AST ------------ Grant( GrantStmt { diff --git a/src/query/ast/tests/it/token.rs b/src/query/ast/tests/it/token.rs index b2c65d6cb9867..af6ba131322e4 100644 --- a/src/query/ast/tests/it/token.rs +++ b/src/query/ast/tests/it/token.rs @@ -16,7 +16,7 @@ use std::fs::File; use std::io::Write; use databend_common_ast::parser::token::*; -use databend_common_exception::Result; +use databend_common_ast::Result; use goldenfile::Mint; fn run_lexer(file: &mut File, source: &str) { @@ -34,7 +34,11 @@ fn run_lexer(file: &mut File, source: &str) { writeln!(file, "\n").unwrap(); } Err(err) => { - let report = 
err.display_with_sql(source).message().trim().to_string(); + let report = err + .display_with_source(source) + .to_string() + .trim() + .to_string(); writeln!(file, "---------- Input ----------").unwrap(); writeln!(file, "{}", source).unwrap(); writeln!(file, "---------- Output ---------").unwrap(); diff --git a/src/query/async_functions/src/async_function.rs b/src/query/async_functions/src/async_function.rs index 786a8c5162b4c..aab49f56e2f00 100644 --- a/src/query/async_functions/src/async_function.rs +++ b/src/query/async_functions/src/async_function.rs @@ -16,10 +16,10 @@ use std::sync::Arc; use databend_common_ast::ast::ColumnID; use databend_common_ast::ast::Expr; +use databend_common_ast::Span; use databend_common_catalog::catalog::Catalog; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use databend_common_expression::types::DataType; use databend_common_expression::types::NumberDataType; use databend_common_expression::Scalar; diff --git a/src/query/expression/Cargo.toml b/src/query/expression/Cargo.toml index 718a5474996f5..9b34015a7cce7 100644 --- a/src/query/expression/Cargo.toml +++ b/src/query/expression/Cargo.toml @@ -12,6 +12,7 @@ test = true [dependencies] # In alphabetical order # Workspace dependencies databend-common-arrow = { path = "../../common/arrow" } +databend-common-ast = { path = "../ast" } databend-common-base = { path = "../../common/base" } databend-common-datavalues = { path = "../datavalues" } databend-common-exception = { path = "../../common/exception" } @@ -64,7 +65,6 @@ unicode-segmentation = "1.10.1" [dev-dependencies] arrow-ord = { workspace = true } -databend-common-ast = { path = "../ast" } goldenfile = "1.4" pretty_assertions = "1.3.0" rand = { workspace = true } diff --git a/src/query/expression/src/evaluator.rs b/src/query/expression/src/evaluator.rs index 6f9d6ce3ee36b..dea781004db2c 100644 --- a/src/query/expression/src/evaluator.rs +++ 
b/src/query/expression/src/evaluator.rs @@ -18,9 +18,9 @@ use std::ops::Not; use databend_common_arrow::arrow::bitmap; use databend_common_arrow::arrow::bitmap::Bitmap; use databend_common_arrow::arrow::bitmap::MutableBitmap; +use databend_common_ast::Span; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use itertools::Itertools; use log::error; diff --git a/src/query/expression/src/expression.rs b/src/query/expression/src/expression.rs index c85ad73b0bb56..c0e6de4ba4899 100644 --- a/src/query/expression/src/expression.rs +++ b/src/query/expression/src/expression.rs @@ -17,7 +17,7 @@ use std::fmt::Debug; use std::hash::Hash; use std::sync::Arc; -use databend_common_exception::Span; +use databend_common_ast::Span; use educe::Educe; use enum_as_inner::EnumAsInner; use serde::Deserialize; diff --git a/src/query/expression/src/filter/select_expr.rs b/src/query/expression/src/filter/select_expr.rs index ac6b9792e99cf..4210e75a8f4c8 100644 --- a/src/query/expression/src/filter/select_expr.rs +++ b/src/query/expression/src/filter/select_expr.rs @@ -122,11 +122,14 @@ impl SelectExprBuilder { let select_op = SelectOp::try_from_func_name(&function.signature.name).unwrap(); let select_op = if not { select_op.not() } else { select_op }; + let can_reorder = + Self::can_reorder(&args[0]) && Self::can_reorder(&args[1]); SelectExprBuildResult::new(SelectExpr::Compare(( select_op, args.clone(), generics.clone(), ))) + .can_reorder(can_reorder) } "not" => { self.not_function = Some((id.clone(), function.clone())); @@ -158,6 +161,7 @@ impl SelectExprBuilder { .can_push_down_not(false); } }; + let can_reorder = Self::can_reorder(column); if matches!(column_data_type, DataType::String | DataType::Nullable(box DataType::String)) && let Scalar::String(like_str) = scalar { @@ -168,15 +172,17 @@ impl SelectExprBuilder { like_str.clone(), not, ))) + .can_reorder(can_reorder) } else { 
SelectExprBuildResult::new(SelectExpr::Others(expr.clone())) .can_push_down_not(false) + .can_reorder(can_reorder) } } "is_true" => self.build_select_expr(&args[0], not), _ => self .other_select_expr(expr, not) - .can_reorder(Self::can_reorder(func_name)), + .can_reorder(Self::can_reorder(expr)), } } } @@ -207,13 +213,24 @@ impl SelectExprBuilder { // If a function may be use for filter short-circuiting, we can not perform filter reorder, // for example, for predicates `a != 0 and 3 / a > 1`,if we swap `a != 0` and `3 / a > 1`, // there will be a divide by zero error. - fn can_reorder(func_name: &str) -> bool { - // There may be other functions that can be used for filter short-circuiting. - if matches!(func_name, "cast" | "div" | "divide" | "modulo") || func_name.starts_with("to_") - { - return false; + pub fn can_reorder(expr: &Expr) -> bool { + match expr { + Expr::FunctionCall { function, args, .. } => { + let func_name = function.signature.name.as_str(); + // There may be other functions that can be used for filter short-circuiting. + let mut can_reorder = !matches!(func_name, "cast" | "div" | "divide" | "modulo") + && !func_name.starts_with("to_"); + if can_reorder { + for arg in args { + can_reorder &= Self::can_reorder(arg); + } + } + can_reorder + } + Expr::ColumnRef { .. } | Expr::Constant { .. } => true, + Expr::Cast { is_try, .. 
} if *is_try => true, + _ => false, } - true } fn other_select_expr(&self, expr: &Expr, not: bool) -> SelectExprBuildResult { diff --git a/src/query/expression/src/function.rs b/src/query/expression/src/function.rs index 183e10e3cbe04..db390b1edc38c 100755 --- a/src/query/expression/src/function.rs +++ b/src/query/expression/src/function.rs @@ -21,9 +21,9 @@ use std::sync::Arc; use databend_common_arrow::arrow::bitmap::Bitmap; use databend_common_arrow::arrow::bitmap::MutableBitmap; +use databend_common_ast::Span; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use databend_common_io::GeometryDataType; use enum_as_inner::EnumAsInner; use itertools::Itertools; diff --git a/src/query/expression/src/type_check.rs b/src/query/expression/src/type_check.rs index ea45ba9f055c0..9692736316931 100755 --- a/src/query/expression/src/type_check.rs +++ b/src/query/expression/src/type_check.rs @@ -15,9 +15,9 @@ use std::collections::HashMap; use std::fmt::Write; +use databend_common_ast::Span; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use itertools::Itertools; use crate::cast_scalar; diff --git a/src/query/expression/src/utils/mod.rs b/src/query/expression/src/utils/mod.rs index 7b4c3f2fa1db3..3d65daf97acd9 100644 --- a/src/query/expression/src/utils/mod.rs +++ b/src/query/expression/src/utils/mod.rs @@ -25,8 +25,8 @@ pub mod udf_client; pub mod variant_transform; use databend_common_arrow::arrow::bitmap::Bitmap; +use databend_common_ast::Span; use databend_common_exception::Result; -use databend_common_exception::Span; use ethnum::i256; pub use self::column_from::*; diff --git a/src/query/script/src/compiler.rs b/src/query/script/src/compiler.rs index a4c2a0eeb2660..d7cf006c7b4d3 100644 --- a/src/query/script/src/compiler.rs +++ b/src/query/script/src/compiler.rs @@ -34,9 +34,9 @@ use databend_common_ast::ast::SetExpr; use 
databend_common_ast::ast::Statement; use databend_common_ast::ast::TableReference; use databend_common_ast::ast::UnaryOperator; +use databend_common_ast::Span; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use derive_visitor::DriveMut; use derive_visitor::VisitorMut; diff --git a/src/query/script/src/executor.rs b/src/query/script/src/executor.rs index dd67932dd0710..2041fec37a462 100644 --- a/src/query/script/src/executor.rs +++ b/src/query/script/src/executor.rs @@ -15,9 +15,9 @@ use std::collections::HashMap; use databend_common_ast::ast::Expr; +use databend_common_ast::Span; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use crate::ir::ColumnAccess; use crate::ir::IterRef; diff --git a/src/query/script/src/ir.rs b/src/query/script/src/ir.rs index d1588e5422d37..84237836de921 100644 --- a/src/query/script/src/ir.rs +++ b/src/query/script/src/ir.rs @@ -20,9 +20,9 @@ use databend_common_ast::ast::Expr; use databend_common_ast::ast::Identifier; use databend_common_ast::ast::Literal; use databend_common_ast::ast::Statement; +use databend_common_ast::Span; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use derive_visitor::DriveMut; use derive_visitor::VisitorMut; diff --git a/src/query/script/tests/it/main.rs b/src/query/script/tests/it/main.rs index f664e696d422f..af781da277f2c 100644 --- a/src/query/script/tests/it/main.rs +++ b/src/query/script/tests/it/main.rs @@ -29,8 +29,8 @@ use databend_common_ast::parser::script::script_stmts; use databend_common_ast::parser::tokenize_sql; use databend_common_ast::parser::Dialect; use databend_common_ast::parser::ParseMode; +use databend_common_ast::Range; use databend_common_exception::ErrorCode; -use databend_common_exception::Range; use databend_common_exception::Result; use databend_common_script::compile; 
use databend_common_script::ir::ColumnAccess; diff --git a/src/query/service/src/interpreters/access/management_mode_access.rs b/src/query/service/src/interpreters/access/management_mode_access.rs index c1ca700a04209..b2e3d767f2fc9 100644 --- a/src/query/service/src/interpreters/access/management_mode_access.rs +++ b/src/query/service/src/interpreters/access/management_mode_access.rs @@ -53,6 +53,8 @@ impl AccessChecker for ManagementModeAccess { | RewriteKind::ShowUserFunctions | RewriteKind::ShowTableFunctions | RewriteKind::ShowUsers + // show grants will access meta, can not true in mm. + // | RewriteKind::ShowGrants | RewriteKind::ShowStages | RewriteKind::DescribeStage | RewriteKind::ListStage @@ -66,7 +68,6 @@ impl AccessChecker for ManagementModeAccess { // Show. Plan::ShowCreateDatabase(_) | Plan::ShowCreateTable(_) - | Plan::ShowGrants(_) // Set | Plan::SetVariable(_) diff --git a/src/query/service/src/interpreters/access/privilege_access.rs b/src/query/service/src/interpreters/access/privilege_access.rs index c6a3fd7398de8..a6cbfdf7df1cc 100644 --- a/src/query/service/src/interpreters/access/privilege_access.rs +++ b/src/query/service/src/interpreters/access/privilege_access.rs @@ -22,7 +22,6 @@ use databend_common_exception::ErrorCode; use databend_common_exception::Result; use databend_common_meta_app::principal::GrantObject; use databend_common_meta_app::principal::OwnershipObject; -use databend_common_meta_app::principal::PrincipalIdentity; use databend_common_meta_app::principal::StageInfo; use databend_common_meta_app::principal::StageType; use databend_common_meta_app::principal::UserGrantSet; @@ -993,32 +992,6 @@ impl AccessChecker for PrivilegeAccess { self.validate_access(&GrantObject::Global, UserPrivilegeType::Super) .await?; } - Plan::ShowGrants(plan) => { - let current_user = self.ctx.get_current_user()?; - if let Some(principal) = &plan.principal { - match principal { - PrincipalIdentity::User(user) => { - if current_user.identity() == 
*user { - return Ok(()); - } else { - self.validate_access(&GrantObject::Global, UserPrivilegeType::Grant) - .await?; - } - } - PrincipalIdentity::Role(role) => { - let roles=current_user.grants.roles(); - if roles.contains(role) || role.to_lowercase() == "public" { - return Ok(()); - } else { - self.validate_access(&GrantObject::Global, UserPrivilegeType::Grant) - .await?; - } - } - } - } else { - return Ok(()); - } - } Plan::AlterUser(_) | Plan::RenameDatabase(_) | Plan::RevertTable(_) diff --git a/src/query/service/src/interpreters/common/task.rs b/src/query/service/src/interpreters/common/task.rs index 775caf658d705..d69919f82b8ed 100644 --- a/src/query/service/src/interpreters/common/task.rs +++ b/src/query/service/src/interpreters/common/task.rs @@ -29,16 +29,21 @@ pub fn make_schedule_options( opt: ScheduleOptions, ) -> databend_common_cloud_control::pb::ScheduleOptions { match opt { - ScheduleOptions::IntervalSecs(secs) => databend_common_cloud_control::pb::ScheduleOptions { - interval: Some(secs as i32), - cron: None, - time_zone: None, - schedule_type: i32::from(ScheduleType::IntervalType), - }, + ScheduleOptions::IntervalSecs(secs, ms) => { + databend_common_cloud_control::pb::ScheduleOptions { + interval: Some(secs as i32), + // none if ms is 0, else some ms + milliseconds_interval: if ms == 0 { None } else { Some(ms) }, + cron: None, + time_zone: None, + schedule_type: i32::from(ScheduleType::IntervalType), + } + } ScheduleOptions::CronExpression(expr, timezone) => { databend_common_cloud_control::pb::ScheduleOptions { interval: None, + milliseconds_interval: None, cron: Some(expr), time_zone: timezone, schedule_type: i32::from(ScheduleType::CronType), diff --git a/src/query/service/src/interpreters/interpreter_copy_into_table.rs b/src/query/service/src/interpreters/interpreter_copy_into_table.rs index 68474d6e9181b..33ce659c91b87 100644 --- a/src/query/service/src/interpreters/interpreter_copy_into_table.rs +++ 
b/src/query/service/src/interpreters/interpreter_copy_into_table.rs @@ -106,7 +106,13 @@ impl CopyIntoTableInterpreter { .await?; let mut update_stream_meta_reqs = vec![]; let (source, project_columns) = if let Some(ref query) = plan.query { - let (query_interpreter, update_stream_meta) = self.build_query(query).await?; + let query = if plan.enable_distributed { + query.remove_exchange_for_select() + } else { + *query.clone() + }; + + let (query_interpreter, update_stream_meta) = self.build_query(&query).await?; update_stream_meta_reqs = update_stream_meta; let query_physical_plan = Box::new(query_interpreter.build_physical_plan().await?); diff --git a/src/query/service/src/interpreters/interpreter_factory.rs b/src/query/service/src/interpreters/interpreter_factory.rs index 00c9ae6a39ed5..d5e2494292e3b 100644 --- a/src/query/service/src/interpreters/interpreter_factory.rs +++ b/src/query/service/src/interpreters/interpreter_factory.rs @@ -402,10 +402,6 @@ impl InterpreterFactory { ctx, *grant_role.clone(), )?)), - Plan::ShowGrants(show_grants) => Ok(Arc::new(ShowGrantsInterpreter::try_create( - ctx, - *show_grants.clone(), - )?)), Plan::RevokePriv(revoke_priv) => Ok(Arc::new(RevokePrivilegeInterpreter::try_create( ctx, *revoke_priv.clone(), diff --git a/src/query/service/src/interpreters/interpreter_insert.rs b/src/query/service/src/interpreters/interpreter_insert.rs index d017e340aecb2..c62cea6ac8c48 100644 --- a/src/query/service/src/interpreters/interpreter_insert.rs +++ b/src/query/service/src/interpreters/interpreter_insert.rs @@ -216,6 +216,7 @@ impl Interpreter for InsertInterpreter { let catalog = self.ctx.get_catalog(&self.plan.catalog).await?; let catalog_info = catalog.info(); + // here we remove the last exchange merge plan to trigger distribute insert let insert_select_plan = match select_plan { PhysicalPlan::Exchange(ref mut exchange) => { // insert can be dispatched to different nodes diff --git 
a/src/query/service/src/interpreters/interpreter_show_grants.rs b/src/query/service/src/interpreters/interpreter_show_grants.rs deleted file mode 100644 index 38fb16e4240cf..0000000000000 --- a/src/query/service/src/interpreters/interpreter_show_grants.rs +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2021 Datafuse Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::collections::HashMap; -use std::sync::Arc; - -use databend_common_exception::Result; -use databend_common_expression::types::StringType; -use databend_common_expression::types::UInt64Type; -use databend_common_expression::DataBlock; -use databend_common_expression::FromData; -use databend_common_meta_app::principal::GrantEntry; -use databend_common_meta_app::principal::GrantObject; -use databend_common_meta_app::principal::PrincipalIdentity; -use databend_common_meta_app::principal::UserPrivilegeSet; -use databend_common_sql::plans::ShowGrantsPlan; -use databend_common_users::RoleCacheManager; -use databend_common_users::UserApiProvider; - -use crate::interpreters::Interpreter; -use crate::pipelines::PipelineBuildResult; -use crate::sessions::QueryContext; -use crate::sessions::TableContext; - -pub struct ShowGrantsInterpreter { - ctx: Arc, - plan: ShowGrantsPlan, -} - -impl ShowGrantsInterpreter { - pub fn try_create(ctx: Arc, plan: ShowGrantsPlan) -> Result { - Ok(ShowGrantsInterpreter { ctx, plan }) - } -} - -#[async_trait::async_trait] -impl Interpreter for ShowGrantsInterpreter 
{ - fn name(&self) -> &str { - "ShowGrantsInterpreter" - } - - fn is_ddl(&self) -> bool { - true - } - - #[async_backtrace::framed] - async fn execute2(&self) -> Result { - let tenant = self.ctx.get_tenant(); - - // TODO: add permission check on reading user grants - let (grant_to, name, identity, grant_set) = match self.plan.principal { - None => { - let user = self.ctx.get_current_user()?; - ( - "USER".to_string(), - user.name.to_string(), - user.identity().display().to_string(), - user.grants, - ) - } - Some(ref principal) => match principal { - PrincipalIdentity::User(user) => { - let user = UserApiProvider::instance() - .get_user(&tenant, user.clone()) - .await?; - ( - "USER".to_string(), - user.name.to_string(), - user.identity().display().to_string(), - user.grants, - ) - } - PrincipalIdentity::Role(role) => { - let role_info = UserApiProvider::instance() - .get_role(&tenant, role.clone()) - .await?; - ( - "ROLE".to_string(), - role.to_string(), - format!("ROLE `{}`", role_info.identity()), - role_info.grants, - ) - } - }, - }; - // TODO: display roles list instead of the inherited roles - let grant_entries = RoleCacheManager::instance() - .find_related_roles(&tenant, &grant_set.roles()) - .await? - .into_iter() - .map(|role| role.grants) - .fold(grant_set, |a, b| a | b) - .entries(); - - let mut grant_list: Vec = Vec::new(); - - // must split with two hashmap, hashmap key is catalog name. - // maybe contain: default.db1 and default.db2.t, - // It will re-write the exists key. 
- let mut catalog_db_ids: HashMap> = HashMap::new(); - let mut catalog_table_ids: HashMap> = HashMap::new(); - - fn get_priv_str(grant_entry: &GrantEntry) -> String { - if grant_entry.has_all_available_privileges() { - "ALL".to_string() - } else { - let privileges: UserPrivilegeSet = (*grant_entry.privileges()).into(); - privileges.to_string() - } - } - - let mut object_id = vec![]; - let mut object_name = vec![]; - let mut privileges = vec![]; - for grant_entry in grant_entries { - let object = grant_entry.object(); - match object { - GrantObject::TableById(catalog_name, db_id, table_id) => { - let privileges_str = get_priv_str(&grant_entry); - if let Some(tables_id_priv) = catalog_table_ids.get(catalog_name) { - let mut tables_id_priv = tables_id_priv.clone(); - tables_id_priv.push((*db_id, *table_id, privileges_str)); - catalog_table_ids.insert(catalog_name.clone(), tables_id_priv.clone()); - } else { - catalog_table_ids.insert(catalog_name.clone(), vec![( - *db_id, - *table_id, - privileges_str, - )]); - } - } - GrantObject::DatabaseById(catalog_name, db_id) => { - let privileges_str = get_priv_str(&grant_entry); - if let Some(dbs_id_priv) = catalog_db_ids.get(catalog_name) { - let mut dbs_id_priv = dbs_id_priv.clone(); - dbs_id_priv.push((*db_id, privileges_str)); - catalog_db_ids.insert(catalog_name.clone(), dbs_id_priv.clone()); - } else { - catalog_db_ids.insert(catalog_name.clone(), vec![(*db_id, privileges_str)]); - } - } - GrantObject::Database(catalog_name, database_name) => { - object_name.push(format!("{}.{}.*", catalog_name, database_name)); - object_id.push(None); - privileges.push(get_priv_str(&grant_entry)); - grant_list.push(format!("{} TO {}", grant_entry, identity)); - } - GrantObject::Table(catalog_name, database_name, table_name) => { - object_name.push(format!("{}.{}.{}", catalog_name, database_name, table_name)); - object_id.push(None); - privileges.push(get_priv_str(&grant_entry)); - grant_list.push(format!("{} TO {}", grant_entry, 
identity)); - } - GrantObject::Stage(stage_name) => { - object_name.push(stage_name.to_string()); - object_id.push(None); - privileges.push(get_priv_str(&grant_entry)); - grant_list.push(format!("{} TO {}", grant_entry, identity)); - } - GrantObject::UDF(udf_name) => { - object_name.push(udf_name.to_string()); - object_id.push(None); - privileges.push(get_priv_str(&grant_entry)); - grant_list.push(format!("{} TO {}", grant_entry, identity)); - } - GrantObject::Global => { - // grant all on *.* to a - object_name.push("*.*".to_string()); - object_id.push(None); - privileges.push(get_priv_str(&grant_entry)); - grant_list.push(format!("{} TO {}", grant_entry, identity)); - } - } - } - - for (catalog_name, dbs_priv_id) in catalog_db_ids { - let catalog = self.ctx.get_catalog(&catalog_name).await?; - let db_ids = dbs_priv_id.iter().map(|res| res.0).collect::>(); - let privileges_strs = dbs_priv_id - .iter() - .map(|res| res.1.clone()) - .collect::>(); - let dbs_name = catalog.mget_database_names_by_ids(&tenant, &db_ids).await?; - - for (i, db_name) in dbs_name.iter().enumerate() { - if let Some(db_name) = db_name { - object_name.push(db_name.to_string()); - object_id.push(Some(db_ids[i])); - privileges.push(privileges_strs[i].to_string()); - grant_list.push(format!( - "GRANT {} ON '{}'.'{}'.* TO {}", - &privileges_strs[i], catalog_name, db_name, identity - )); - } - } - } - - for (catalog_name, tables_priv_id) in catalog_table_ids { - let catalog = self.ctx.get_catalog(&catalog_name).await?; - let db_ids = tables_priv_id.iter().map(|res| res.0).collect::>(); - let table_ids = tables_priv_id.iter().map(|res| res.1).collect::>(); - let privileges_strs = tables_priv_id - .iter() - .map(|res| res.2.clone()) - .collect::>(); - let dbs_name = catalog.mget_database_names_by_ids(&tenant, &db_ids).await?; - let tables_name = catalog.mget_table_names_by_ids(&tenant, &table_ids).await?; - - for (i, table_name) in tables_name.iter().enumerate() { - if let Some(table_name) = 
table_name { - if let Some(db_name) = &dbs_name[i] { - object_name.push(format!("{}.{}.{}", catalog_name, db_name, table_name)); - object_id.push(Some(table_ids[i])); - privileges.push(privileges_strs[i].to_string()); - grant_list.push(format!( - "GRANT {} ON '{}'.'{}'.'{}' TO {}", - &privileges_strs[i], catalog_name, db_name, table_name, identity - )); - } - } - } - } - - let names: Vec = vec![name; privileges.len()]; - let grant_tos: Vec = vec![grant_to; privileges.len()]; - PipelineBuildResult::from_blocks(vec![DataBlock::new_from_columns(vec![ - StringType::from_data(privileges), - StringType::from_data(object_name), - UInt64Type::from_opt_data(object_id), - StringType::from_data(grant_tos), - StringType::from_data(names), - StringType::from_data(grant_list), - ])]) - } -} diff --git a/src/query/service/src/interpreters/mod.rs b/src/query/service/src/interpreters/mod.rs index 989a2954ea077..af48178755f68 100644 --- a/src/query/service/src/interpreters/mod.rs +++ b/src/query/service/src/interpreters/mod.rs @@ -91,7 +91,6 @@ mod interpreter_share_grant_object; mod interpreter_share_revoke_object; mod interpreter_share_show; mod interpreter_share_show_grant_tenants; -mod interpreter_show_grants; mod interpreter_show_object_grant_privileges; mod interpreter_stream_create; mod interpreter_stream_drop; @@ -209,7 +208,6 @@ pub use interpreter_share_grant_object::GrantShareObjectInterpreter; pub use interpreter_share_revoke_object::RevokeShareObjectInterpreter; pub use interpreter_share_show::ShowSharesInterpreter; pub use interpreter_share_show_grant_tenants::ShowGrantTenantsOfShareInterpreter; -pub use interpreter_show_grants::ShowGrantsInterpreter; pub use interpreter_show_object_grant_privileges::ShowObjectGrantPrivilegesInterpreter; pub use interpreter_stream_create::CreateStreamInterpreter; pub use interpreter_stream_drop::DropStreamInterpreter; diff --git a/src/query/service/src/pipelines/builders/builder_window.rs 
b/src/query/service/src/pipelines/builders/builder_window.rs index 41043661a3644..b33f932f6f69e 100644 --- a/src/query/service/src/pipelines/builders/builder_window.rs +++ b/src/query/service/src/pipelines/builders/builder_window.rs @@ -56,22 +56,6 @@ impl PipelineBuilder { .collect::>>()?; let old_output_len = self.main_pipeline.output_len(); - if !partition_by.is_empty() || !order_by.is_empty() { - let mut sort_desc = Vec::with_capacity(partition_by.len() + order_by.len()); - - for offset in &partition_by { - sort_desc.push(SortColumnDescription { - offset: *offset, - asc: true, - nulls_first: true, - is_nullable: input_schema.field(*offset).is_nullable(), // This information is not needed here. - }) - } - - sort_desc.extend(order_by.clone()); - - self.build_sort_pipeline(input_schema.clone(), sort_desc, window.limit, None)?; - } // `TransformWindow` is a pipeline breaker. self.main_pipeline.try_resize(1)?; let func = WindowFunctionInfo::try_create(&window.func, &input_schema)?; diff --git a/src/query/service/src/pipelines/processors/transforms/aggregator/aggregate_meta.rs b/src/query/service/src/pipelines/processors/transforms/aggregator/aggregate_meta.rs index a0628527852e2..f8016c9e3ff1b 100644 --- a/src/query/service/src/pipelines/processors/transforms/aggregator/aggregate_meta.rs +++ b/src/query/service/src/pipelines/processors/transforms/aggregator/aggregate_meta.rs @@ -233,7 +233,7 @@ impl Debug for AggregateMeta f.debug_struct("AggregateMeta::Serialized").finish() } AggregateMeta::Spilling(_) => f.debug_struct("Aggregate::Spilling").finish(), - AggregateMeta::Spilled(_) => f.debug_struct("Aggregate::Spilling").finish(), + AggregateMeta::Spilled(_) => f.debug_struct("Aggregate::Spilled").finish(), AggregateMeta::BucketSpilled(_) => f.debug_struct("Aggregate::BucketSpilled").finish(), AggregateMeta::AggregatePayload(_) => { f.debug_struct("AggregateMeta:AggregatePayload").finish() diff --git 
a/src/query/service/src/schedulers/fragments/query_fragment_actions_display.rs b/src/query/service/src/schedulers/fragments/query_fragment_actions_display.rs index 8f7ba28307fbd..f635a2a0ecd1a 100644 --- a/src/query/service/src/schedulers/fragments/query_fragment_actions_display.rs +++ b/src/query/service/src/schedulers/fragments/query_fragment_actions_display.rs @@ -80,7 +80,7 @@ impl<'a> Display for QueryFragmentActionsWrap<'a> { let plan_display_string = fragment_action .physical_plan .format(self.metadata.clone(), Default::default()) - .and_then(|node| node.format_pretty_with_prefix(" ")) + .and_then(|node| Ok(node.format_pretty_with_prefix(" ")?)) .unwrap(); write!(f, "{}", plan_display_string)?; } diff --git a/src/query/service/src/servers/http/v1/json_block.rs b/src/query/service/src/servers/http/v1/json_block.rs index 83d4cc59e9eb7..08b395edfc8d7 100644 --- a/src/query/service/src/servers/http/v1/json_block.rs +++ b/src/query/service/src/servers/http/v1/json_block.rs @@ -21,7 +21,7 @@ use databend_common_formats::field_encoder::FieldEncoderValues; use databend_common_io::prelude::FormatSettings; use serde_json::Value as JsonValue; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct JsonBlock { pub(crate) data: Vec>, } diff --git a/src/query/service/src/servers/http/v1/query/page_manager.rs b/src/query/service/src/servers/http/v1/query/page_manager.rs index eb240b58aa76d..1fd3d1d8e2289 100644 --- a/src/query/service/src/servers/http/v1/query/page_manager.rs +++ b/src/query/service/src/servers/http/v1/query/page_manager.rs @@ -106,10 +106,13 @@ impl PageManager { Ok(page) } else { // when end is set to true, client should recv a response with next_url = final_url - Err(ErrorCode::Internal(format!( - "expect /final from client, got /page/{}.", - page_no - ))) + // but the response may be lost and client will retry, + // we simply return an empty page. 
+ let page = Page { + data: JsonBlock::default(), + total_rows: self.total_rows, + }; + Ok(page) } } else if page_no + 1 == next_no { // later, there may be other ways to ack and drop the last page except collect_new_page. diff --git a/src/query/service/src/sessions/query_ctx_shared.rs b/src/query/service/src/sessions/query_ctx_shared.rs index cc55c410ffd13..2b3673cfaf24c 100644 --- a/src/query/service/src/sessions/query_ctx_shared.rs +++ b/src/query/service/src/sessions/query_ctx_shared.rs @@ -582,12 +582,11 @@ impl Drop for QueryContextShared { pub fn short_sql(sql: String) -> String { use unicode_segmentation::UnicodeSegmentation; - const MAX_LENGTH: usize = 30 * 1024; // 30KB + const MAX_LENGTH: usize = 128; let query = sql.trim_start(); if query.as_bytes().len() > MAX_LENGTH && query.as_bytes()[..6].eq_ignore_ascii_case(b"INSERT") { - // keep first 30KB let mut result = Vec::new(); let mut bytes_taken = 0; for grapheme in query.graphemes(true) { diff --git a/src/query/service/src/table_functions/mod.rs b/src/query/service/src/table_functions/mod.rs index dd939aae13549..fa892542a35a7 100644 --- a/src/query/service/src/table_functions/mod.rs +++ b/src/query/service/src/table_functions/mod.rs @@ -20,6 +20,7 @@ mod list_stage; mod numbers; mod openai; mod others; +mod show_grants; mod srf; mod sync_crash_me; mod table_function; diff --git a/src/query/service/src/table_functions/show_grants/mod.rs b/src/query/service/src/table_functions/show_grants/mod.rs new file mode 100644 index 0000000000000..fcb6268aebcbe --- /dev/null +++ b/src/query/service/src/table_functions/show_grants/mod.rs @@ -0,0 +1,17 @@ +// Copyright 2021 Datafuse Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod show_grants_table; + +pub use show_grants_table::ShowGrants; diff --git a/src/query/service/src/table_functions/show_grants/show_grants_table.rs b/src/query/service/src/table_functions/show_grants/show_grants_table.rs new file mode 100644 index 0000000000000..73f5a35e68879 --- /dev/null +++ b/src/query/service/src/table_functions/show_grants/show_grants_table.rs @@ -0,0 +1,596 @@ +// Copyright 2021 Datafuse Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::any::Any; +use std::collections::HashMap; +use std::sync::Arc; + +use databend_common_catalog::plan::DataSourcePlan; +use databend_common_catalog::plan::PartStatistics; +use databend_common_catalog::plan::Partitions; +use databend_common_catalog::plan::PushDownInfo; +use databend_common_catalog::table::Table; +use databend_common_catalog::table_args::TableArgs; +use databend_common_catalog::table_context::TableContext; +use databend_common_catalog::table_function::TableFunction; +use databend_common_exception::ErrorCode; +use databend_common_exception::Result; +use databend_common_expression::types::NumberDataType; +use databend_common_expression::types::StringType; +use databend_common_expression::types::UInt64Type; +use databend_common_expression::DataBlock; +use databend_common_expression::FromData; +use databend_common_expression::Scalar; +use databend_common_expression::TableDataType; +use databend_common_expression::TableField; +use databend_common_expression::TableSchema; +use databend_common_expression::TableSchemaRefExt; +use databend_common_meta_app::principal::GrantEntry; +use databend_common_meta_app::principal::GrantObject; +use databend_common_meta_app::principal::UserIdentity; +use databend_common_meta_app::principal::UserPrivilegeSet; +use databend_common_meta_app::principal::UserPrivilegeType; +use databend_common_meta_app::schema::TableIdent; +use databend_common_meta_app::schema::TableInfo; +use databend_common_meta_app::schema::TableMeta; +use databend_common_pipeline_core::processors::OutputPort; +use databend_common_pipeline_core::processors::ProcessorPtr; +use databend_common_pipeline_core::Pipeline; +use databend_common_pipeline_sources::AsyncSource; +use databend_common_pipeline_sources::AsyncSourcer; +use databend_common_sql::validate_function_arg; +use databend_common_users::RoleCacheManager; +use databend_common_users::UserApiProvider; + +const SHOW_GRANTS: &str = "show_grants"; + +pub struct ShowGrants { + grant_type: 
String, + name: String, + catalog: String, + db_name: String, + table_info: TableInfo, +} + +// show grants for user/role name +// show grants to table/database/stage/udf name + +impl ShowGrants { + pub fn create( + database_name: &str, + table_func_name: &str, + table_id: u64, + table_args: TableArgs, + ) -> Result> { + let args = table_args.positioned; + // Check args len. + validate_function_arg(table_func_name, args.len(), Some((2, 4)), 2)?; + + if !args.iter().all(|arg| matches!(arg, Scalar::String(_))) { + return Err(ErrorCode::BadDataValueType(format!( + "Expected String type, but got {:?}", + args + ))); + } + + let grant_type = args[0].as_string().unwrap().to_string(); + let name = args[1].as_string().unwrap().to_string(); + let (catalog, db_name) = if args.len() == 3 { + (args[2].as_string().unwrap().to_string(), "".to_string()) + } else if args.len() == 4 { + ( + args[2].as_string().unwrap().to_string(), + args[3].as_string().unwrap().to_string(), + ) + } else { + ("".to_string(), "".to_string()) + }; + + let table_info = TableInfo { + ident: TableIdent::new(table_id, 0), + desc: format!("'{}'.'{}'", database_name, table_func_name), + name: table_func_name.to_string(), + meta: TableMeta { + schema: Self::schema(), + engine: SHOW_GRANTS.to_owned(), + ..Default::default() + }, + ..Default::default() + }; + + Ok(Arc::new(Self { + grant_type, + name, + catalog, + db_name, + table_info, + })) + } + + fn schema() -> Arc { + TableSchemaRefExt::create(vec![ + TableField::new("privileges", TableDataType::String), + TableField::new("object_name", TableDataType::String), + TableField::new( + "object_id", + TableDataType::Nullable(Box::from(TableDataType::Number(NumberDataType::UInt64))), + ), + TableField::new("grant_to", TableDataType::String), + TableField::new("name", TableDataType::String), + TableField::new( + "grants", + TableDataType::Nullable(Box::new(TableDataType::String)), + ), + ]) + } +} + +#[async_trait::async_trait] +impl Table for ShowGrants { + fn 
as_any(&self) -> &dyn Any { + self + } + + fn get_table_info(&self) -> &TableInfo { + &self.table_info + } + + #[async_backtrace::framed] + async fn read_partitions( + &self, + _ctx: Arc, + _push_downs: Option, + _dry_run: bool, + ) -> Result<(PartStatistics, Partitions)> { + Ok((PartStatistics::default(), Partitions::default())) + } + + fn table_args(&self) -> Option { + Some(TableArgs::new_positioned(vec![ + Scalar::String(self.grant_type.clone()), + Scalar::String(self.name.clone()), + Scalar::String(self.catalog.clone()), + Scalar::String(self.db_name.clone()), + ])) + } + + fn read_data( + &self, + ctx: Arc, + _plan: &DataSourcePlan, + pipeline: &mut Pipeline, + _put_cache: bool, + ) -> Result<()> { + pipeline.add_source( + |output| { + ShowGrantsSource::create( + ctx.clone(), + output, + self.grant_type.clone(), + self.name.clone(), + self.catalog.clone(), + self.db_name.clone(), + ) + }, + 1, + )?; + + Ok(()) + } +} + +struct ShowGrantsSource { + ctx: Arc, + grant_type: String, + name: String, + catalog: String, + db_name: String, + finished: bool, +} + +impl ShowGrantsSource { + pub fn create( + ctx: Arc, + output: Arc, + grant_type: String, + name: String, + catalog: String, + db_name: String, + ) -> Result { + AsyncSourcer::create(ctx.clone(), output, ShowGrantsSource { + ctx, + grant_type, + name, + catalog, + db_name, + finished: false, + }) + } +} + +#[async_trait::async_trait] +impl AsyncSource for ShowGrantsSource { + const NAME: &'static str = "show_grants"; + + #[async_trait::unboxed_simple] + #[async_backtrace::framed] + async fn generate(&mut self) -> Result> { + if self.finished { + return Ok(None); + } + + let res = match self.grant_type.to_lowercase().as_str() { + "role" | "user" => { + show_account_grants(self.ctx.clone(), &self.grant_type, &self.name).await? + } + "table" | "database" | "udf" | "stage" => { + show_object_grant( + self.ctx.clone(), + &self.grant_type, + &self.name, + &self.catalog, + &self.db_name, + ) + .await? 
+ } + _ => { + return Err(ErrorCode::InvalidArgument(format!( + "Expected 'user|role|table|database|udf|stage', but got {:?}", + self.grant_type + ))); + } + }; + + // Mark done. + self.finished = true; + Ok(res) + } +} + +async fn show_account_grants( + ctx: Arc, + grant_type: &str, + name: &str, +) -> Result> { + let tenant = ctx.get_tenant(); + let current_user = ctx.get_current_user()?; + let has_grant_priv = current_user + .grants + .entries() + .iter() + .any(|entry| entry.verify_privilege(&GrantObject::Global, UserPrivilegeType::Grant)); + // TODO: add permission check on reading user grants + let (grant_to, name, identity, grant_set) = match grant_type { + "user" => { + let user = UserApiProvider::instance() + .get_user(&tenant, UserIdentity::new(name, "%")) + .await?; + if current_user.identity().username != name && !has_grant_priv { + let mut roles = current_user.grants.roles(); + roles.sort(); + + return Err(ErrorCode::PermissionDenied(format!( + "Permission denied: privilege [Grant] is required on *.* for user {} with roles [{}]", + ¤t_user.identity().display(), + roles.join(",") + ))); + } + ( + "USER".to_string(), + user.name.to_string(), + user.identity().display().to_string(), + user.grants, + ) + } + "role" => { + if !current_user.grants.roles().contains(&name.to_string()) && !has_grant_priv { + let mut roles = current_user.grants.roles(); + roles.sort(); + return Err(ErrorCode::PermissionDenied(format!( + "Permission denied: privilege [Grant] is required on *.* for user {} with roles [{}]", + ¤t_user.identity().display(), + roles.join(",") + ))); + } + let role_info = UserApiProvider::instance() + .get_role(&tenant, name.to_string()) + .await?; + ( + "ROLE".to_string(), + name.to_string(), + format!("ROLE `{}`", role_info.identity()), + role_info.grants, + ) + } + _ => { + return Err(ErrorCode::InvalidArgument(format!( + "Expected 'user|role', but got {:?}", + grant_type + ))); + } + }; + + // TODO: display roles list instead of the inherited 
roles + let user_roles = RoleCacheManager::instance() + .find_related_roles(&tenant, &grant_set.roles()) + .await?; + + let grant_entries = user_roles + .into_iter() + .map(|role| role.grants) + .fold(grant_set, |a, b| a | b) + .entries(); + + let mut grant_list: Vec = Vec::new(); + + // must split with two hashmap, hashmap key is catalog name. + // maybe contain: default.db1 and default.db2.t, + // It will re-write the exists key. + let mut catalog_db_ids: HashMap> = HashMap::new(); + let mut catalog_table_ids: HashMap> = HashMap::new(); + + fn get_priv_str(grant_entry: &GrantEntry) -> String { + if grant_entry.has_all_available_privileges() { + "ALL".to_string() + } else { + let privileges: UserPrivilegeSet = (*grant_entry.privileges()).into(); + privileges.to_string() + } + } + + let mut object_id = vec![]; + let mut object_name = vec![]; + let mut privileges = vec![]; + for grant_entry in grant_entries { + let object = grant_entry.object(); + match object { + GrantObject::TableById(catalog_name, db_id, table_id) => { + let privileges_str = get_priv_str(&grant_entry); + if let Some(tables_id_priv) = catalog_table_ids.get(catalog_name) { + let mut tables_id_priv = tables_id_priv.clone(); + tables_id_priv.push((*db_id, *table_id, privileges_str)); + catalog_table_ids.insert(catalog_name.clone(), tables_id_priv.clone()); + } else { + catalog_table_ids.insert(catalog_name.clone(), vec![( + *db_id, + *table_id, + privileges_str, + )]); + } + } + GrantObject::DatabaseById(catalog_name, db_id) => { + let privileges_str = get_priv_str(&grant_entry); + if let Some(dbs_id_priv) = catalog_db_ids.get(catalog_name) { + let mut dbs_id_priv = dbs_id_priv.clone(); + dbs_id_priv.push((*db_id, privileges_str)); + catalog_db_ids.insert(catalog_name.clone(), dbs_id_priv.clone()); + } else { + catalog_db_ids.insert(catalog_name.clone(), vec![(*db_id, privileges_str)]); + } + } + GrantObject::Database(catalog_name, database_name) => { + object_name.push(format!("{}.{}.*", 
catalog_name, database_name)); + object_id.push(None); + privileges.push(get_priv_str(&grant_entry)); + grant_list.push(format!("{} TO {}", grant_entry, identity)); + } + GrantObject::Table(catalog_name, database_name, table_name) => { + object_name.push(format!("{}.{}.{}", catalog_name, database_name, table_name)); + object_id.push(None); + privileges.push(get_priv_str(&grant_entry)); + grant_list.push(format!("{} TO {}", grant_entry, identity)); + } + GrantObject::Stage(stage_name) => { + object_name.push(stage_name.to_string()); + object_id.push(None); + privileges.push(get_priv_str(&grant_entry)); + grant_list.push(format!("{} TO {}", grant_entry, identity)); + } + GrantObject::UDF(udf_name) => { + object_name.push(udf_name.to_string()); + object_id.push(None); + privileges.push(get_priv_str(&grant_entry)); + grant_list.push(format!("{} TO {}", grant_entry, identity)); + } + GrantObject::Global => { + // grant all on *.* to a + object_name.push("*.*".to_string()); + object_id.push(None); + privileges.push(get_priv_str(&grant_entry)); + grant_list.push(format!("{} TO {}", grant_entry, identity)); + } + } + } + + for (catalog_name, dbs_priv_id) in catalog_db_ids { + let catalog = ctx.get_catalog(&catalog_name).await?; + let db_ids = dbs_priv_id.iter().map(|res| res.0).collect::>(); + let privileges_strs = dbs_priv_id + .iter() + .map(|res| res.1.clone()) + .collect::>(); + let dbs_name = catalog.mget_database_names_by_ids(&tenant, &db_ids).await?; + + for (i, db_name) in dbs_name.iter().enumerate() { + if let Some(db_name) = db_name { + object_name.push(db_name.to_string()); + object_id.push(Some(db_ids[i])); + privileges.push(privileges_strs[i].to_string()); + grant_list.push(format!( + "GRANT {} ON '{}'.'{}'.* TO {}", + &privileges_strs[i], catalog_name, db_name, identity + )); + } + } + } + + for (catalog_name, tables_priv_id) in catalog_table_ids { + let catalog = ctx.get_catalog(&catalog_name).await?; + let db_ids = tables_priv_id.iter().map(|res| 
res.0).collect::>(); + let table_ids = tables_priv_id.iter().map(|res| res.1).collect::>(); + let privileges_strs = tables_priv_id + .iter() + .map(|res| res.2.clone()) + .collect::>(); + let dbs_name = catalog.mget_database_names_by_ids(&tenant, &db_ids).await?; + let tables_name = catalog.mget_table_names_by_ids(&tenant, &table_ids).await?; + + for (i, table_name) in tables_name.iter().enumerate() { + if let Some(table_name) = table_name { + if let Some(db_name) = &dbs_name[i] { + object_name.push(format!("{}.{}.{}", catalog_name, db_name, table_name)); + object_id.push(Some(table_ids[i])); + privileges.push(privileges_strs[i].to_string()); + grant_list.push(format!( + "GRANT {} ON '{}'.'{}'.'{}' TO {}", + &privileges_strs[i], catalog_name, db_name, table_name, identity + )); + } + } + } + } + + let names: Vec = vec![name; privileges.len()]; + let grant_tos: Vec = vec![grant_to; privileges.len()]; + Ok(Some(DataBlock::new_from_columns(vec![ + StringType::from_data(privileges), + StringType::from_data(object_name), + UInt64Type::from_opt_data(object_id), + StringType::from_data(grant_tos), + StringType::from_data(names), + StringType::from_data(grant_list), + ]))) +} + +async fn show_object_grant( + ctx: Arc, + grant_type: &str, + name: &str, + catalog_name: &str, + db_name: &str, +) -> Result> { + let tenant = ctx.get_tenant(); + let roles = UserApiProvider::instance().get_roles(&tenant).await?; + let visibility_checker = ctx.get_visibility_checker().await?; + let current_user = ctx.get_current_user()?.identity().username; + let (object, object_id, object_name) = match grant_type { + "table" => { + let catalog = ctx.get_catalog(catalog_name).await?; + let db_id = catalog + .get_database(&tenant, db_name) + .await? 
+ .get_db_info() + .ident + .db_id; + let table_id = catalog.get_table(&tenant, db_name, name).await?.get_id(); + if !visibility_checker.check_table_visibility( + catalog_name, + db_name, + name, + db_id, + table_id, + ) { + return Err(ErrorCode::PermissionDenied(format!( + "Permission denied: No privilege on table {} for user {}.", + name, current_user + ))); + } + ( + GrantObject::TableById(catalog_name.to_string(), db_id, table_id), + Some(table_id), + name, + ) + } + "database" => { + // db_name is empty string, name is real database name + let catalog = ctx.get_catalog(catalog_name).await?; + let db_id = catalog + .get_database(&tenant, name) + .await? + .get_db_info() + .ident + .db_id; + if !visibility_checker.check_database_visibility(catalog_name, name, db_id) { + return Err(ErrorCode::PermissionDenied(format!( + "Permission denied: No privilege on database {} for user {}.", + name, current_user + ))); + } + ( + GrantObject::DatabaseById(catalog_name.to_string(), db_id), + Some(db_id), + name, + ) + } + "udf" => { + if !visibility_checker.check_udf_visibility(name) { + return Err(ErrorCode::PermissionDenied(format!( + "Permission denied: privilege USAGE is required on udf {} for user {}.", + name, current_user + ))); + } + (GrantObject::UDF(name.to_string()), None, name) + } + "stage" => { + if !visibility_checker.check_stage_visibility(name) { + return Err(ErrorCode::PermissionDenied(format!( + "Permission denied: privilege READ is required on stage {} for user {}. 
Or no need to show the stage privilege", + name, current_user + ))); + } + (GrantObject::Stage(name.to_string()), None, name) + } + _ => { + return Err(ErrorCode::InvalidArgument(format!( + "Expected 'table|database|udf|stage', but got {:?}", + grant_type + ))); + } + }; + + let mut names = vec![]; + let mut privileges = vec![]; + for role in roles { + for entry in role.grants.entries() { + if entry.matches_entry(&object) { + let privilege: UserPrivilegeSet = (*entry.privileges()).into(); + privileges.push(privilege.to_string()); + names.push(role.name.to_string()); + } + } + } + let object_ids = vec![object_id; privileges.len()]; + let object_names = vec![object_name; privileges.len()]; + let grant_tos: Vec = vec!["ROLE".to_string(); privileges.len()]; + let grant_list = vec!["".to_string(); privileges.len()]; + Ok(Some(DataBlock::new_from_columns(vec![ + StringType::from_data(privileges), + StringType::from_data(object_names), + UInt64Type::from_opt_data(object_ids), + StringType::from_data(grant_tos), + StringType::from_data(names), + StringType::from_data(grant_list), + ]))) +} + +impl TableFunction for ShowGrants { + fn function_name(&self) -> &str { + self.name() + } + + fn as_table<'a>(self: Arc) -> Arc + where Self: 'a { + self + } +} diff --git a/src/query/service/src/table_functions/table_function_factory.rs b/src/query/service/src/table_functions/table_function_factory.rs index 00e36f40ede44..00aaa2ca980b6 100644 --- a/src/query/service/src/table_functions/table_function_factory.rs +++ b/src/query/service/src/table_functions/table_function_factory.rs @@ -43,6 +43,7 @@ use crate::table_functions::infer_schema::InferSchemaTable; use crate::table_functions::inspect_parquet::InspectParquetTable; use crate::table_functions::list_stage::ListStageTable; use crate::table_functions::numbers::NumbersTable; +use crate::table_functions::show_grants::ShowGrants; use crate::table_functions::srf::RangeTable; use crate::table_functions::sync_crash_me::SyncCrashMeTable; 
use crate::table_functions::GPT2SQLTable; @@ -218,6 +219,11 @@ impl TableFunctionFactory { (next_id(), Arc::new(TaskDependentsEnableTable::create)), ); + creators.insert( + "show_grants".to_string(), + (next_id(), Arc::new(ShowGrants::create)), + ); + TableFunctionFactory { creators: RwLock::new(creators), } diff --git a/src/query/service/tests/it/servers/http/http_query_handlers.rs b/src/query/service/tests/it/servers/http/http_query_handlers.rs index 05158a08cf522..f9a54bb2a2d5d 100644 --- a/src/query/service/tests/it/servers/http/http_query_handlers.rs +++ b/src/query/service/tests/it/servers/http/http_query_handlers.rs @@ -305,14 +305,22 @@ async fn test_simple_sql() -> Result<()> { assert_eq!(result.state, ExecuteStateKind::Succeeded, "{:?}", result); } - // get page not expected + // client retry let page_1_uri = make_page_uri(query_id, 1); - let response = get_uri(&ep, &page_1_uri).await; + let (_, result) = get_uri_checked(&ep, &page_1_uri).await?; + assert_eq!(status, StatusCode::OK, "{:?}", result); + assert!(result.error.is_none(), "{:?}", result); + assert_eq!(result.data.len(), 0, "{:?}", result); + assert_eq!(result.next_uri, Some(final_uri.clone()), "{:?}", result); + + // get page not expected + let page_2_uri = make_page_uri(query_id, 2); + let response = get_uri(&ep, &page_2_uri).await; assert_eq!(response.status(), StatusCode::NOT_FOUND, "{:?}", result); let body = response.into_body().into_string().await.unwrap(); assert_eq!( body, - r#"{"error":{"code":"404","message":"expect /final from client, got /page/1."}}"# + r#"{"error":{"code":"404","message":"wrong page number 2"}}"# ); // final diff --git a/src/query/service/tests/it/sessions/query_ctx.rs b/src/query/service/tests/it/sessions/query_ctx.rs index e5a1fc615ab39..438a1424c5fcb 100644 --- a/src/query/service/tests/it/sessions/query_ctx.rs +++ b/src/query/service/tests/it/sessions/query_ctx.rs @@ -69,19 +69,19 @@ async fn test_get_storage_accessor_fs() -> Result<()> { #[test] fn 
test_short_sql() { - // Test case 1: SQL query shorter than 30KB + // Test case 1: SQL query shorter than 128 bytes let sql1 = "SELECT * FROM users WHERE id = 1;".to_string(); assert_eq!(short_sql(sql1.clone()), sql1); - // Test case 2: SQL query longer than 30KB and starts with "INSERT" + // Test case 2: SQL query longer than 128 bytes and starts with "INSERT" let long_sql = "INSERT INTO users (id, name, email) VALUES ".to_string() - + &"(1, 'John Doe', 'john@example.com'), ".repeat(1500); // Adjusted for 30KB - let expected_result = long_sql.as_bytes()[..30 * 1024].to_vec(); + + &"(1, 'John Doe', 'john@example.com'), ".repeat(5); // Adjusted for 128 bytes + let expected_result = long_sql.as_bytes()[..128].to_vec(); let expected_result = String::from_utf8(expected_result).unwrap() + "..."; assert_eq!(short_sql(long_sql), expected_result); - // Test case 3: SQL query longer than 30KB but does not start with "INSERT" - let long_sql = "SELECT * FROM users WHERE ".to_string() + &"id = 1 OR ".repeat(1500); // Adjusted for 30KB + // Test case 3: SQL query longer than 128 bytes but does not start with "INSERT" + let long_sql = "SELECT * FROM users WHERE ".to_string() + &"id = 1 OR ".repeat(20); // Adjusted for 128 bytes assert_eq!(short_sql(long_sql.clone()), long_sql); // Test case 4: Empty SQL query diff --git a/src/query/settings/src/settings_default.rs b/src/query/settings/src/settings_default.rs index d85126edea9f1..c47f9d1555d13 100644 --- a/src/query/settings/src/settings_default.rs +++ b/src/query/settings/src/settings_default.rs @@ -632,7 +632,7 @@ impl DefaultSettings { }), ("enable_experimental_rbac_check", DefaultSettingValue { value: UserSettingValue::UInt64(1), - desc: "experiment setting disables stage and udf privilege check(disable by default).", + desc: "experiment setting disables stage and udf privilege check(enable by default).", mode: SettingMode::Both, range: Some(SettingRange::Numeric(0..=1)), }), diff --git 
a/src/query/sql/src/executor/physical_plans/physical_window.rs b/src/query/sql/src/executor/physical_plans/physical_window.rs index fccf386c873a2..598a862253296 100644 --- a/src/query/sql/src/executor/physical_plans/physical_window.rs +++ b/src/query/sql/src/executor/physical_plans/physical_window.rs @@ -36,7 +36,6 @@ use crate::executor::physical_plans::common::SortDesc; use crate::executor::PhysicalPlan; use crate::executor::PhysicalPlanBuilder; use crate::optimizer::SExpr; -use crate::plans::ScalarItem; use crate::plans::WindowFuncFrame; use crate::plans::WindowFuncFrameBound; use crate::plans::WindowFuncType; @@ -152,7 +151,7 @@ impl PhysicalPlanBuilder { s_expr: &SExpr, window: &crate::plans::Window, mut required: ColumnSet, - stat_info: PlanStatsInfo, + _stat_info: PlanStatsInfo, ) -> Result { // 1. DO NOT Prune unused Columns cause window may not in required, eg: // select s1.a from ( select t1.a as a, dense_rank() over(order by t1.a desc) as rk @@ -175,37 +174,12 @@ impl PhysicalPlanBuilder { required.insert(item.order_by_item.index); }); - let column_projections = required.clone().into_iter().collect::>(); - // 2. Build physical plan. let input = self.build(s_expr.child(0)?, required).await?; let mut w = window.clone(); - // Generate a `EvalScalar` as the input of `Window`. - let mut scalar_items: Vec = Vec::new(); - for arg in &w.arguments { - scalar_items.push(arg.clone()); - } - for part in &w.partition_by { - scalar_items.push(part.clone()); - } - for order in &w.order_by { - scalar_items.push(order.order_by_item.clone()) - } - let input = if !scalar_items.is_empty() { - self.create_eval_scalar( - &crate::planner::plans::EvalScalar { - items: scalar_items, - }, - column_projections, - input, - stat_info.clone(), - )? - } else { - input - }; + let input_schema = input.output_schema()?; - // Unify the data type for range frame. 
if w.frame.units.is_range() && w.order_by.len() == 1 { let order_by = &mut w.order_by[0].order_by_item.scalar; diff --git a/src/query/sql/src/planner/binder/aggregate.rs b/src/query/sql/src/planner/binder/aggregate.rs index 4e0df6e0e5a41..b342930679960 100644 --- a/src/query/sql/src/planner/binder/aggregate.rs +++ b/src/query/sql/src/planner/binder/aggregate.rs @@ -206,10 +206,11 @@ impl<'a> AggregateRewriter<'a> { column: column_binding, })); } else { - let index = self - .metadata - .write() - .add_derived_column(name.clone(), arg.data_type()?); + let index = self.metadata.write().add_derived_column( + name.clone(), + arg.data_type()?, + Some(arg.clone()), + ); // Generate a ColumnBinding for each argument of aggregates let column_binding = ColumnBindingBuilder::new( @@ -237,6 +238,7 @@ impl<'a> AggregateRewriter<'a> { let index = self.metadata.write().add_derived_column( aggregate.display_name.clone(), *aggregate.return_type.clone(), + Some(ScalarExpr::AggregateFunction(aggregate.clone())), ); let replaced_agg = AggregateFunction { @@ -377,7 +379,11 @@ impl Binder { column.column_name = item.alias.clone(); column } else { - self.create_derived_column_binding(item.alias.clone(), item.scalar.data_type()?) + self.create_derived_column_binding( + item.alias.clone(), + item.scalar.data_type()?, + Some(item.scalar.clone()), + ) }; available_aliases.push((column, item.scalar.clone())); } @@ -528,6 +534,7 @@ impl Binder { let dummy = self.create_derived_column_binding( format!("_dup_group_item_{i}"), item.scalar.data_type()?, + Some(item.scalar.clone()), ); dup_group_items.push((dummy.index, *dummy.data_type)); } @@ -535,6 +542,7 @@ impl Binder { let grouping_id_column = self.create_derived_column_binding( "_grouping_id".to_string(), DataType::Number(NumberDataType::UInt32), + None, ); let bound_grouping_id_col = BoundColumnRef { @@ -614,7 +622,11 @@ impl Binder { { column_ref.column.clone() } else { - self.create_derived_column_binding(alias, scalar.data_type()?) 
+ self.create_derived_column_binding( + alias, + scalar.data_type()?, + Some(scalar.clone()), + ) }; bind_context.aggregate_info.group_items.push(ScalarItem { scalar: scalar.clone(), @@ -665,9 +677,11 @@ impl Binder { { *index } else { - self.metadata - .write() - .add_derived_column(group_item_name.clone(), scalar_expr.data_type()?) + self.metadata.write().add_derived_column( + group_item_name.clone(), + scalar_expr.data_type()?, + Some(scalar_expr.clone()), + ) }; bind_context.aggregate_info.group_items.push(ScalarItem { diff --git a/src/query/sql/src/planner/binder/bind_context.rs b/src/query/sql/src/planner/binder/bind_context.rs index 808d1fe16af09..a7e77fe48af94 100644 --- a/src/query/sql/src/planner/binder/bind_context.rs +++ b/src/query/sql/src/planner/binder/bind_context.rs @@ -21,11 +21,11 @@ use databend_common_ast::ast::Identifier; use databend_common_ast::ast::Query; use databend_common_ast::ast::TableAlias; use databend_common_ast::ast::WindowSpec; +use databend_common_ast::Span; use databend_common_catalog::plan::InternalColumn; use databend_common_catalog::plan::InvertedIndexInfo; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use databend_common_expression::ColumnId; use databend_common_expression::DataField; use databend_common_expression::DataSchemaRef; diff --git a/src/query/sql/src/planner/binder/binder.rs b/src/query/sql/src/planner/binder/binder.rs index d33528c45f9e6..08b349d194ec8 100644 --- a/src/query/sql/src/planner/binder/binder.rs +++ b/src/query/sql/src/planner/binder/binder.rs @@ -62,7 +62,6 @@ use crate::plans::RelOperator; use crate::plans::RewriteKind; use crate::plans::ShowConnectionsPlan; use crate::plans::ShowFileFormatsPlan; -use crate::plans::ShowGrantsPlan; use crate::plans::ShowRolesPlan; use crate::plans::UseDatabasePlan; use crate::plans::Visitor; @@ -446,9 +445,8 @@ impl<'a> Binder { // Permissions Statement::Grant(stmt) => 
self.bind_grant(stmt).await?, - Statement::ShowGrants { principal } => Plan::ShowGrants(Box::new(ShowGrantsPlan { - principal: principal.clone().map(Into::into), - })), + Statement::ShowGrants { principal, show_options } => self.bind_show_account_grants(bind_context, principal, show_options).await?, + Statement::ShowObjectPrivileges(stmt) => self.bind_show_object_privileges(bind_context, stmt).await?, Statement::Revoke(stmt) => self.bind_revoke(stmt).await?, // File Formats @@ -705,11 +703,13 @@ impl<'a> Binder { &mut self, column_name: String, data_type: DataType, + scalar_expr: Option, ) -> ColumnBinding { - let index = self - .metadata - .write() - .add_derived_column(column_name.clone(), data_type.clone()); + let index = self.metadata.write().add_derived_column( + column_name.clone(), + data_type.clone(), + scalar_expr, + ); ColumnBindingBuilder::new(column_name, index, Box::new(data_type), Visibility::Visible) .build() } diff --git a/src/query/sql/src/planner/binder/ddl/account.rs b/src/query/sql/src/planner/binder/ddl/account.rs index c27840765ae16..326f8f8d1d6f7 100644 --- a/src/query/sql/src/planner/binder/ddl/account.rs +++ b/src/query/sql/src/planner/binder/ddl/account.rs @@ -17,8 +17,12 @@ use databend_common_ast::ast::AccountMgrLevel; use databend_common_ast::ast::AccountMgrSource; use databend_common_ast::ast::AlterUserStmt; use databend_common_ast::ast::CreateUserStmt; +use databend_common_ast::ast::GrantObjectName; use databend_common_ast::ast::GrantStmt; +use databend_common_ast::ast::PrincipalIdentity as AstPrincipalIdentity; use databend_common_ast::ast::RevokeStmt; +use databend_common_ast::ast::ShowObjectPrivilegesStmt; +use databend_common_ast::ast::ShowOptions; use databend_common_exception::ErrorCode; use databend_common_exception::Result; use databend_common_meta_app::principal::AuthInfo; @@ -28,6 +32,7 @@ use databend_common_meta_app::principal::UserOption; use databend_common_meta_app::principal::UserPrivilegeSet; use 
databend_common_users::UserApiProvider; +use crate::binder::show::get_show_options; use crate::binder::util::illegal_ident_name; use crate::plans::AlterUserPlan; use crate::plans::CreateUserPlan; @@ -36,6 +41,8 @@ use crate::plans::GrantRolePlan; use crate::plans::Plan; use crate::plans::RevokePrivilegePlan; use crate::plans::RevokeRolePlan; +use crate::plans::RewriteKind; +use crate::BindContext; use crate::Binder; impl Binder { @@ -331,4 +338,78 @@ impl Binder { Ok(Plan::AlterUser(Box::new(plan))) } + + #[async_backtrace::framed] + pub(in crate::planner::binder) async fn bind_show_account_grants( + &mut self, + bind_context: &mut BindContext, + principal: &Option, + show_options: &Option, + ) -> Result { + let query = if let Some(principal) = principal { + match principal { + AstPrincipalIdentity::User(user) => { + format!("SELECT * FROM show_grants('user', '{}')", user.username) + } + AstPrincipalIdentity::Role(role) => { + format!("SELECT * FROM show_grants('role', '{}')", role) + } + } + } else { + let name = self.ctx.get_current_user()?.name; + format!("SELECT * FROM show_grants('user', '{}')", name) + }; + + let (show_limit, limit_str) = + get_show_options(show_options, Some("object_name".to_string())); + let query = format!("{} {} {}", query, show_limit, limit_str,); + + self.bind_rewrite_to_query(bind_context, &query, RewriteKind::ShowGrants) + .await + } + + #[async_backtrace::framed] + pub(in crate::planner::binder) async fn bind_show_object_privileges( + &mut self, + bind_context: &mut BindContext, + stmt: &ShowObjectPrivilegesStmt, + ) -> Result { + let ShowObjectPrivilegesStmt { + object, + show_option, + } = stmt; + + let catalog = self.ctx.get_current_catalog(); + let query = match object { + GrantObjectName::Database(db) => { + format!( + "SELECT * FROM show_grants('database', '{}', '{}')", + db, catalog + ) + } + GrantObjectName::Table(db, tb) => { + let db = if let Some(db) = db { + db.to_string() + } else { + self.ctx.get_current_database() + }; 
+ format!( + "SELECT * FROM show_grants('table', '{}', '{}', '{}')", + tb, catalog, db + ) + } + GrantObjectName::UDF(name) => { + format!("SELECT * FROM show_grants('udf', '{}')", name) + } + GrantObjectName::Stage(name) => { + format!("SELECT * FROM show_grants('stage', '{}')", name) + } + }; + + let (show_limit, limit_str) = get_show_options(show_option, Some("name".to_string())); + let query = format!("{} {} {}", query, show_limit, limit_str,); + + self.bind_rewrite_to_query(bind_context, &query, RewriteKind::ShowGrants) + .await + } } diff --git a/src/query/sql/src/planner/binder/ddl/table.rs b/src/query/sql/src/planner/binder/ddl/table.rs index 799c2f6290abb..26114acaa7f2f 100644 --- a/src/query/sql/src/planner/binder/ddl/table.rs +++ b/src/query/sql/src/planner/binder/ddl/table.rs @@ -92,8 +92,6 @@ use crate::binder::scalar::ScalarBinder; use crate::binder::Binder; use crate::binder::ColumnBindingBuilder; use crate::binder::Visibility; -use crate::optimizer::optimize; -use crate::optimizer::OptimizerContext; use crate::parse_computed_expr_to_string; use crate::parse_default_expr_to_string; use crate::planner::semantic::normalize_identifier; @@ -661,10 +659,7 @@ impl Binder { let mut bind_context = BindContext::new(); let stmt = Statement::Query(Box::new(*query.clone())); let select_plan = self.bind_statement(&mut bind_context, &stmt).await?; - // Don't enable distributed optimization for `CREATE TABLE ... 
AS SELECT ...` for now - let opt_ctx = OptimizerContext::new(self.ctx.clone(), self.metadata.clone()); - let optimized_plan = optimize(opt_ctx, select_plan).await?; - Some(Box::new(optimized_plan)) + Some(Box::new(select_plan)) } else { None }, diff --git a/src/query/sql/src/planner/binder/ddl/task.rs b/src/query/sql/src/planner/binder/ddl/task.rs index 30d224957f3f6..da846bede8350 100644 --- a/src/query/sql/src/planner/binder/ddl/task.rs +++ b/src/query/sql/src/planner/binder/ddl/task.rs @@ -72,8 +72,8 @@ fn verify_scheduler_option(schedule_opts: &Option) -> Result<() return Ok(()); } let schedule_opts = schedule_opts.clone().unwrap(); - if let ScheduleOptions::CronExpression(cron_expr, time_zone) = schedule_opts { - if cron::Schedule::from_str(&cron_expr).is_err() { + if let ScheduleOptions::CronExpression(cron_expr, time_zone) = &schedule_opts { + if cron::Schedule::from_str(cron_expr).is_err() { return Err(ErrorCode::SemanticError(format!( "invalid cron expression {}", cron_expr @@ -81,7 +81,7 @@ fn verify_scheduler_option(schedule_opts: &Option) -> Result<() } if let Some(time_zone) = time_zone && !time_zone.is_empty() - && chrono_tz::Tz::from_str(&time_zone).is_err() + && chrono_tz::Tz::from_str(time_zone).is_err() { return Err(ErrorCode::SemanticError(format!( "invalid time zone {}", @@ -89,6 +89,18 @@ fn verify_scheduler_option(schedule_opts: &Option) -> Result<() ))); } } + + // ONLY allow milliseconds_interval value between + // [500, 1000) + if let ScheduleOptions::IntervalSecs(_, ms) = schedule_opts { + if ms != 0 && !(500..1000).contains(&ms) { + return Err(ErrorCode::SemanticError(format!( + "invalid milliseconds_interval value {}, must be in [500, 1000)", + ms + ))); + } + } + Ok(()) } diff --git a/src/query/sql/src/planner/binder/distinct.rs b/src/query/sql/src/planner/binder/distinct.rs index 8f4b9aed7407d..82d829f63759d 100644 --- a/src/query/sql/src/planner/binder/distinct.rs +++ b/src/query/sql/src/planner/binder/distinct.rs @@ -15,8 +15,8 @@ 
use std::collections::HashMap; use std::sync::Arc; +use databend_common_ast::Span; use databend_common_exception::Result; -use databend_common_exception::Span; use crate::binder::Binder; use crate::binder::ColumnBinding; diff --git a/src/query/sql/src/planner/binder/insert.rs b/src/query/sql/src/planner/binder/insert.rs index f8e2ac03257bf..e29ab7e09a243 100644 --- a/src/query/sql/src/planner/binder/insert.rs +++ b/src/query/sql/src/planner/binder/insert.rs @@ -29,8 +29,6 @@ use databend_common_meta_app::principal::OnErrorMode; use crate::binder::Binder; use crate::normalize_identifier; -use crate::optimizer::optimize; -use crate::optimizer::OptimizerContext; use crate::plans::insert::InsertValue; use crate::plans::CopyIntoTableMode; use crate::plans::Insert; @@ -179,11 +177,7 @@ impl Binder { InsertSource::Select { query } => { let statement = Statement::Query(query); let select_plan = self.bind_statement(bind_context, &statement).await?; - let opt_ctx = OptimizerContext::new(self.ctx.clone(), self.metadata.clone()) - .with_enable_distributed_optimization(!self.ctx.get_cluster().is_empty()); - - let optimized_plan = optimize(opt_ctx, select_plan).await?; - Ok(InsertInputSource::SelectPlan(Box::new(optimized_plan))) + Ok(InsertInputSource::SelectPlan(Box::new(select_plan))) } }; diff --git a/src/query/sql/src/planner/binder/insert_multi_table.rs b/src/query/sql/src/planner/binder/insert_multi_table.rs index 617e4c5fc0625..bf51fcae548ac 100644 --- a/src/query/sql/src/planner/binder/insert_multi_table.rs +++ b/src/query/sql/src/planner/binder/insert_multi_table.rs @@ -26,8 +26,6 @@ use databend_common_expression::DataSchemaRef; use databend_common_expression::TableSchema; use crate::binder::ScalarBinder; -use crate::optimizer::optimize; -use crate::optimizer::OptimizerContext; use crate::plans::Else; use crate::plans::InsertMultiTable; use crate::plans::Into; @@ -62,9 +60,6 @@ impl Binder { }; let (s_expr, bind_context) = self.bind_single_table(bind_context, 
&table_ref).await?; - let opt_ctx = OptimizerContext::new(self.ctx.clone(), self.metadata.clone()) - .with_enable_distributed_optimization(!self.ctx.get_cluster().is_empty()); - let select_plan = Plan::Query { s_expr: Box::new(s_expr), metadata: self.metadata.clone(), @@ -74,8 +69,7 @@ impl Binder { ignore_result: false, }; - let optimized_plan = optimize(opt_ctx, select_plan).await?; - (optimized_plan, bind_context) + (select_plan, bind_context) }; let source_schema = input_source.schema(); diff --git a/src/query/sql/src/planner/binder/join.rs b/src/query/sql/src/planner/binder/join.rs index 179791935ccb8..32a0605044f6e 100644 --- a/src/query/sql/src/planner/binder/join.rs +++ b/src/query/sql/src/planner/binder/join.rs @@ -21,10 +21,10 @@ use databend_common_ast::ast::split_equivalent_predicate_expr; use databend_common_ast::ast::Expr; use databend_common_ast::ast::JoinCondition; use databend_common_ast::ast::JoinOperator; +use databend_common_ast::Span; use databend_common_catalog::table_context::TableContext; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use indexmap::IndexMap; use super::Finder; diff --git a/src/query/sql/src/planner/binder/location.rs b/src/query/sql/src/planner/binder/location.rs index 483b7011769a4..1c9825d7cf8ed 100644 --- a/src/query/sql/src/planner/binder/location.rs +++ b/src/query/sql/src/planner/binder/location.rs @@ -74,7 +74,9 @@ fn parse_azure_params(l: &mut UriLocation, root: String) -> Result Result { external_id, }); - l.connection.check()?; + l.connection + .check() + .map_err(|err| Error::new(ErrorKind::InvalidInput, err.to_string()))?; Ok(sp) } @@ -201,7 +205,9 @@ fn parse_gcs_params(l: &mut UriLocation) -> Result { credential: l.connection.get("credential").cloned().unwrap_or_default(), }); - l.connection.check()?; + l.connection + .check() + .map_err(|err| Error::new(ErrorKind::InvalidInput, err.to_string()))?; Ok(sp) } @@ -217,7 +223,9 @@ fn 
parse_ipfs_params(l: &mut UriLocation) -> Result { root: "/ipfs/".to_string() + l.name.as_str(), }); - l.connection.check()?; + l.connection + .check() + .map_err(|err| Error::new(ErrorKind::InvalidInput, err.to_string()))?; Ok(sp) } @@ -254,7 +262,9 @@ fn parse_oss_params(l: &mut UriLocation, root: String) -> Result server_side_encryption_key_id: "".to_string(), }); - l.connection.check()?; + l.connection + .check() + .map_err(|err| Error::new(ErrorKind::InvalidInput, err.to_string()))?; Ok(sp) } @@ -287,7 +297,9 @@ fn parse_obs_params(l: &mut UriLocation, root: String) -> Result root, }); - l.connection.check()?; + l.connection + .check() + .map_err(|err| Error::new(ErrorKind::InvalidInput, err.to_string()))?; Ok(sp) } @@ -312,7 +324,9 @@ fn parse_cos_params(l: &mut UriLocation, root: String) -> Result root, }); - l.connection.check()?; + l.connection + .check() + .map_err(|err| Error::new(ErrorKind::InvalidInput, err.to_string()))?; Ok(sp) } @@ -357,7 +371,9 @@ fn parse_hdfs_params(l: &mut UriLocation) -> Result { name_node, root: l.path.clone(), }); - l.connection.check()?; + l.connection + .check() + .map_err(|err| Error::new(ErrorKind::InvalidInput, err.to_string()))?; Ok(sp) } @@ -391,7 +407,9 @@ fn parse_webhdfs_params(l: &mut UriLocation) -> Result { delegation, }); - l.connection.check()?; + l.connection + .check() + .map_err(|err| Error::new(ErrorKind::InvalidInput, err.to_string()))?; Ok(sp) } @@ -426,7 +444,9 @@ fn parse_huggingface_params(l: &mut UriLocation, root: String) -> Result self.create_derived_column_binding( async_func.display_name.clone(), async_func.return_type.as_ref().clone(), + Some(item.scalar.clone()), + ), + _ => self.create_derived_column_binding( + item.alias.clone(), + item.scalar.data_type()?, + Some(item.scalar.clone()), ), - _ => { - self.create_derived_column_binding(item.alias.clone(), item.scalar.data_type()?) 
- } }; if is_grouping_sets_item { diff --git a/src/query/sql/src/planner/binder/project_set.rs b/src/query/sql/src/planner/binder/project_set.rs index a2521d2b006c7..e63da88382401 100644 --- a/src/query/sql/src/planner/binder/project_set.rs +++ b/src/query/sql/src/planner/binder/project_set.rs @@ -169,10 +169,11 @@ impl Binder { let return_types = srf_expr.data_type().as_tuple().unwrap(); // Add result column to metadata - let column_index = self - .metadata - .write() - .add_derived_column(name.clone(), srf_expr.data_type().clone()); + let column_index = self.metadata.write().add_derived_column( + name.clone(), + srf_expr.data_type().clone(), + Some(srf_scalar.clone()), + ); let column = ColumnBindingBuilder::new( name.clone(), column_index, diff --git a/src/query/sql/src/planner/binder/replace.rs b/src/query/sql/src/planner/binder/replace.rs index 670672e7e5041..ebc547c023729 100644 --- a/src/query/sql/src/planner/binder/replace.rs +++ b/src/query/sql/src/planner/binder/replace.rs @@ -25,8 +25,6 @@ use databend_common_meta_app::principal::OnErrorMode; use crate::binder::Binder; use crate::normalize_identifier; -use crate::optimizer::optimize; -use crate::optimizer::OptimizerContext; use crate::plans::insert::InsertValue; use crate::plans::CopyIntoTableMode; use crate::plans::InsertInputSource; @@ -161,10 +159,7 @@ impl Binder { InsertSource::Select { query } => { let statement = Statement::Query(query); let select_plan = self.bind_statement(bind_context, &statement).await?; - let opt_ctx = OptimizerContext::new(self.ctx.clone(), self.metadata.clone()) - .with_enable_distributed_optimization(false); - let optimized_plan = optimize(opt_ctx, select_plan).await?; - Ok(InsertInputSource::SelectPlan(Box::new(optimized_plan))) + Ok(InsertInputSource::SelectPlan(Box::new(select_plan))) } }; diff --git a/src/query/sql/src/planner/binder/select.rs b/src/query/sql/src/planner/binder/select.rs index 0c2cecb702c49..eaa7a96b78fe5 100644 --- 
a/src/query/sql/src/planner/binder/select.rs +++ b/src/query/sql/src/planner/binder/select.rs @@ -37,9 +37,9 @@ use databend_common_ast::ast::SelectTarget; use databend_common_ast::ast::SetExpr; use databend_common_ast::ast::SetOperator; use databend_common_ast::ast::TableReference; +use databend_common_ast::Span; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use databend_common_expression::type_check::common_super_type; use databend_common_expression::types::DataType; use databend_common_expression::ROW_ID_COLUMN_ID; @@ -726,17 +726,6 @@ impl Binder { .enumerate() { let left_index = if *left_col.data_type != coercion_types[idx] { - let new_column_index = self - .metadata - .write() - .add_derived_column(left_col.column_name.clone(), coercion_types[idx].clone()); - let column_binding = ColumnBindingBuilder::new( - left_col.column_name.clone(), - new_column_index, - Box::new(coercion_types[idx].clone()), - Visibility::Visible, - ) - .build(); let left_coercion_expr = CastExpr { span: left_span, is_try: false, @@ -749,6 +738,18 @@ impl Binder { ), target_type: Box::new(coercion_types[idx].clone()), }; + let new_column_index = self.metadata.write().add_derived_column( + left_col.column_name.clone(), + coercion_types[idx].clone(), + Some(ScalarExpr::CastExpr(left_coercion_expr.clone())), + ); + let column_binding = ColumnBindingBuilder::new( + left_col.column_name.clone(), + new_column_index, + Box::new(coercion_types[idx].clone()), + Visibility::Visible, + ) + .build(); left_scalar_items.push(ScalarItem { scalar: left_coercion_expr.into(), index: new_column_index, @@ -760,10 +761,6 @@ impl Binder { left_col.index }; let right_index = if *right_col.data_type != coercion_types[idx] { - let new_column_index = self - .metadata - .write() - .add_derived_column(right_col.column_name.clone(), coercion_types[idx].clone()); let right_coercion_expr = CastExpr { span: right_span, is_try: false, @@ -776,6 
+773,11 @@ impl Binder { ), target_type: Box::new(coercion_types[idx].clone()), }; + let new_column_index = self.metadata.write().add_derived_column( + right_col.column_name.clone(), + coercion_types[idx].clone(), + Some(ScalarExpr::CastExpr(right_coercion_expr.clone())), + ); right_scalar_items.push(ScalarItem { scalar: right_coercion_expr.into(), index: new_column_index, diff --git a/src/query/sql/src/planner/binder/sort.rs b/src/query/sql/src/planner/binder/sort.rs index 2e729b50846a6..961da1ae03bbb 100644 --- a/src/query/sql/src/planner/binder/sort.rs +++ b/src/query/sql/src/planner/binder/sort.rs @@ -161,6 +161,7 @@ impl Binder { self.create_derived_column_binding( format!("{:#}", order.expr), rewrite_scalar.data_type()?, + Some(rewrite_scalar.clone()), ) }; let item = ScalarItem { diff --git a/src/query/sql/src/planner/binder/table.rs b/src/query/sql/src/planner/binder/table.rs index edd948c1ed001..e2543361d63a7 100644 --- a/src/query/sql/src/planner/binder/table.rs +++ b/src/query/sql/src/planner/binder/table.rs @@ -42,6 +42,7 @@ use databend_common_ast::ast::TimeTravelPoint; use databend_common_ast::ast::UriLocation; use databend_common_ast::parser::parse_sql; use databend_common_ast::parser::tokenize_sql; +use databend_common_ast::Span; use databend_common_catalog::catalog_kind::CATALOG_DEFAULT; use databend_common_catalog::plan::ParquetReadOptions; use databend_common_catalog::plan::StageTableInfo; @@ -53,7 +54,6 @@ use databend_common_catalog::table_context::TableContext; use databend_common_catalog::table_function::TableFunction; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use databend_common_expression::is_stream_column; use databend_common_expression::type_check::check_number; use databend_common_expression::types::DataType; @@ -451,10 +451,11 @@ impl Binder { arguments: vec![scalar.clone()], }); let data_type = field_expr.data_type()?; - let index = self - .metadata - .write() 
- .add_derived_column(field.clone(), data_type.clone()); + let index = self.metadata.write().add_derived_column( + field.clone(), + data_type.clone(), + Some(field_expr.clone()), + ); let column_binding = ColumnBindingBuilder::new( field, @@ -543,10 +544,11 @@ impl Binder { } else { // Add result column to metadata let data_type = srf_result.data_type()?; - let index = self - .metadata - .write() - .add_derived_column(srf.to_string(), data_type.clone()); + let index = self.metadata.write().add_derived_column( + srf.to_string(), + data_type.clone(), + Some(srf_result.clone()), + ); ColumnBindingBuilder::new( srf.to_string(), index, diff --git a/src/query/sql/src/planner/binder/values.rs b/src/query/sql/src/planner/binder/values.rs index fc2ba3ab47372..df8884e653e25 100644 --- a/src/query/sql/src/planner/binder/values.rs +++ b/src/query/sql/src/planner/binder/values.rs @@ -16,10 +16,10 @@ use std::collections::HashMap; use std::sync::Arc; use databend_common_ast::ast::Expr as AExpr; +use databend_common_ast::Span; use databend_common_catalog::table_context::TableContext; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use databend_common_expression::type_check::common_super_type; use databend_common_expression::types::DataType; use databend_common_expression::ColumnBuilder; @@ -34,6 +34,7 @@ use indexmap::IndexMap; use crate::binder::wrap_cast; use crate::optimizer::ColumnSet; use crate::optimizer::SExpr; +use crate::plans::BoundColumnRef; use crate::plans::ConstantTableScan; use crate::BindContext; use crate::Binder; @@ -41,6 +42,7 @@ use crate::ColumnBindingBuilder; use crate::MetadataRef; use crate::NameResolutionContext; use crate::ScalarBinder; +use crate::ScalarExpr; use crate::Visibility; impl Binder { @@ -183,9 +185,7 @@ pub async fn bind_values( let mut columns = ColumnSet::new(); let mut fields = Vec::with_capacity(values.len()); for value_field in value_schema.fields() { - let index = 
metadata - .write() - .add_derived_column(value_field.name().clone(), value_field.data_type().clone()); + let index = metadata.read().columns().len(); columns.insert(index); let column_binding = ColumnBindingBuilder::new( @@ -195,6 +195,14 @@ pub async fn bind_values( Visibility::Visible, ) .build(); + let _ = metadata.write().add_derived_column( + value_field.name().clone(), + value_field.data_type().clone(), + Some(ScalarExpr::BoundColumnRef(BoundColumnRef { + span, + column: column_binding.clone(), + })), + ); bind_context.add_column_binding(column_binding); let field = DataField::new(&index.to_string(), value_field.data_type().clone()); diff --git a/src/query/sql/src/planner/binder/window.rs b/src/query/sql/src/planner/binder/window.rs index d44cfb8bb9880..eb5699c36733e 100644 --- a/src/query/sql/src/planner/binder/window.rs +++ b/src/query/sql/src/planner/binder/window.rs @@ -17,9 +17,9 @@ use std::sync::Arc; use databend_common_ast::ast::WindowDefinition; use databend_common_ast::ast::WindowSpec; +use databend_common_ast::Span; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use super::select::SelectList; use crate::binder::ColumnBinding; @@ -28,10 +28,13 @@ use crate::optimizer::SExpr; use crate::plans::walk_expr_mut; use crate::plans::AggregateFunction; use crate::plans::BoundColumnRef; +use crate::plans::EvalScalar; use crate::plans::LagLeadFunction; use crate::plans::NthValueFunction; use crate::plans::ScalarExpr; use crate::plans::ScalarItem; +use crate::plans::Sort; +use crate::plans::SortItem; use crate::plans::SubqueryExpr; use crate::plans::VisitorMut; use crate::plans::Window; @@ -63,6 +66,66 @@ impl Binder { limit: None, }; + // eval scalars before sort + // Generate a `EvalScalar` as the input of `Window`. 
+ let mut scalar_items: Vec = Vec::new(); + for arg in &window_plan.arguments { + scalar_items.push(arg.clone()); + } + for part in &window_plan.partition_by { + scalar_items.push(part.clone()); + } + for order in &window_plan.order_by { + scalar_items.push(order.order_by_item.clone()) + } + + let child = if !scalar_items.is_empty() { + let eval_scalar_plan = EvalScalar { + items: scalar_items, + }; + SExpr::create_unary(Arc::new(eval_scalar_plan.into()), Arc::new(child)) + } else { + child + }; + + let default_nulls_first = !self + .ctx + .get_settings() + .get_sql_dialect() + .unwrap() + .is_null_biggest(); + + let mut sort_items: Vec = vec![]; + if !window_plan.partition_by.is_empty() { + for part in window_plan.partition_by.iter() { + sort_items.push(SortItem { + index: part.index, + asc: true, + nulls_first: default_nulls_first, + }); + } + } + + for order in window_plan.order_by.iter() { + sort_items.push(SortItem { + index: order.order_by_item.index, + asc: order.asc.unwrap_or(true), + nulls_first: order.nulls_first.unwrap_or(default_nulls_first), + }); + } + + let child = if !sort_items.is_empty() { + let sort_plan = Sort { + items: sort_items, + limit: window_plan.limit, + after_exchange: None, + pre_projection: None, + }; + SExpr::create_unary(Arc::new(sort_plan.into()), Arc::new(child)) + } else { + child + }; + Ok(SExpr::create_unary( Arc::new(window_plan.into()), Arc::new(child), @@ -330,10 +393,11 @@ impl<'a> WindowRewriter<'a> { }); } - let index = self - .metadata - .write() - .add_derived_column(window.display_name.clone(), window.func.return_type()); + let index = self.metadata.write().add_derived_column( + window.display_name.clone(), + window.func.return_type(), + Some(ScalarExpr::WindowFunction(window.clone())), + ); // create window info let window_info = WindowFunctionInfo { @@ -405,10 +469,11 @@ impl<'a> WindowRewriter<'a> { Ok(col.clone()) } else { let ty = arg.data_type()?; - let index = self - .metadata - .write() - 
.add_derived_column(name.to_string(), ty.clone()); + let index = self.metadata.write().add_derived_column( + name.to_string(), + ty.clone(), + Some(arg.clone()), + ); // Generate a ColumnBinding for each argument of aggregates let column = ColumnBindingBuilder::new( diff --git a/src/query/sql/src/planner/format/display_plan.rs b/src/query/sql/src/planner/format/display_plan.rs index cc2872b742f5c..129de43e4c4b4 100644 --- a/src/query/sql/src/planner/format/display_plan.rs +++ b/src/query/sql/src/planner/format/display_plan.rs @@ -46,7 +46,7 @@ impl Plan { s_expr, metadata, .. } => { let metadata = &*metadata.read(); - s_expr.to_format_tree(metadata, verbose)?.format_pretty() + Ok(s_expr.to_format_tree(metadata, verbose)?.format_pretty()?) } Plan::Explain { kind, plan, .. } => { let result = plan.format_indent(false)?; @@ -143,7 +143,6 @@ impl Plan { // Account Plan::GrantRole(_) => Ok("GrantRole".to_string()), Plan::GrantPriv(_) => Ok("GrantPrivilege".to_string()), - Plan::ShowGrants(_) => Ok("ShowGrants".to_string()), Plan::RevokePriv(_) => Ok("RevokePrivilege".to_string()), Plan::RevokeRole(_) => Ok("RevokeRole".to_string()), Plan::CreateUser(_) => Ok("CreateUser".to_string()), @@ -298,8 +297,10 @@ fn format_create_table(create_table: &CreateTablePlan) -> Result { } => { let metadata = &*metadata.read(); let res = s_expr.to_format_tree(metadata, false)?; - FormatTreeNode::with_children("CreateTableAsSelect".to_string(), vec![res]) - .format_pretty() + Ok( + FormatTreeNode::with_children("CreateTableAsSelect".to_string(), vec![res]) + .format_pretty()?, + ) } _ => Err(ErrorCode::Internal("Invalid create table plan")), }, diff --git a/src/query/sql/src/planner/metadata.rs b/src/query/sql/src/planner/metadata.rs index 25bae3b708487..6071d8f9afc66 100644 --- a/src/query/sql/src/planner/metadata.rs +++ b/src/query/sql/src/planner/metadata.rs @@ -31,6 +31,7 @@ use databend_common_expression::TableField; use parking_lot::RwLock; use crate::optimizer::SExpr; +use 
crate::ScalarExpr; /// Planner use [`usize`] as it's index type. /// @@ -217,12 +218,18 @@ impl Metadata { column_index } - pub fn add_derived_column(&mut self, alias: String, data_type: DataType) -> IndexType { + pub fn add_derived_column( + &mut self, + alias: String, + data_type: DataType, + expr: Option, + ) -> IndexType { let column_index = self.columns.len(); let column_entry = ColumnEntry::DerivedColumn(DerivedColumn { column_index, alias, data_type, + scalar_expr: expr, }); self.columns.push(column_entry); column_index @@ -540,6 +547,9 @@ pub struct DerivedColumn { pub column_index: IndexType, pub alias: String, pub data_type: DataType, + // if the derived column is generated by the scalar expr, save the `scalar_expr`. + // Currently, it's only used by decorrelating subquery. + pub scalar_expr: Option, } #[derive(Clone, Debug)] diff --git a/src/query/sql/src/planner/optimizer/decorrelate/decorrelate.rs b/src/query/sql/src/planner/optimizer/decorrelate/decorrelate.rs index 8346c2133f79e..a5975c420281e 100644 --- a/src/query/sql/src/planner/optimizer/decorrelate/decorrelate.rs +++ b/src/query/sql/src/planner/optimizer/decorrelate/decorrelate.rs @@ -15,9 +15,9 @@ use std::collections::HashSet; use std::sync::Arc; +use databend_common_ast::Span; use databend_common_catalog::table_context::TableContext; use databend_common_exception::Result; -use databend_common_exception::Span; use databend_common_expression::types::DataType; use crate::binder::ColumnBindingBuilder; @@ -328,6 +328,7 @@ impl SubqueryRewriter { self.metadata.write().add_derived_column( "marker".to_string(), DataType::Nullable(Box::new(DataType::Boolean)), + None, ) }; let join_plan = Join { @@ -392,6 +393,7 @@ impl SubqueryRewriter { self.metadata.write().add_derived_column( "marker".to_string(), DataType::Nullable(Box::new(DataType::Boolean)), + None, ) }; let mark_join = Join { diff --git a/src/query/sql/src/planner/optimizer/decorrelate/flatten_plan.rs 
b/src/query/sql/src/planner/optimizer/decorrelate/flatten_plan.rs index facdb3c0fb4fa..f57de1eccad30 100644 --- a/src/query/sql/src/planner/optimizer/decorrelate/flatten_plan.rs +++ b/src/query/sql/src/planner/optimizer/decorrelate/flatten_plan.rs @@ -66,7 +66,7 @@ impl SubqueryRewriter { return Ok(plan.clone()); } // Construct a Scan plan by correlated columns. - // Finally generate a cross join, so we finish flattening the subquery. + // Finally, generate a cross join, so we finish flattening the subquery. let mut metadata = self.metadata.write(); // Currently, we don't support left plan's from clause contains subquery. // Such as: select t2.a from (select a + 1 as a from t) as t2 where (select sum(a) from t as t1 where t1.a < t2.a) = 1; @@ -74,24 +74,55 @@ impl SubqueryRewriter { .table_index_by_column_indexes(correlated_columns) .unwrap(); let mut data_types = Vec::with_capacity(correlated_columns.len()); + let mut scalar_items = vec![]; + let mut scan_columns = ColumnSet::new(); for correlated_column in correlated_columns.iter() { let column_entry = metadata.column(*correlated_column).clone(); let name = column_entry.name(); let data_type = column_entry.data_type(); data_types.push(data_type.clone()); - self.derived_columns.insert( - *correlated_column, - metadata.add_derived_column(name.to_string(), data_type), - ); + let derived_col = metadata.add_derived_column(name.to_string(), data_type, None); + self.derived_columns.insert(*correlated_column, derived_col); + if let ColumnEntry::DerivedColumn(derived_column) = &column_entry { + if let Some(scalar) = &derived_column.scalar_expr { + // Replace columns in `scalar` to derived columns. 
+ let mut scalar = scalar.clone(); + for col in scalar.used_columns().iter() { + if let Some(new_col) = self.derived_columns.get(col) { + scalar.replace_column(*col, *new_col)?; + } else { + scan_columns.insert(*col); + } + } + scalar_items.push(ScalarItem { + scalar, + index: derived_col, + }); + } + } else { + scan_columns.insert(derived_col); + } } let mut logical_get = SExpr::create_leaf(Arc::new( Scan { table_index, - columns: self.derived_columns.values().cloned().collect(), + columns: scan_columns, ..Default::default() } .into(), )); + if !scalar_items.is_empty() { + // Wrap `EvalScalar` to `logical_get`. + logical_get = SExpr::create_unary( + Arc::new( + EvalScalar { + items: scalar_items, + } + .into(), + ), + Arc::new(logical_get), + ); + } if self.ctx.get_cluster().is_empty() { // Wrap logical get with distinct to eliminate duplicates rows. let mut group_items = Vec::with_capacity(self.derived_columns.len()); diff --git a/src/query/sql/src/planner/optimizer/decorrelate/subquery_rewriter.rs b/src/query/sql/src/planner/optimizer/decorrelate/subquery_rewriter.rs index 003101bfb5e33..7ecfe8214fcf0 100644 --- a/src/query/sql/src/planner/optimizer/decorrelate/subquery_rewriter.rs +++ b/src/query/sql/src/planner/optimizer/decorrelate/subquery_rewriter.rs @@ -429,10 +429,11 @@ impl SubqueryRewriter { // For example, `EXISTS(SELECT a FROM t WHERE a > 1)` will be rewritten into // `(SELECT COUNT(*) = 1 FROM t WHERE a > 1 LIMIT 1)`. 
let agg_func = AggregateCountFunction::try_create("", vec![], vec![])?; - let agg_func_index = self - .metadata - .write() - .add_derived_column("count(*)".to_string(), agg_func.return_type()?); + let agg_func_index = self.metadata.write().add_derived_column( + "count(*)".to_string(), + agg_func.return_type()?, + None, + ); let agg = Aggregate { group_items: vec![], @@ -499,6 +500,7 @@ impl SubqueryRewriter { let column_index = self.metadata.write().add_derived_column( "_exists_scalar_subquery".to_string(), DataType::Number(NumberDataType::UInt64), + None, ); output_index = Some(column_index); let eval_scalar = EvalScalar { @@ -575,6 +577,7 @@ impl SubqueryRewriter { self.metadata.write().add_derived_column( "marker".to_string(), DataType::Nullable(Box::new(DataType::Boolean)), + None, ) }; // Consider the sql: select * from t1 where t1.a = any(select t2.a from t2); @@ -652,10 +655,12 @@ impl SubqueryRewriter { let count_idx = self.metadata.write().add_derived_column( "_count_scalar_subquery".to_string(), DataType::Number(NumberDataType::UInt64), + None, ); let any_idx = self.metadata.write().add_derived_column( "_any_scalar_subquery".to_string(), *subquery.output_column.data_type.clone(), + None, ); // Aggregate operator let agg = SExpr::create_unary( @@ -744,6 +749,7 @@ impl SubqueryRewriter { let if_func_idx = self.metadata.write().add_derived_column( "_if_scalar_subquery".to_string(), *subquery.output_column.data_type.clone(), + None, ); let scalar_expr = SExpr::create_unary( Arc::new( diff --git a/src/query/sql/src/planner/optimizer/distributed/sort_and_limit.rs b/src/query/sql/src/planner/optimizer/distributed/sort_and_limit.rs index 2dc99aac40344..4042d921e7bca 100644 --- a/src/query/sql/src/planner/optimizer/distributed/sort_and_limit.rs +++ b/src/query/sql/src/planner/optimizer/distributed/sort_and_limit.rs @@ -108,8 +108,9 @@ impl SortAndLimitPushDownOptimizer { let exchange_sexpr = s_expr.child(0)?; debug_assert!(matches!( exchange_sexpr.plan.as_ref(), - 
RelOperator::Exchange(Exchange::Merge) + RelOperator::Exchange(Exchange::Merge) | RelOperator::Exchange(Exchange::MergeSort) )); + debug_assert!(exchange_sexpr.children.len() == 1); let exchange_sexpr = exchange_sexpr.replace_plan(Arc::new(Exchange::MergeSort.into())); diff --git a/src/query/sql/src/planner/optimizer/filter/pull_up_filter.rs b/src/query/sql/src/planner/optimizer/filter/pull_up_filter.rs index 742a67b7fa864..76fcde9f8b3bc 100644 --- a/src/query/sql/src/planner/optimizer/filter/pull_up_filter.rs +++ b/src/query/sql/src/planner/optimizer/filter/pull_up_filter.rs @@ -180,6 +180,7 @@ impl PullUpFilterOptimizer { let new_index = metadata.write().add_derived_column( column.column.column_name.clone(), *column.column.data_type.clone(), + None, ); let new_column = column.clone(); items.push(ScalarItem { diff --git a/src/query/sql/src/planner/optimizer/format.rs b/src/query/sql/src/planner/optimizer/format.rs index 3d24b2fdfb400..a7ba3cc65b35b 100644 --- a/src/query/sql/src/planner/optimizer/format.rs +++ b/src/query/sql/src/planner/optimizer/format.rs @@ -34,7 +34,7 @@ pub fn display_memo(memo: &Memo) -> Result { let root = FormatTreeNode::with_children("Memo".to_string(), children); - root.format_pretty() + Ok(root.format_pretty()?) 
} pub fn display_rel_op(rel_op: &RelOperator) -> String { diff --git a/src/query/sql/src/planner/optimizer/optimizer.rs b/src/query/sql/src/planner/optimizer/optimizer.rs index 0d12cea0b856c..2506f353b1d79 100644 --- a/src/query/sql/src/planner/optimizer/optimizer.rs +++ b/src/query/sql/src/planner/optimizer/optimizer.rs @@ -15,6 +15,7 @@ use std::collections::HashSet; use std::sync::Arc; +use async_recursion::async_recursion; use databend_common_ast::ast::ExplainKind; use databend_common_catalog::merge_into_join::MergeIntoJoin; use databend_common_catalog::merge_into_join::MergeIntoJoinType; @@ -50,6 +51,7 @@ use crate::plans::Join; use crate::plans::MergeInto; use crate::plans::Plan; use crate::plans::RelOperator; +use crate::InsertInputSource; use crate::MetadataRef; #[derive(Clone, Educe)] @@ -156,6 +158,7 @@ impl<'a> RecursiveOptimizer<'a> { } #[minitrace::trace] +#[async_recursion] pub async fn optimize(opt_ctx: OptimizerContext, plan: Plan) -> Result { match plan { Plan::Query { @@ -224,10 +227,65 @@ pub async fn optimize(opt_ctx: OptimizerContext, plan: Plan) -> Result { "after optimization enable_distributed_copy? 
: {}", plan.enable_distributed ); + + if let Some(p) = &plan.query { + let optimized_plan = optimize(opt_ctx.clone(), *p.clone()).await?; + plan.query = Some(Box::new(optimized_plan)); + } Ok(Plan::CopyIntoTable(plan)) } Plan::MergeInto(plan) => optimize_merge_into(opt_ctx.clone(), plan).await, + // distributed insert will be optimized in `physical_plan_builder` + Plan::Insert(mut plan) => { + match plan.source { + InsertInputSource::SelectPlan(p) => { + let optimized_plan = optimize(opt_ctx.clone(), *p.clone()).await?; + plan.source = InsertInputSource::SelectPlan(Box::new(optimized_plan)); + } + InsertInputSource::Stage(p) => { + let optimized_plan = optimize(opt_ctx.clone(), *p.clone()).await?; + plan.source = InsertInputSource::Stage(Box::new(optimized_plan)); + } + _ => {} + } + Ok(Plan::Insert(plan)) + } + Plan::InsertMultiTable(mut plan) => { + plan.input_source = optimize(opt_ctx.clone(), plan.input_source.clone()).await?; + Ok(Plan::InsertMultiTable(plan)) + } + Plan::Replace(mut plan) => { + match plan.source { + InsertInputSource::SelectPlan(p) => { + let optimized_plan = optimize(opt_ctx.clone(), *p.clone()).await?; + plan.source = InsertInputSource::SelectPlan(Box::new(optimized_plan)); + } + InsertInputSource::Stage(p) => { + let optimized_plan = optimize(opt_ctx.clone(), *p.clone()).await?; + plan.source = InsertInputSource::Stage(Box::new(optimized_plan)); + } + _ => {} + } + Ok(Plan::Replace(plan)) + } + + Plan::CreateTable(mut plan) => { + if let Some(p) = &plan.as_select { + let optimized_plan = optimize(opt_ctx.clone(), *p.clone()).await?; + plan.as_select = Some(Box::new(optimized_plan)); + } + + Ok(Plan::CreateTable(plan)) + } + // Already done in binder + // Plan::RefreshIndex(mut plan) => { + // // use fresh index + // let opt_ctx = + // OptimizerContext::new(opt_ctx.table_ctx.clone(), opt_ctx.metadata.clone()); + // plan.query_plan = Box::new(optimize(opt_ctx.clone(), *plan.query_plan.clone()).await?); + // Ok(Plan::RefreshIndex(plan)) + // 
} // Pass through statements. _ => Ok(plan), } diff --git a/src/query/sql/src/planner/optimizer/rule/transform/rule_eager_aggregation.rs b/src/query/sql/src/planner/optimizer/rule/transform/rule_eager_aggregation.rs index 87eff56dbaad9..c1f05ca09a803 100644 --- a/src/query/sql/src/planner/optimizer/rule/transform/rule_eager_aggregation.rs +++ b/src/query/sql/src/planner/optimizer/rule/transform/rule_eager_aggregation.rs @@ -1324,6 +1324,7 @@ fn add_eager_count(final_agg: &mut Aggregate, metadata: MetadataRef) -> (usize, let eager_count_index = metadata.write().add_derived_column( "count(*)".to_string(), DataType::Number(NumberDataType::UInt64), + None, ); if let ScalarExpr::AggregateFunction(agg) = &mut eager_count_aggregation_function.scalar { agg.func_name = "count".to_string(); @@ -1390,6 +1391,7 @@ fn decompose_avg( let count_index = metadata.write().add_derived_column( format!("avg_count_{}({}.{})", &func_name, table_name, column_name), count_aggregation_function.scalar.data_type()?, + None, ); // AVG => COUNT @@ -1417,6 +1419,7 @@ fn update_aggregate_and_eval( let new_index = metadata.write().add_derived_column( format!("_eager_final_{}", &func_name), final_aggregate_function.scalar.data_type()?, + None, ); // Modify final aggregate functions. @@ -1464,6 +1467,7 @@ fn create_eager_count_multiply_scalar_item( let new_index = metadata.write().add_derived_column( format!("{} * _eager_count", aggregate_function.display_name), aggregate_function.args[0].data_type()?, + None, ); let new_scalar = if let ScalarExpr::BoundColumnRef(column) = &aggregate_function.args[0] { diff --git a/src/query/sql/src/planner/planner.rs b/src/query/sql/src/planner/planner.rs index d7e8840ab021c..3d453b48b11f2 100644 --- a/src/query/sql/src/planner/planner.rs +++ b/src/query/sql/src/planner/planner.rs @@ -124,10 +124,10 @@ impl Planner { .take_while(|token| token.is_ok()) // Make sure the tokens stream is always ended with EOI. 
.chain(std::iter::once(Ok(Token::new_eoi(&final_sql)))) - .collect::>() + .collect::>() .unwrap() } else { - (&mut tokenizer).collect::>()? + (&mut tokenizer).collect::>()? }; loop { diff --git a/src/query/sql/src/planner/plans/ddl/account.rs b/src/query/sql/src/planner/plans/ddl/account.rs index 37fa881ee2d74..153ce5a048424 100644 --- a/src/query/sql/src/planner/plans/ddl/account.rs +++ b/src/query/sql/src/planner/plans/ddl/account.rs @@ -71,24 +71,6 @@ pub struct GrantRolePlan { pub role: String, } -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ShowGrantsPlan { - pub principal: Option, -} - -impl ShowGrantsPlan { - pub fn schema(&self) -> DataSchemaRef { - DataSchemaRefExt::create(vec![ - DataField::new("Privileges", DataType::String), - DataField::new("Object Name", DataType::String), - DataField::new("Object Id", DataType::Nullable(Box::new(DataType::String))), - DataField::new("Grant To", DataType::String), - DataField::new("Name", DataType::String), - DataField::new("Grants", DataType::String), - ]) - } -} - #[derive(Clone, Debug, PartialEq, Eq)] pub struct RevokeRolePlan { pub principal: PrincipalIdentity, diff --git a/src/query/sql/src/planner/plans/eval_scalar.rs b/src/query/sql/src/planner/plans/eval_scalar.rs index 7b833ead17dce..6dba8a7fe5c54 100644 --- a/src/query/sql/src/planner/plans/eval_scalar.rs +++ b/src/query/sql/src/planner/plans/eval_scalar.rs @@ -37,6 +37,7 @@ pub struct EvalScalar { #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct ScalarItem { pub scalar: ScalarExpr, + // The index of the derived column in metadata pub index: IndexType, } diff --git a/src/query/sql/src/planner/plans/plan.rs b/src/query/sql/src/planner/plans/plan.rs index 90d984fefed88..81042a017313f 100644 --- a/src/query/sql/src/planner/plans/plan.rs +++ b/src/query/sql/src/planner/plans/plan.rs @@ -24,6 +24,8 @@ use databend_common_expression::DataSchema; use databend_common_expression::DataSchemaRef; use databend_common_expression::DataSchemaRefExt; +use 
super::Exchange; +use super::RelOperator; use crate::binder::ExplainConfig; use crate::optimizer::SExpr; use crate::plans::copy_into_location::CopyIntoLocationPlan; @@ -136,7 +138,6 @@ use crate::plans::ShowCreateDatabasePlan; use crate::plans::ShowCreateTablePlan; use crate::plans::ShowFileFormatsPlan; use crate::plans::ShowGrantTenantsOfSharePlan; -use crate::plans::ShowGrantsPlan; use crate::plans::ShowNetworkPoliciesPlan; use crate::plans::ShowObjectGrantPrivilegesPlan; use crate::plans::ShowRolesPlan; @@ -275,7 +276,6 @@ pub enum Plan { DropRole(Box), GrantRole(Box), GrantPriv(Box), - ShowGrants(Box), RevokePriv(Box), RevokeRole(Box), SetRole(Box), @@ -396,6 +396,7 @@ pub enum RewriteKind { ListStage, ShowRoles, ShowPasswordPolicies, + ShowGrants, Call, } @@ -454,7 +455,6 @@ impl Plan { Plan::ExistsTable(plan) => plan.schema(), Plan::DescribeView(plan) => plan.schema(), Plan::ShowRoles(plan) => plan.schema(), - Plan::ShowGrants(plan) => plan.schema(), Plan::ShowFileFormats(plan) => plan.schema(), Plan::Replace(plan) => plan.schema(), Plan::Presign(plan) => plan.schema(), @@ -488,4 +488,29 @@ impl Plan { pub fn has_result_set(&self) -> bool { !self.schema().fields().is_empty() } + + pub fn remove_exchange_for_select(&self) -> Self { + if let Plan::Query { + s_expr, + metadata, + bind_context, + rewrite_kind, + formatted_ast, + ignore_result, + } = self + { + if let RelOperator::Exchange(Exchange::Merge) = s_expr.plan.as_ref() { + let s_expr = Box::new(s_expr.child(0).unwrap().clone()); + return Plan::Query { + s_expr, + metadata: metadata.clone(), + bind_context: bind_context.clone(), + rewrite_kind: rewrite_kind.clone(), + formatted_ast: formatted_ast.clone(), + ignore_result: *ignore_result, + }; + } + } + self.clone() + } } diff --git a/src/query/sql/src/planner/plans/scalar_expr.rs b/src/query/sql/src/planner/plans/scalar_expr.rs index da1e9831df63c..810479ed87d1a 100644 --- a/src/query/sql/src/planner/plans/scalar_expr.rs +++ 
b/src/query/sql/src/planner/plans/scalar_expr.rs @@ -16,11 +16,11 @@ use std::hash::Hash; use std::hash::Hasher; use databend_common_ast::ast::BinaryOperator; +use databend_common_ast::Range; +use databend_common_ast::Span; use databend_common_async_functions::AsyncFunctionCall; use databend_common_exception::ErrorCode; -use databend_common_exception::Range; use databend_common_exception::Result; -use databend_common_exception::Span; use databend_common_expression::types::DataType; use databend_common_expression::RemoteExpr; use databend_common_expression::Scalar; diff --git a/src/query/sql/src/planner/plans/window.rs b/src/query/sql/src/planner/plans/window.rs index c43ed422b1236..3a95e8893c24b 100644 --- a/src/query/sql/src/planner/plans/window.rs +++ b/src/query/sql/src/planner/plans/window.rs @@ -16,10 +16,10 @@ use std::fmt::Display; use std::fmt::Formatter; use std::sync::Arc; +use databend_common_ast::Span; use databend_common_catalog::table_context::TableContext; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use databend_common_expression::types::DataType; use databend_common_expression::types::NumberDataType; use databend_common_expression::Scalar; diff --git a/src/query/sql/src/planner/semantic/type_check.rs b/src/query/sql/src/planner/semantic/type_check.rs index 41b2a3414bb50..c06f4005d4b7e 100644 --- a/src/query/sql/src/planner/semantic/type_check.rs +++ b/src/query/sql/src/planner/semantic/type_check.rs @@ -45,6 +45,7 @@ use databend_common_ast::ast::WindowFrameUnits; use databend_common_ast::parser::parse_expr; use databend_common_ast::parser::tokenize_sql; use databend_common_ast::parser::Dialect; +use databend_common_ast::Span; use databend_common_async_functions::resolve_async_function; use databend_common_catalog::catalog::CatalogManager; use databend_common_catalog::plan::InternalColumn; @@ -56,7 +57,6 @@ use databend_common_compress::DecompressDecoder; use 
databend_common_config::GlobalConfig; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use databend_common_expression::infer_schema_type; use databend_common_expression::shrink_scalar; use databend_common_expression::type_check; diff --git a/src/query/sql/src/planner/semantic/udf_rewriter.rs b/src/query/sql/src/planner/semantic/udf_rewriter.rs index 1549b1a9ff2f3..02b374e460123 100644 --- a/src/query/sql/src/planner/semantic/udf_rewriter.rs +++ b/src/query/sql/src/planner/semantic/udf_rewriter.rs @@ -157,10 +157,11 @@ impl<'a> VisitorMut<'a> for UdfRewriter { column_ref.clone() } else { let name = format!("{}_arg_{}", &udf.display_name, i); - let index = self - .metadata - .write() - .add_derived_column(name.clone(), arg.data_type()?); + let index = self.metadata.write().add_derived_column( + name.clone(), + arg.data_type()?, + Some(arg.clone()), + ); // Generate a ColumnBinding for each argument of udf function let column = ColumnBindingBuilder::new( @@ -186,10 +187,11 @@ impl<'a> VisitorMut<'a> for UdfRewriter { let index = match self.udf_functions_index_map.get(&udf.display_name) { Some(index) => *index, - None => self - .metadata - .write() - .add_derived_column(udf.display_name.clone(), (*udf.return_type).clone()), + None => self.metadata.write().add_derived_column( + udf.display_name.clone(), + (*udf.return_type).clone(), + Some(ScalarExpr::UDFCall(udf.clone())), + ), }; // Generate a ColumnBinding for the udf function diff --git a/src/query/storages/common/index/Cargo.toml b/src/query/storages/common/index/Cargo.toml index e4023c7188a1e..de94037bdeb9f 100644 --- a/src/query/storages/common/index/Cargo.toml +++ b/src/query/storages/common/index/Cargo.toml @@ -16,6 +16,7 @@ ignored = ["xorfilter-rs", "match-template"] [dependencies] databend-common-arrow = { path = "../../../../common/arrow" } +databend-common-ast = { path = "../../../ast" } databend-common-exception = { path = 
"../../../../common/exception" } databend-common-expression = { path = "../../../expression" } databend-common-functions = { path = "../../../functions" } diff --git a/src/query/storages/common/index/src/bloom_index.rs b/src/query/storages/common/index/src/bloom_index.rs index 7ece1c0c73ec5..955acb84ffc4d 100644 --- a/src/query/storages/common/index/src/bloom_index.rs +++ b/src/query/storages/common/index/src/bloom_index.rs @@ -19,9 +19,9 @@ use std::sync::Arc; use databend_common_arrow::arrow::bitmap::Bitmap; use databend_common_arrow::arrow::buffer::Buffer; +use databend_common_ast::Span; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_exception::Span; use databend_common_expression::converts::datavalues::scalar_to_datavalue; use databend_common_expression::eval_function; use databend_common_expression::types::boolean::BooleanDomain; diff --git a/src/tests/sqlsmith/Cargo.toml b/src/tests/sqlsmith/Cargo.toml index 23d157bb6719d..79780ace45afa 100644 --- a/src/tests/sqlsmith/Cargo.toml +++ b/src/tests/sqlsmith/Cargo.toml @@ -23,7 +23,6 @@ tracing = "0.1.37" tracing-subscriber = "0.3.17" databend-common-ast = { path = "../../query/ast" } -databend-common-exception = { path = "../../common/exception" } databend-common-expression = { path = "../../query/expression" } databend-common-formats = { path = "../../query/formats" } databend-common-functions = { path = "../../query/functions" } diff --git a/src/tests/sqlsmith/src/sql_gen/dml.rs b/src/tests/sqlsmith/src/sql_gen/dml.rs index 1685c37504b47..ba9ee5aacc87b 100644 --- a/src/tests/sqlsmith/src/sql_gen/dml.rs +++ b/src/tests/sqlsmith/src/sql_gen/dml.rs @@ -37,7 +37,7 @@ use databend_common_ast::ast::TableReference; use databend_common_ast::ast::UnmatchedClause; use databend_common_ast::ast::UpdateExpr; use databend_common_ast::ast::UpdateStmt; -use databend_common_exception::Span; +use databend_common_ast::Span; use databend_common_expression::types::DataType; use 
databend_common_expression::Column; use databend_common_expression::ScalarRef; diff --git a/tests/cloud_control_server/simple_server.py b/tests/cloud_control_server/simple_server.py index f2371ae0c46fc..0ef3de850ee90 100644 --- a/tests/cloud_control_server/simple_server.py +++ b/tests/cloud_control_server/simple_server.py @@ -44,9 +44,9 @@ def load_data_from_json(): notification_history_data = json.load(f) notification_history = notification_pb2.NotificationHistory() json_format.ParseDict(notification_history_data, notification_history) - NOTIFICATION_HISTORY_DB[notification_history.name] = ( - notification_history - ) + NOTIFICATION_HISTORY_DB[ + notification_history.name + ] = notification_history def create_task_request_to_task(id, create_task_request): diff --git a/tests/cloud_control_server/task_pb2.py b/tests/cloud_control_server/task_pb2.py index 5bba5050fa192..e7992849240eb 100644 --- a/tests/cloud_control_server/task_pb2.py +++ b/tests/cloud_control_server/task_pb2.py @@ -13,7 +13,7 @@ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( - b'\n\ntask.proto\x12\ttaskproto"\xe9\x01\n\x0fScheduleOptions\x12\x15\n\x08interval\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x11\n\x04\x63ron\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\ttime_zone\x18\x03 \x01(\tH\x02\x88\x01\x01\x12>\n\rschedule_type\x18\x04 \x01(\x0e\x32\'.taskproto.ScheduleOptions.ScheduleType"0\n\x0cScheduleType\x12\x11\n\rinterval_type\x10\x00\x12\r\n\tcron_type\x10\x01\x42\x0b\n\t_intervalB\x07\n\x05_cronB\x0c\n\n_time_zone"t\n\x10WarehouseOptions\x12\x16\n\twarehouse\x18\x01 \x01(\tH\x00\x88\x01\x01\x12!\n\x14using_warehouse_size\x18\x02 \x01(\tH\x01\x88\x01\x01\x42\x0c\n\n_warehouseB\x17\n\x15_using_warehouse_size"\x19\n\tScriptSQL\x12\x0c\n\x04sqls\x18\x01 \x03(\t"\xad\x05\n\x11\x43reateTaskRequest\x12\x11\n\ttask_name\x18\x01 \x01(\t\x12\x11\n\ttenant_id\x18\x02 \x01(\t\x12\x12\n\nquery_text\x18\x03 \x01(\t\x12\r\n\x05owner\x18\x04 \x01(\t\x12\x14\n\x07\x63omment\x18\x05 
\x01(\tH\x00\x88\x01\x01\x12\x34\n\x10schedule_options\x18\x07 \x01(\x0b\x32\x1a.taskproto.ScheduleOptions\x12\x36\n\x11warehouse_options\x18\x08 \x01(\x0b\x32\x1b.taskproto.WarehouseOptions\x12,\n\x1fsuspend_task_after_num_failures\x18\t \x01(\x05H\x01\x88\x01\x01\x12\x14\n\x0cif_not_exist\x18\n \x01(\x08\x12\r\n\x05\x61\x66ter\x18\x0b \x03(\t\x12\x1b\n\x0ewhen_condition\x18\x0c \x01(\tH\x02\x88\x01\x01\x12O\n\x12session_parameters\x18\r \x03(\x0b\x32\x33.taskproto.CreateTaskRequest.SessionParametersEntry\x12\x1e\n\x11\x65rror_integration\x18\x0e \x01(\tH\x03\x88\x01\x01\x12-\n\rtask_sql_type\x18\x0f \x01(\x0e\x32\x16.taskproto.TaskSQLType\x12(\n\nscript_sql\x18\x10 \x01(\x0b\x32\x14.taskproto.ScriptSQL\x1a\x38\n\x16SessionParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\n\n\x08_commentB"\n _suspend_task_after_num_failuresB\x11\n\x0f_when_conditionB\x14\n\x12_error_integration"8\n\tTaskError\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x0c\n\x04\x63ode\x18\x03 \x01(\x05"Y\n\x12\x43reateTaskResponse\x12(\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x12\x0f\n\x07task_id\x18\x02 \x01(\x04\x42\x08\n\x06_error"I\n\x0f\x44ropTaskRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x10\n\x08if_exist\x18\x03 \x01(\x08"F\n\x10\x44ropTaskResponse\x12(\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x42\x08\n\x06_error":\n\x12\x45xecuteTaskRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t"I\n\x13\x45xecuteTaskResponse\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x42\x08\n\x06_error"M\n\x13\x44\x65scribeTaskRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x10\n\x08if_exist\x18\x03 \x01(\x08"\x84\x06\n\x04Task\x12\x0f\n\x07task_id\x18\x01 \x01(\x04\x12\x11\n\ttask_name\x18\x02 
\x01(\t\x12\x12\n\nquery_text\x18\x04 \x01(\t\x12\x14\n\x07\x63omment\x18\x05 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05owner\x18\x06 \x01(\t\x12\x34\n\x10schedule_options\x18\x07 \x01(\x0b\x32\x1a.taskproto.ScheduleOptions\x12\x36\n\x11warehouse_options\x18\x08 \x01(\x0b\x32\x1b.taskproto.WarehouseOptions\x12\x1e\n\x11next_scheduled_at\x18\t \x01(\tH\x01\x88\x01\x01\x12,\n\x1fsuspend_task_after_num_failures\x18\n \x01(\x05H\x02\x88\x01\x01\x12&\n\x06status\x18\x0c \x01(\x0e\x32\x16.taskproto.Task.Status\x12\x12\n\ncreated_at\x18\x0e \x01(\t\x12\x12\n\nupdated_at\x18\x0f \x01(\t\x12\x1e\n\x11last_suspended_at\x18\x10 \x01(\tH\x03\x88\x01\x01\x12\r\n\x05\x61\x66ter\x18\x11 \x03(\t\x12\x1b\n\x0ewhen_condition\x18\x12 \x01(\tH\x04\x88\x01\x01\x12\x42\n\x12session_parameters\x18\x13 \x03(\x0b\x32&.taskproto.Task.SessionParametersEntry\x12\x1e\n\x11\x65rror_integration\x18\x14 \x01(\tH\x05\x88\x01\x01\x1a\x38\n\x16SessionParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"$\n\x06Status\x12\r\n\tSuspended\x10\x00\x12\x0b\n\x07Started\x10\x01\x42\n\n\x08_commentB\x14\n\x12_next_scheduled_atB"\n _suspend_task_after_num_failuresB\x14\n\x12_last_suspended_atB\x11\n\x0f_when_conditionB\x14\n\x12_error_integration"i\n\x14\x44\x65scribeTaskResponse\x12\x1d\n\x04task\x18\x01 \x01(\x0b\x32\x0f.taskproto.Task\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x42\x08\n\x06_error"p\n\x10ShowTasksRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tname_like\x18\x02 \x01(\t\x12\x14\n\x0cresult_limit\x18\x04 \x01(\x05\x12\x0e\n\x06owners\x18\x05 \x03(\t\x12\x10\n\x08task_ids\x18\x06 \x03(\t"g\n\x11ShowTasksResponse\x12\x1e\n\x05tasks\x18\x01 \x03(\x0b\x32\x0f.taskproto.Task\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x42\x08\n\x06_error"\xa9\x07\n\x10\x41lterTaskRequest\x12\x11\n\ttask_name\x18\x01 \x01(\t\x12\x11\n\ttenant_id\x18\x02 \x01(\t\x12\x17\n\nquery_text\x18\x03 
\x01(\tH\x00\x88\x01\x01\x12\x14\n\x07\x63omment\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x42\n\x0f\x61lter_task_type\x18\x05 \x01(\x0e\x32).taskproto.AlterTaskRequest.AlterTaskType\x12\r\n\x05owner\x18\x06 \x01(\t\x12\x34\n\x10schedule_options\x18\x07 \x01(\x0b\x32\x1a.taskproto.ScheduleOptions\x12\x10\n\x08if_exist\x18\x08 \x01(\x08\x12\x36\n\x11warehouse_options\x18\t \x01(\x0b\x32\x1b.taskproto.WarehouseOptions\x12,\n\x1fsuspend_task_after_num_failures\x18\n \x01(\x05H\x02\x88\x01\x01\x12\x1b\n\x0ewhen_condition\x18\x0b \x01(\tH\x03\x88\x01\x01\x12\x11\n\tadd_after\x18\x0c \x03(\t\x12\x14\n\x0cremove_after\x18\r \x03(\t\x12\x1e\n\x16set_session_parameters\x18\x0e \x01(\x08\x12N\n\x12session_parameters\x18\x0f \x03(\x0b\x32\x32.taskproto.AlterTaskRequest.SessionParametersEntry\x12\x1e\n\x11\x65rror_integration\x18\x10 \x01(\tH\x04\x88\x01\x01\x12-\n\rtask_sql_type\x18\x11 \x01(\x0e\x32\x16.taskproto.TaskSQLType\x12(\n\nscript_sql\x18\x12 \x01(\x0b\x32\x14.taskproto.ScriptSQL\x1a\x38\n\x16SessionParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"n\n\rAlterTaskType\x12\x0b\n\x07Suspend\x10\x00\x12\n\n\x06Resume\x10\x01\x12\x07\n\x03Set\x10\x02\x12\x0c\n\x08ModifyAs\x10\x03\x12\x0e\n\nModifyWhen\x10\x04\x12\x0c\n\x08\x41\x64\x64\x41\x66ter\x10\x05\x12\x0f\n\x0bRemoveAfter\x10\x06\x42\r\n\x0b_query_textB\n\n\x08_commentB"\n _suspend_task_after_num_failuresB\x11\n\x0f_when_conditionB\x14\n\x12_error_integration"f\n\x11\x41lterTaskResponse\x12(\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x12\x1d\n\x04task\x18\x02 \x01(\x0b\x32\x0f.taskproto.TaskB\x08\n\x06_error"\xd3\x02\n\x13ShowTaskRunsRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x1c\n\x14scheduled_time_start\x18\x02 \x01(\t\x12\x1a\n\x12scheduled_time_end\x18\x03 \x01(\t\x12\x14\n\x0cresult_limit\x18\x04 \x01(\x05\x12\x12\n\nerror_only\x18\x05 \x01(\x08\x12\x0e\n\x06owners\x18\x06 \x03(\t\x12\x10\n\x08task_ids\x18\x07 
\x03(\t\x12\x11\n\ttask_name\x18\x08 \x01(\t\x12\x16\n\tpage_size\x18Z \x01(\x05H\x00\x88\x01\x01\x12\x1c\n\x0fnext_page_token\x18[ \x01(\x03H\x01\x88\x01\x01\x12 \n\x13previous_page_token\x18\\ \x01(\x03H\x02\x88\x01\x01\x42\x0c\n\n_page_sizeB\x12\n\x10_next_page_tokenB\x16\n\x14_previous_page_token"\xcd\x05\n\x07TaskRun\x12\x0f\n\x07task_id\x18\x01 \x01(\x04\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x12\n\nquery_text\x18\x04 \x01(\t\x12\x14\n\x07\x63omment\x18\x05 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05owner\x18\x06 \x01(\t\x12\x34\n\x10schedule_options\x18\x07 \x01(\x0b\x32\x1a.taskproto.ScheduleOptions\x12\x0e\n\x06run_id\x18\x08 \x01(\t\x12\x16\n\x0e\x61ttempt_number\x18\t \x01(\x05\x12\x36\n\x11warehouse_options\x18\n \x01(\x0b\x32\x1b.taskproto.WarehouseOptions\x12\'\n\x05state\x18\x0b \x01(\x0e\x32\x18.taskproto.TaskRun.State\x12\x12\n\nerror_code\x18\x0c \x01(\x03\x12\x1a\n\rerror_message\x18\r \x01(\tH\x01\x88\x01\x01\x12\x16\n\x0escheduled_time\x18\x0e \x01(\t\x12\x1b\n\x0e\x63ompleted_time\x18\x10 \x01(\tH\x02\x88\x01\x01\x12\x10\n\x08query_id\x18\x11 \x01(\t\x12\x16\n\x0e\x63ondition_text\x18\x12 \x01(\t\x12\x14\n\x0croot_task_id\x18\x13 \x01(\t\x12\x45\n\x12session_parameters\x18\x14 \x03(\x0b\x32).taskproto.TaskRun.SessionParametersEntry\x1a\x38\n\x16SessionParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"O\n\x05State\x12\r\n\tSCHEDULED\x10\x00\x12\r\n\tEXECUTING\x10\x01\x12\r\n\tSUCCEEDED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tCANCELLED\x10\x04\x42\n\n\x08_commentB\x10\n\x0e_error_messageB\x11\n\x0f_completed_time"\xdd\x01\n\x14ShowTaskRunsResponse\x12%\n\ttask_runs\x18\x01 \x03(\x0b\x32\x12.taskproto.TaskRun\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x12\x1c\n\x0fnext_page_token\x18\x06 \x01(\x03H\x01\x88\x01\x01\x12 \n\x13previous_page_token\x18\x07 
\x01(\x03H\x02\x88\x01\x01\x42\x08\n\x06_errorB\x12\n\x10_next_page_tokenB\x16\n\x14_previous_page_token"S\n\x18GetTaskDependentsRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x11\n\trecursive\x18\x03 \x01(\x08"n\n\x19GetTaskDependentsResponse\x12\x1d\n\x04task\x18\x01 \x03(\x0b\x32\x0f.taskproto.Task\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x42\x08\n\x06_error"C\n\x1b\x45nableTaskDependentsRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t"R\n\x1c\x45nableTaskDependentsResponse\x12(\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x42\x08\n\x06_error*"\n\x0bTaskSQLType\x12\x07\n\x03SQL\x10\x00\x12\n\n\x06SCRIPT\x10\x01\x32\xe6\x05\n\x0bTaskService\x12I\n\nCreateTask\x12\x1c.taskproto.CreateTaskRequest\x1a\x1d.taskproto.CreateTaskResponse\x12O\n\x0c\x44\x65scribeTask\x12\x1e.taskproto.DescribeTaskRequest\x1a\x1f.taskproto.DescribeTaskResponse\x12L\n\x0b\x45xecuteTask\x12\x1d.taskproto.ExecuteTaskRequest\x1a\x1e.taskproto.ExecuteTaskResponse\x12\x43\n\x08\x44ropTask\x12\x1a.taskproto.DropTaskRequest\x1a\x1b.taskproto.DropTaskResponse\x12\x46\n\tAlterTask\x12\x1b.taskproto.AlterTaskRequest\x1a\x1c.taskproto.AlterTaskResponse\x12\x46\n\tShowTasks\x12\x1b.taskproto.ShowTasksRequest\x1a\x1c.taskproto.ShowTasksResponse\x12O\n\x0cShowTaskRuns\x12\x1e.taskproto.ShowTaskRunsRequest\x1a\x1f.taskproto.ShowTaskRunsResponse\x12^\n\x11GetTaskDependents\x12#.taskproto.GetTaskDependentsRequest\x1a$.taskproto.GetTaskDependentsResponse\x12g\n\x14\x45nableTaskDependents\x12&.taskproto.EnableTaskDependentsRequest\x1a\'.taskproto.EnableTaskDependentsResponseB!Z\x1f\x64\x61tabend.com/cloudcontrol/protob\x06proto3' + b'\n\ntask.proto\x12\ttaskproto"\xa7\x02\n\x0fScheduleOptions\x12\x15\n\x08interval\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x11\n\x04\x63ron\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\ttime_zone\x18\x03 
\x01(\tH\x02\x88\x01\x01\x12>\n\rschedule_type\x18\x04 \x01(\x0e\x32\'.taskproto.ScheduleOptions.ScheduleType\x12"\n\x15milliseconds_interval\x18\x05 \x01(\x04H\x03\x88\x01\x01"0\n\x0cScheduleType\x12\x11\n\rinterval_type\x10\x00\x12\r\n\tcron_type\x10\x01\x42\x0b\n\t_intervalB\x07\n\x05_cronB\x0c\n\n_time_zoneB\x18\n\x16_milliseconds_interval"t\n\x10WarehouseOptions\x12\x16\n\twarehouse\x18\x01 \x01(\tH\x00\x88\x01\x01\x12!\n\x14using_warehouse_size\x18\x02 \x01(\tH\x01\x88\x01\x01\x42\x0c\n\n_warehouseB\x17\n\x15_using_warehouse_size"\x19\n\tScriptSQL\x12\x0c\n\x04sqls\x18\x01 \x03(\t"\xad\x05\n\x11\x43reateTaskRequest\x12\x11\n\ttask_name\x18\x01 \x01(\t\x12\x11\n\ttenant_id\x18\x02 \x01(\t\x12\x12\n\nquery_text\x18\x03 \x01(\t\x12\r\n\x05owner\x18\x04 \x01(\t\x12\x14\n\x07\x63omment\x18\x05 \x01(\tH\x00\x88\x01\x01\x12\x34\n\x10schedule_options\x18\x07 \x01(\x0b\x32\x1a.taskproto.ScheduleOptions\x12\x36\n\x11warehouse_options\x18\x08 \x01(\x0b\x32\x1b.taskproto.WarehouseOptions\x12,\n\x1fsuspend_task_after_num_failures\x18\t \x01(\x05H\x01\x88\x01\x01\x12\x14\n\x0cif_not_exist\x18\n \x01(\x08\x12\r\n\x05\x61\x66ter\x18\x0b \x03(\t\x12\x1b\n\x0ewhen_condition\x18\x0c \x01(\tH\x02\x88\x01\x01\x12O\n\x12session_parameters\x18\r \x03(\x0b\x32\x33.taskproto.CreateTaskRequest.SessionParametersEntry\x12\x1e\n\x11\x65rror_integration\x18\x0e \x01(\tH\x03\x88\x01\x01\x12-\n\rtask_sql_type\x18\x0f \x01(\x0e\x32\x16.taskproto.TaskSQLType\x12(\n\nscript_sql\x18\x10 \x01(\x0b\x32\x14.taskproto.ScriptSQL\x1a\x38\n\x16SessionParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\n\n\x08_commentB"\n _suspend_task_after_num_failuresB\x11\n\x0f_when_conditionB\x14\n\x12_error_integration"8\n\tTaskError\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x0c\n\x04\x63ode\x18\x03 \x01(\x05"Y\n\x12\x43reateTaskResponse\x12(\n\x05\x65rror\x18\x01 
\x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x12\x0f\n\x07task_id\x18\x02 \x01(\x04\x42\x08\n\x06_error"I\n\x0f\x44ropTaskRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x10\n\x08if_exist\x18\x03 \x01(\x08"F\n\x10\x44ropTaskResponse\x12(\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x42\x08\n\x06_error":\n\x12\x45xecuteTaskRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t"I\n\x13\x45xecuteTaskResponse\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x42\x08\n\x06_error"M\n\x13\x44\x65scribeTaskRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x10\n\x08if_exist\x18\x03 \x01(\x08"\x84\x06\n\x04Task\x12\x0f\n\x07task_id\x18\x01 \x01(\x04\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x12\n\nquery_text\x18\x04 \x01(\t\x12\x14\n\x07\x63omment\x18\x05 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05owner\x18\x06 \x01(\t\x12\x34\n\x10schedule_options\x18\x07 \x01(\x0b\x32\x1a.taskproto.ScheduleOptions\x12\x36\n\x11warehouse_options\x18\x08 \x01(\x0b\x32\x1b.taskproto.WarehouseOptions\x12\x1e\n\x11next_scheduled_at\x18\t \x01(\tH\x01\x88\x01\x01\x12,\n\x1fsuspend_task_after_num_failures\x18\n \x01(\x05H\x02\x88\x01\x01\x12&\n\x06status\x18\x0c \x01(\x0e\x32\x16.taskproto.Task.Status\x12\x12\n\ncreated_at\x18\x0e \x01(\t\x12\x12\n\nupdated_at\x18\x0f \x01(\t\x12\x1e\n\x11last_suspended_at\x18\x10 \x01(\tH\x03\x88\x01\x01\x12\r\n\x05\x61\x66ter\x18\x11 \x03(\t\x12\x1b\n\x0ewhen_condition\x18\x12 \x01(\tH\x04\x88\x01\x01\x12\x42\n\x12session_parameters\x18\x13 \x03(\x0b\x32&.taskproto.Task.SessionParametersEntry\x12\x1e\n\x11\x65rror_integration\x18\x14 \x01(\tH\x05\x88\x01\x01\x1a\x38\n\x16SessionParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"$\n\x06Status\x12\r\n\tSuspended\x10\x00\x12\x0b\n\x07Started\x10\x01\x42\n\n\x08_commentB\x14\n\x12_next_scheduled_atB"\n 
_suspend_task_after_num_failuresB\x14\n\x12_last_suspended_atB\x11\n\x0f_when_conditionB\x14\n\x12_error_integration"i\n\x14\x44\x65scribeTaskResponse\x12\x1d\n\x04task\x18\x01 \x01(\x0b\x32\x0f.taskproto.Task\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x42\x08\n\x06_error"p\n\x10ShowTasksRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\tname_like\x18\x02 \x01(\t\x12\x14\n\x0cresult_limit\x18\x04 \x01(\x05\x12\x0e\n\x06owners\x18\x05 \x03(\t\x12\x10\n\x08task_ids\x18\x06 \x03(\t"g\n\x11ShowTasksResponse\x12\x1e\n\x05tasks\x18\x01 \x03(\x0b\x32\x0f.taskproto.Task\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x42\x08\n\x06_error"\xa9\x07\n\x10\x41lterTaskRequest\x12\x11\n\ttask_name\x18\x01 \x01(\t\x12\x11\n\ttenant_id\x18\x02 \x01(\t\x12\x17\n\nquery_text\x18\x03 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07\x63omment\x18\x04 \x01(\tH\x01\x88\x01\x01\x12\x42\n\x0f\x61lter_task_type\x18\x05 \x01(\x0e\x32).taskproto.AlterTaskRequest.AlterTaskType\x12\r\n\x05owner\x18\x06 \x01(\t\x12\x34\n\x10schedule_options\x18\x07 \x01(\x0b\x32\x1a.taskproto.ScheduleOptions\x12\x10\n\x08if_exist\x18\x08 \x01(\x08\x12\x36\n\x11warehouse_options\x18\t \x01(\x0b\x32\x1b.taskproto.WarehouseOptions\x12,\n\x1fsuspend_task_after_num_failures\x18\n \x01(\x05H\x02\x88\x01\x01\x12\x1b\n\x0ewhen_condition\x18\x0b \x01(\tH\x03\x88\x01\x01\x12\x11\n\tadd_after\x18\x0c \x03(\t\x12\x14\n\x0cremove_after\x18\r \x03(\t\x12\x1e\n\x16set_session_parameters\x18\x0e \x01(\x08\x12N\n\x12session_parameters\x18\x0f \x03(\x0b\x32\x32.taskproto.AlterTaskRequest.SessionParametersEntry\x12\x1e\n\x11\x65rror_integration\x18\x10 \x01(\tH\x04\x88\x01\x01\x12-\n\rtask_sql_type\x18\x11 \x01(\x0e\x32\x16.taskproto.TaskSQLType\x12(\n\nscript_sql\x18\x12 \x01(\x0b\x32\x14.taskproto.ScriptSQL\x1a\x38\n\x16SessionParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01"n\n\rAlterTaskType\x12\x0b\n\x07Suspend\x10\x00\x12\n\n\x06Resume\x10\x01\x12\x07\n\x03Set\x10\x02\x12\x0c\n\x08ModifyAs\x10\x03\x12\x0e\n\nModifyWhen\x10\x04\x12\x0c\n\x08\x41\x64\x64\x41\x66ter\x10\x05\x12\x0f\n\x0bRemoveAfter\x10\x06\x42\r\n\x0b_query_textB\n\n\x08_commentB"\n _suspend_task_after_num_failuresB\x11\n\x0f_when_conditionB\x14\n\x12_error_integration"f\n\x11\x41lterTaskResponse\x12(\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x12\x1d\n\x04task\x18\x02 \x01(\x0b\x32\x0f.taskproto.TaskB\x08\n\x06_error"\xd3\x02\n\x13ShowTaskRunsRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x1c\n\x14scheduled_time_start\x18\x02 \x01(\t\x12\x1a\n\x12scheduled_time_end\x18\x03 \x01(\t\x12\x14\n\x0cresult_limit\x18\x04 \x01(\x05\x12\x12\n\nerror_only\x18\x05 \x01(\x08\x12\x0e\n\x06owners\x18\x06 \x03(\t\x12\x10\n\x08task_ids\x18\x07 \x03(\t\x12\x11\n\ttask_name\x18\x08 \x01(\t\x12\x16\n\tpage_size\x18Z \x01(\x05H\x00\x88\x01\x01\x12\x1c\n\x0fnext_page_token\x18[ \x01(\x03H\x01\x88\x01\x01\x12 \n\x13previous_page_token\x18\\ \x01(\x03H\x02\x88\x01\x01\x42\x0c\n\n_page_sizeB\x12\n\x10_next_page_tokenB\x16\n\x14_previous_page_token"\xcd\x05\n\x07TaskRun\x12\x0f\n\x07task_id\x18\x01 \x01(\x04\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x12\n\nquery_text\x18\x04 \x01(\t\x12\x14\n\x07\x63omment\x18\x05 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05owner\x18\x06 \x01(\t\x12\x34\n\x10schedule_options\x18\x07 \x01(\x0b\x32\x1a.taskproto.ScheduleOptions\x12\x0e\n\x06run_id\x18\x08 \x01(\t\x12\x16\n\x0e\x61ttempt_number\x18\t \x01(\x05\x12\x36\n\x11warehouse_options\x18\n \x01(\x0b\x32\x1b.taskproto.WarehouseOptions\x12\'\n\x05state\x18\x0b \x01(\x0e\x32\x18.taskproto.TaskRun.State\x12\x12\n\nerror_code\x18\x0c \x01(\x03\x12\x1a\n\rerror_message\x18\r \x01(\tH\x01\x88\x01\x01\x12\x16\n\x0escheduled_time\x18\x0e \x01(\t\x12\x1b\n\x0e\x63ompleted_time\x18\x10 \x01(\tH\x02\x88\x01\x01\x12\x10\n\x08query_id\x18\x11 
\x01(\t\x12\x16\n\x0e\x63ondition_text\x18\x12 \x01(\t\x12\x14\n\x0croot_task_id\x18\x13 \x01(\t\x12\x45\n\x12session_parameters\x18\x14 \x03(\x0b\x32).taskproto.TaskRun.SessionParametersEntry\x1a\x38\n\x16SessionParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"O\n\x05State\x12\r\n\tSCHEDULED\x10\x00\x12\r\n\tEXECUTING\x10\x01\x12\r\n\tSUCCEEDED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\r\n\tCANCELLED\x10\x04\x42\n\n\x08_commentB\x10\n\x0e_error_messageB\x11\n\x0f_completed_time"\xdd\x01\n\x14ShowTaskRunsResponse\x12%\n\ttask_runs\x18\x01 \x03(\x0b\x32\x12.taskproto.TaskRun\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x12\x1c\n\x0fnext_page_token\x18\x06 \x01(\x03H\x01\x88\x01\x01\x12 \n\x13previous_page_token\x18\x07 \x01(\x03H\x02\x88\x01\x01\x42\x08\n\x06_errorB\x12\n\x10_next_page_tokenB\x16\n\x14_previous_page_token"S\n\x18GetTaskDependentsRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t\x12\x11\n\trecursive\x18\x03 \x01(\x08"n\n\x19GetTaskDependentsResponse\x12\x1d\n\x04task\x18\x01 \x03(\x0b\x32\x0f.taskproto.Task\x12(\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x42\x08\n\x06_error"C\n\x1b\x45nableTaskDependentsRequest\x12\x11\n\ttenant_id\x18\x01 \x01(\t\x12\x11\n\ttask_name\x18\x02 \x01(\t"R\n\x1c\x45nableTaskDependentsResponse\x12(\n\x05\x65rror\x18\x01 
\x01(\x0b\x32\x14.taskproto.TaskErrorH\x00\x88\x01\x01\x42\x08\n\x06_error*"\n\x0bTaskSQLType\x12\x07\n\x03SQL\x10\x00\x12\n\n\x06SCRIPT\x10\x01\x32\xe6\x05\n\x0bTaskService\x12I\n\nCreateTask\x12\x1c.taskproto.CreateTaskRequest\x1a\x1d.taskproto.CreateTaskResponse\x12O\n\x0c\x44\x65scribeTask\x12\x1e.taskproto.DescribeTaskRequest\x1a\x1f.taskproto.DescribeTaskResponse\x12L\n\x0b\x45xecuteTask\x12\x1d.taskproto.ExecuteTaskRequest\x1a\x1e.taskproto.ExecuteTaskResponse\x12\x43\n\x08\x44ropTask\x12\x1a.taskproto.DropTaskRequest\x1a\x1b.taskproto.DropTaskResponse\x12\x46\n\tAlterTask\x12\x1b.taskproto.AlterTaskRequest\x1a\x1c.taskproto.AlterTaskResponse\x12\x46\n\tShowTasks\x12\x1b.taskproto.ShowTasksRequest\x1a\x1c.taskproto.ShowTasksResponse\x12O\n\x0cShowTaskRuns\x12\x1e.taskproto.ShowTaskRunsRequest\x1a\x1f.taskproto.ShowTaskRunsResponse\x12^\n\x11GetTaskDependents\x12#.taskproto.GetTaskDependentsRequest\x1a$.taskproto.GetTaskDependentsResponse\x12g\n\x14\x45nableTaskDependents\x12&.taskproto.EnableTaskDependentsRequest\x1a\'.taskproto.EnableTaskDependentsResponseB!Z\x1f\x64\x61tabend.com/cloudcontrol/protob\x06proto3' ) _globals = globals() @@ -30,72 +30,72 @@ _ALTERTASKREQUEST_SESSIONPARAMETERSENTRY._serialized_options = b"8\001" _TASKRUN_SESSIONPARAMETERSENTRY._options = None _TASKRUN_SESSIONPARAMETERSENTRY._serialized_options = b"8\001" - _globals["_TASKSQLTYPE"]._serialized_start = 5385 - _globals["_TASKSQLTYPE"]._serialized_end = 5419 + _globals["_TASKSQLTYPE"]._serialized_start = 5447 + _globals["_TASKSQLTYPE"]._serialized_end = 5481 _globals["_SCHEDULEOPTIONS"]._serialized_start = 26 - _globals["_SCHEDULEOPTIONS"]._serialized_end = 259 - _globals["_SCHEDULEOPTIONS_SCHEDULETYPE"]._serialized_start = 175 - _globals["_SCHEDULEOPTIONS_SCHEDULETYPE"]._serialized_end = 223 - _globals["_WAREHOUSEOPTIONS"]._serialized_start = 261 - _globals["_WAREHOUSEOPTIONS"]._serialized_end = 377 - _globals["_SCRIPTSQL"]._serialized_start = 379 - 
_globals["_SCRIPTSQL"]._serialized_end = 404 - _globals["_CREATETASKREQUEST"]._serialized_start = 407 - _globals["_CREATETASKREQUEST"]._serialized_end = 1092 - _globals["_CREATETASKREQUEST_SESSIONPARAMETERSENTRY"]._serialized_start = 947 - _globals["_CREATETASKREQUEST_SESSIONPARAMETERSENTRY"]._serialized_end = 1003 - _globals["_TASKERROR"]._serialized_start = 1094 - _globals["_TASKERROR"]._serialized_end = 1150 - _globals["_CREATETASKRESPONSE"]._serialized_start = 1152 - _globals["_CREATETASKRESPONSE"]._serialized_end = 1241 - _globals["_DROPTASKREQUEST"]._serialized_start = 1243 - _globals["_DROPTASKREQUEST"]._serialized_end = 1316 - _globals["_DROPTASKRESPONSE"]._serialized_start = 1318 - _globals["_DROPTASKRESPONSE"]._serialized_end = 1388 - _globals["_EXECUTETASKREQUEST"]._serialized_start = 1390 - _globals["_EXECUTETASKREQUEST"]._serialized_end = 1448 - _globals["_EXECUTETASKRESPONSE"]._serialized_start = 1450 - _globals["_EXECUTETASKRESPONSE"]._serialized_end = 1523 - _globals["_DESCRIBETASKREQUEST"]._serialized_start = 1525 - _globals["_DESCRIBETASKREQUEST"]._serialized_end = 1602 - _globals["_TASK"]._serialized_start = 1605 - _globals["_TASK"]._serialized_end = 2377 - _globals["_TASK_SESSIONPARAMETERSENTRY"]._serialized_start = 947 - _globals["_TASK_SESSIONPARAMETERSENTRY"]._serialized_end = 1003 - _globals["_TASK_STATUS"]._serialized_start = 2208 - _globals["_TASK_STATUS"]._serialized_end = 2244 - _globals["_DESCRIBETASKRESPONSE"]._serialized_start = 2379 - _globals["_DESCRIBETASKRESPONSE"]._serialized_end = 2484 - _globals["_SHOWTASKSREQUEST"]._serialized_start = 2486 - _globals["_SHOWTASKSREQUEST"]._serialized_end = 2598 - _globals["_SHOWTASKSRESPONSE"]._serialized_start = 2600 - _globals["_SHOWTASKSRESPONSE"]._serialized_end = 2703 - _globals["_ALTERTASKREQUEST"]._serialized_start = 2706 - _globals["_ALTERTASKREQUEST"]._serialized_end = 3643 - _globals["_ALTERTASKREQUEST_SESSIONPARAMETERSENTRY"]._serialized_start = 947 - 
_globals["_ALTERTASKREQUEST_SESSIONPARAMETERSENTRY"]._serialized_end = 1003 - _globals["_ALTERTASKREQUEST_ALTERTASKTYPE"]._serialized_start = 3429 - _globals["_ALTERTASKREQUEST_ALTERTASKTYPE"]._serialized_end = 3539 - _globals["_ALTERTASKRESPONSE"]._serialized_start = 3645 - _globals["_ALTERTASKRESPONSE"]._serialized_end = 3747 - _globals["_SHOWTASKRUNSREQUEST"]._serialized_start = 3750 - _globals["_SHOWTASKRUNSREQUEST"]._serialized_end = 4089 - _globals["_TASKRUN"]._serialized_start = 4092 - _globals["_TASKRUN"]._serialized_end = 4809 - _globals["_TASKRUN_SESSIONPARAMETERSENTRY"]._serialized_start = 947 - _globals["_TASKRUN_SESSIONPARAMETERSENTRY"]._serialized_end = 1003 - _globals["_TASKRUN_STATE"]._serialized_start = 4681 - _globals["_TASKRUN_STATE"]._serialized_end = 4760 - _globals["_SHOWTASKRUNSRESPONSE"]._serialized_start = 4812 - _globals["_SHOWTASKRUNSRESPONSE"]._serialized_end = 5033 - _globals["_GETTASKDEPENDENTSREQUEST"]._serialized_start = 5035 - _globals["_GETTASKDEPENDENTSREQUEST"]._serialized_end = 5118 - _globals["_GETTASKDEPENDENTSRESPONSE"]._serialized_start = 5120 - _globals["_GETTASKDEPENDENTSRESPONSE"]._serialized_end = 5230 - _globals["_ENABLETASKDEPENDENTSREQUEST"]._serialized_start = 5232 - _globals["_ENABLETASKDEPENDENTSREQUEST"]._serialized_end = 5299 - _globals["_ENABLETASKDEPENDENTSRESPONSE"]._serialized_start = 5301 - _globals["_ENABLETASKDEPENDENTSRESPONSE"]._serialized_end = 5383 - _globals["_TASKSERVICE"]._serialized_start = 5422 - _globals["_TASKSERVICE"]._serialized_end = 6164 + _globals["_SCHEDULEOPTIONS"]._serialized_end = 321 + _globals["_SCHEDULEOPTIONS_SCHEDULETYPE"]._serialized_start = 211 + _globals["_SCHEDULEOPTIONS_SCHEDULETYPE"]._serialized_end = 259 + _globals["_WAREHOUSEOPTIONS"]._serialized_start = 323 + _globals["_WAREHOUSEOPTIONS"]._serialized_end = 439 + _globals["_SCRIPTSQL"]._serialized_start = 441 + _globals["_SCRIPTSQL"]._serialized_end = 466 + _globals["_CREATETASKREQUEST"]._serialized_start = 469 + 
_globals["_CREATETASKREQUEST"]._serialized_end = 1154 + _globals["_CREATETASKREQUEST_SESSIONPARAMETERSENTRY"]._serialized_start = 1009 + _globals["_CREATETASKREQUEST_SESSIONPARAMETERSENTRY"]._serialized_end = 1065 + _globals["_TASKERROR"]._serialized_start = 1156 + _globals["_TASKERROR"]._serialized_end = 1212 + _globals["_CREATETASKRESPONSE"]._serialized_start = 1214 + _globals["_CREATETASKRESPONSE"]._serialized_end = 1303 + _globals["_DROPTASKREQUEST"]._serialized_start = 1305 + _globals["_DROPTASKREQUEST"]._serialized_end = 1378 + _globals["_DROPTASKRESPONSE"]._serialized_start = 1380 + _globals["_DROPTASKRESPONSE"]._serialized_end = 1450 + _globals["_EXECUTETASKREQUEST"]._serialized_start = 1452 + _globals["_EXECUTETASKREQUEST"]._serialized_end = 1510 + _globals["_EXECUTETASKRESPONSE"]._serialized_start = 1512 + _globals["_EXECUTETASKRESPONSE"]._serialized_end = 1585 + _globals["_DESCRIBETASKREQUEST"]._serialized_start = 1587 + _globals["_DESCRIBETASKREQUEST"]._serialized_end = 1664 + _globals["_TASK"]._serialized_start = 1667 + _globals["_TASK"]._serialized_end = 2439 + _globals["_TASK_SESSIONPARAMETERSENTRY"]._serialized_start = 1009 + _globals["_TASK_SESSIONPARAMETERSENTRY"]._serialized_end = 1065 + _globals["_TASK_STATUS"]._serialized_start = 2270 + _globals["_TASK_STATUS"]._serialized_end = 2306 + _globals["_DESCRIBETASKRESPONSE"]._serialized_start = 2441 + _globals["_DESCRIBETASKRESPONSE"]._serialized_end = 2546 + _globals["_SHOWTASKSREQUEST"]._serialized_start = 2548 + _globals["_SHOWTASKSREQUEST"]._serialized_end = 2660 + _globals["_SHOWTASKSRESPONSE"]._serialized_start = 2662 + _globals["_SHOWTASKSRESPONSE"]._serialized_end = 2765 + _globals["_ALTERTASKREQUEST"]._serialized_start = 2768 + _globals["_ALTERTASKREQUEST"]._serialized_end = 3705 + _globals["_ALTERTASKREQUEST_SESSIONPARAMETERSENTRY"]._serialized_start = 1009 + _globals["_ALTERTASKREQUEST_SESSIONPARAMETERSENTRY"]._serialized_end = 1065 + 
_globals["_ALTERTASKREQUEST_ALTERTASKTYPE"]._serialized_start = 3491 + _globals["_ALTERTASKREQUEST_ALTERTASKTYPE"]._serialized_end = 3601 + _globals["_ALTERTASKRESPONSE"]._serialized_start = 3707 + _globals["_ALTERTASKRESPONSE"]._serialized_end = 3809 + _globals["_SHOWTASKRUNSREQUEST"]._serialized_start = 3812 + _globals["_SHOWTASKRUNSREQUEST"]._serialized_end = 4151 + _globals["_TASKRUN"]._serialized_start = 4154 + _globals["_TASKRUN"]._serialized_end = 4871 + _globals["_TASKRUN_SESSIONPARAMETERSENTRY"]._serialized_start = 1009 + _globals["_TASKRUN_SESSIONPARAMETERSENTRY"]._serialized_end = 1065 + _globals["_TASKRUN_STATE"]._serialized_start = 4743 + _globals["_TASKRUN_STATE"]._serialized_end = 4822 + _globals["_SHOWTASKRUNSRESPONSE"]._serialized_start = 4874 + _globals["_SHOWTASKRUNSRESPONSE"]._serialized_end = 5095 + _globals["_GETTASKDEPENDENTSREQUEST"]._serialized_start = 5097 + _globals["_GETTASKDEPENDENTSREQUEST"]._serialized_end = 5180 + _globals["_GETTASKDEPENDENTSRESPONSE"]._serialized_start = 5182 + _globals["_GETTASKDEPENDENTSRESPONSE"]._serialized_end = 5292 + _globals["_ENABLETASKDEPENDENTSREQUEST"]._serialized_start = 5294 + _globals["_ENABLETASKDEPENDENTSREQUEST"]._serialized_end = 5361 + _globals["_ENABLETASKDEPENDENTSRESPONSE"]._serialized_start = 5363 + _globals["_ENABLETASKDEPENDENTSRESPONSE"]._serialized_end = 5445 + _globals["_TASKSERVICE"]._serialized_start = 5484 + _globals["_TASKSERVICE"]._serialized_end = 6226 # @@protoc_insertion_point(module_scope) diff --git a/tests/sqllogictests/suites/base/05_ddl/05_0017_ddl_grant_role.test b/tests/sqllogictests/suites/base/05_ddl/05_0017_ddl_grant_role.test index 043ee83087a15..818024a12bf3f 100644 --- a/tests/sqllogictests/suites/base/05_ddl/05_0017_ddl_grant_role.test +++ b/tests/sqllogictests/suites/base/05_ddl/05_0017_ddl_grant_role.test @@ -92,6 +92,72 @@ DROP ROLE `test-role` statement ok DROP USER 'test-user' +statement ok +create or replace table t(id int); + +statement ok +create or replace 
database db1; + +statement ok +create or replace stage s1; + +statement ok +create or replace FUNCTION isnotempty AS(p) -> not(is_null(p)); + +statement ok +grant select on default.t to role role3; + +statement ok +grant all on db1.* to role role2; + +statement ok +grant all on db1.* to role role3; + +statement ok +grant read on stage s1 to role role2; + +statement ok +drop user if exists u_05_0017; + +statement ok +create user u_05_0017 identified by '123'; + +statement ok +grant usage on udf isnotempty to u_05_0017; + +statement ok +grant usage on udf isnotempty to role role2; + +statement ok +grant usage on udf isnotempty to role role3; + +query TT +show grants on udf isnotempty; +---- +USAGE isnotempty NULL ROLE role2 (empty) +USAGE isnotempty NULL ROLE role3 (empty) + +query TT +show grants on udf isnotempty where name!='role2' limit 1; +---- +USAGE isnotempty NULL ROLE role3 (empty) + +query TT +show grants on stage s1; +---- +Read s1 NULL ROLE role2 (empty) + +query TT +select * EXCLUDE(object_id) from show_grants('database', 'db1', 'default'); +---- +CREATE,SELECT,INSERT,UPDATE,DELETE,DROP,ALTER,GRANT db1 ROLE role2 (empty) +CREATE,SELECT,INSERT,UPDATE,DELETE,DROP,ALTER,GRANT db1 ROLE role3 (empty) + +query TT +select * EXCLUDE(object_id) from show_grants('table', 't', 'default', 'default'); +---- +SELECT t ROLE role3 (empty) + statement ok DROP ROLE role1 @@ -101,3 +167,17 @@ DROP ROLE role2 statement ok DROP ROLE role3 +statement ok +drop stage if exists s1; + +statement ok +drop database if exists db1; + +statement ok +drop table if exists t; + +statement ok +drop function if exists isnotempty; + +statement ok +drop user if exists u_05_0017; diff --git a/tests/sqllogictests/suites/base/09_fuse_engine/09_0036_merge_into_without_distributed_enable.test b/tests/sqllogictests/suites/base/09_fuse_engine/09_0036_merge_into_without_distributed_enable.test index 9c79ed3974cfa..e6dd6b1668e8b 100644 --- 
a/tests/sqllogictests/suites/base/09_fuse_engine/09_0036_merge_into_without_distributed_enable.test +++ b/tests/sqllogictests/suites/base/09_fuse_engine/09_0036_merge_into_without_distributed_enable.test @@ -144,7 +144,7 @@ select * from t1 order by a,b,c; 2 b_1 c_1 3 b_2 c_2 -statement error 1065 +statement error 1005 merge into t1 using (select * from t2 ) as t2 on t1.a = t2.a when matched then delete when matched then update set t1.c = t2.c when not matched and t2.c = 'c_8' then insert (a,b,c) values(t2.a,t2.b,t2.c); query TTT diff --git a/tests/sqllogictests/suites/duckdb/issues/general/test_4008.test b/tests/sqllogictests/suites/duckdb/issues/general/test_4008.test index bb992493cb852..52cf94270222b 100644 --- a/tests/sqllogictests/suites/duckdb/issues/general/test_4008.test +++ b/tests/sqllogictests/suites/duckdb/issues/general/test_4008.test @@ -6,7 +6,7 @@ select 1 = all(select 1) ---- 1 -statement error 1002 +statement error 1005 select 1 where 2 + all(select 2) statement ok diff --git a/tests/sqllogictests/suites/mode/cluster/create_table.test b/tests/sqllogictests/suites/mode/cluster/create_table.test new file mode 100644 index 0000000000000..ace5b3c2b23d6 --- /dev/null +++ b/tests/sqllogictests/suites/mode/cluster/create_table.test @@ -0,0 +1,44 @@ +query T +explain create or replace table t2 as select number % 400 d, max(number) from numbers(10000000) group by number limit 3; +---- +CreateTableAsSelect: +(empty) +EvalScalar +├── output columns: [max(number) (#6), d (#7)] +├── expressions: [numbers.number (#4) % 400] +├── estimated rows: 3.00 +└── Limit + ├── output columns: [max(number) (#6), numbers.number (#4)] + ├── limit: 3 + ├── offset: 0 + ├── estimated rows: 3.00 + └── Exchange + ├── output columns: [max(number) (#6), numbers.number (#4)] + ├── exchange type: Merge + └── Limit + ├── output columns: [max(number) (#6), numbers.number (#4)] + ├── limit: 3 + ├── offset: 0 + ├── estimated rows: 3.00 + └── AggregateFinal + ├── output columns: 
[max(number) (#6), numbers.number (#4)] + ├── group by: [number] + ├── aggregate functions: [max(number)] + ├── limit: 3 + ├── estimated rows: 10000000.00 + └── Exchange + ├── output columns: [max(number) (#6), numbers.number (#4)] + ├── exchange type: Hash(0) + └── AggregatePartial + ├── group by: [number] + ├── aggregate functions: [max(number)] + ├── estimated rows: 10000000.00 + └── TableScan + ├── table: default.system.numbers + ├── output columns: [number (#4)] + ├── read rows: 10000000 + ├── read size: 76.29 MiB + ├── partitions total: 153 + ├── partitions scanned: 153 + ├── push downs: [filters: [], limit: NONE] + └── estimated rows: 10000000.00 diff --git a/tests/sqllogictests/suites/mode/cluster/distributed_copy_into_table.test b/tests/sqllogictests/suites/mode/cluster/distributed_copy_into_table.test index 94b9a194d4d2e..80172fe09feb1 100644 --- a/tests/sqllogictests/suites/mode/cluster/distributed_copy_into_table.test +++ b/tests/sqllogictests/suites/mode/cluster/distributed_copy_into_table.test @@ -1,26 +1,12 @@ statement ok set enable_distributed_copy_into = 1; -statement ok -drop table if exists test_order; - -statement ok -drop table if exists random_source; - -statement ok -drop stage if exists test_stage; - -statement ok -drop table if exists parquet_table; - -statement ok -drop stage if exists parquet_stage; statement ok -create stage st FILE_FORMAT = (TYPE = CSV); +create or replace stage st FILE_FORMAT = (TYPE = CSV); statement ok -create table table_random(a int not null,b string not null,c string not null) ENGINE = Random; +create or replace table table_random(a int not null,b string not null,c string not null) ENGINE = Random; statement ok copy into @st from (select a,b,c from table_random limit 1000000); @@ -47,7 +33,7 @@ statement ok copy into @st from (select a,b,c from table_random limit 1000000); statement ok -create table t(a int not null,b string not null,c string not null); +create or replace table t(a int not null,b string not 
null,c string not null); statement ok copy into t from @st force = true; @@ -74,10 +60,10 @@ statement ok set enable_distributed_copy_into = 1; statement ok -create table t_query(a int not null,b string not null,c string not null); +create or replace table t_query(a int not null,b string not null,c string not null); statement ok -create stage st_query FILE_FORMAT = (TYPE = TSV); +create or replace stage st_query FILE_FORMAT = (TYPE = TSV); statement ok copy into @st_query from (select a,b,c from table_random limit 1000000); @@ -100,10 +86,10 @@ select count(*) from t_query; ## add parquet_file_test statement ok -create table parquet_table(a int not null,b string not null,c string not null); +create or replace table parquet_table(a int not null,b string not null,c string not null); statement ok -create stage parquet_stage file_format = (type = parquet); +create or replace stage parquet_stage file_format = (type = parquet); statement ok copy into @parquet_stage from (select a,b,c from table_random limit 100000); @@ -148,10 +134,10 @@ select count(*) from parquet_table; # make sure it's distributed. 
statement ok -create table t_query2(a int not null,b string not null,c string not null); +create or replace table t_query2(a int not null,b string not null,c string not null); statement ok -create stage st_query2 FILE_FORMAT = (TYPE = TSV); +create or replace stage st_query2 FILE_FORMAT = (TYPE = TSV); statement ok copy into @st_query2 from (select a,b,c from table_random limit 10); @@ -178,13 +164,13 @@ select block_count from fuse_snapshot('default','t_query2') limit 1; #test cluster key statement ok -create table test_order(a int not null,b string not null,c timestamp not null) cluster by(to_yyyymmdd(c),a); +create or replace table test_order(a int not null,b string not null,c timestamp not null) cluster by(to_yyyymmdd(c),a); statement ok -create table random_source like test_order Engine = Random; +create or replace table random_source like test_order Engine = Random; statement ok -create stage test_stage; +create or replace stage test_stage; statement ok copy into @test_stage from (select * from random_source limit 4000000) FILE_FORMAT=(type=parquet); diff --git a/tests/sqllogictests/suites/mode/cluster/window.test b/tests/sqllogictests/suites/mode/cluster/window.test index 03aa3b6126382..bd91c38721587 100644 --- a/tests/sqllogictests/suites/mode/cluster/window.test +++ b/tests/sqllogictests/suites/mode/cluster/window.test @@ -81,39 +81,47 @@ Window ├── partition by: [department_id] ├── order by: [salary] ├── frame: [Range: Preceding(None) ~ CurrentRow] -└── Exchange +└── Sort ├── output columns: [e.name (#1), e.salary (#3), d.department_name (#5), d.department_id (#4)] - ├── exchange type: Merge - └── HashJoin - ├── output columns: [e.name (#1), e.salary (#3), d.department_name (#5), d.department_id (#4)] - ├── join type: INNER - ├── build keys: [d.department_id (#4)] - ├── probe keys: [e.department_id (#2)] - ├── filters: [] - ├── estimated rows: 8.00 - ├── Exchange(Build) - │ ├── output columns: [d.department_id (#4), d.department_name (#5)] - │ ├── 
exchange type: Broadcast - │ └── TableScan - │ ├── table: default.default.departments - │ ├── output columns: [department_id (#4), department_name (#5)] - │ ├── read rows: 4 - │ ├── read size: < 1 KiB - │ ├── partitions total: 1 - │ ├── partitions scanned: 1 - │ ├── pruning stats: [segments: , blocks: ] - │ ├── push downs: [filters: [], limit: NONE] - │ └── estimated rows: 4.00 - └── TableScan(Probe) - ├── table: default.default.employees - ├── output columns: [name (#1), department_id (#2), salary (#3)] - ├── read rows: 10 - ├── read size: < 1 KiB - ├── partitions total: 1 - ├── partitions scanned: 1 - ├── pruning stats: [segments: , blocks: ] - ├── push downs: [filters: [], limit: NONE] - └── estimated rows: 10.00 + ├── sort keys: [department_id ASC NULLS LAST, salary DESC NULLS LAST] + ├── estimated rows: 8.00 + └── Exchange + ├── output columns: [e.name (#1), e.salary (#3), d.department_name (#5), d.department_id (#4), #_order_col] + ├── exchange type: Merge + └── Sort + ├── output columns: [e.name (#1), e.salary (#3), d.department_name (#5), d.department_id (#4), #_order_col] + ├── sort keys: [department_id ASC NULLS LAST, salary DESC NULLS LAST] + ├── estimated rows: 8.00 + └── HashJoin + ├── output columns: [e.name (#1), e.salary (#3), d.department_name (#5), d.department_id (#4)] + ├── join type: INNER + ├── build keys: [d.department_id (#4)] + ├── probe keys: [e.department_id (#2)] + ├── filters: [] + ├── estimated rows: 8.00 + ├── Exchange(Build) + │ ├── output columns: [d.department_id (#4), d.department_name (#5)] + │ ├── exchange type: Broadcast + │ └── TableScan + │ ├── table: default.default.departments + │ ├── output columns: [department_id (#4), department_name (#5)] + │ ├── read rows: 4 + │ ├── read size: < 1 KiB + │ ├── partitions total: 1 + │ ├── partitions scanned: 1 + │ ├── pruning stats: [segments: , blocks: ] + │ ├── push downs: [filters: [], limit: NONE] + │ └── estimated rows: 4.00 + └── TableScan(Probe) + ├── table: 
default.default.employees + ├── output columns: [name (#1), department_id (#2), salary (#3)] + ├── read rows: 10 + ├── read size: < 1 KiB + ├── partitions total: 1 + ├── partitions scanned: 1 + ├── pruning stats: [segments: , blocks: ] + ├── push downs: [filters: [], limit: NONE] + └── estimated rows: 10.00 statement ok DROP TABLE employees; diff --git a/tests/sqllogictests/suites/mode/standalone/explain/window.test b/tests/sqllogictests/suites/mode/standalone/explain/window.test index a2fdafddcaca0..2fdefccd6b07b 100644 --- a/tests/sqllogictests/suites/mode/standalone/explain/window.test +++ b/tests/sqllogictests/suites/mode/standalone/explain/window.test @@ -13,16 +13,16 @@ CREATE TABLE empsalary (depname string, empno bigint, salary int, enroll_date da query T explain SELECT depname, empno, salary, sum(salary) OVER (PARTITION BY depname ORDER BY empno) FROM empsalary ORDER BY depname, empno ---- -Sort +Window ├── output columns: [empsalary.depname (#0), empsalary.empno (#1), empsalary.salary (#2), sum(salary) OVER ( PARTITION BY depname ORDER BY empno ) (#4)] -├── sort keys: [depname ASC NULLS LAST, empno ASC NULLS LAST] -├── estimated rows: 0.00 -└── Window - ├── output columns: [empsalary.depname (#0), empsalary.empno (#1), empsalary.salary (#2), sum(salary) OVER ( PARTITION BY depname ORDER BY empno ) (#4)] - ├── aggregate function: [sum(salary)] - ├── partition by: [depname] - ├── order by: [empno] - ├── frame: [Range: Preceding(None) ~ CurrentRow] +├── aggregate function: [sum(salary)] +├── partition by: [depname] +├── order by: [empno] +├── frame: [Range: Preceding(None) ~ CurrentRow] +└── Sort + ├── output columns: [empsalary.depname (#0), empsalary.empno (#1), empsalary.salary (#2)] + ├── sort keys: [depname ASC NULLS LAST, empno ASC NULLS LAST] + ├── estimated rows: 0.00 └── TableScan ├── table: default.test_explain_window.empsalary ├── output columns: [depname (#0), empno (#1), salary (#2)] @@ -44,17 +44,13 @@ query T explain pipeline SELECT depname, 
empno, salary, sum(salary) OVER (PARTITION BY depname ORDER BY empno) FROM empsalary ORDER BY depname, empno; ---- CompoundBlockOperator(Project) × 1 processor - Merge (TransformSortMerge × 4 processors) to (CompoundBlockOperator(Project) × 1) - TransformSortMerge × 4 processors - SortPartialTransform × 4 processors - Merge (Transform Window × 1 processor) to (SortPartialTransform × 4) - Transform Window × 1 processor - Merge (TransformSortMerge × 4 processors) to (Transform Window × 1) - TransformSortMerge × 4 processors - SortPartialTransform × 4 processors - Merge (DeserializeDataTransform × 1 processor) to (SortPartialTransform × 4) - DeserializeDataTransform × 1 processor - SyncReadParquetDataSource × 1 processor + Transform Window × 1 processor + Merge (TransformSortMerge × 4 processors) to (Transform Window × 1) + TransformSortMerge × 4 processors + SortPartialTransform × 4 processors + Merge (DeserializeDataTransform × 1 processor) to (SortPartialTransform × 4) + DeserializeDataTransform × 1 processor + SyncReadParquetDataSource × 1 processor # Enable sort spilling @@ -65,19 +61,14 @@ query T explain pipeline SELECT depname, empno, salary, sum(salary) OVER (PARTITION BY depname ORDER BY empno) FROM empsalary ORDER BY depname, empno; ---- CompoundBlockOperator(Project) × 1 processor - Merge (TransformSortSpill × 4 processors) to (CompoundBlockOperator(Project) × 1) - TransformSortSpill × 4 processors - TransformSortMerge × 4 processors - SortPartialTransform × 4 processors - Merge (Transform Window × 1 processor) to (SortPartialTransform × 4) - Transform Window × 1 processor - Merge (TransformSortSpill × 4 processors) to (Transform Window × 1) - TransformSortSpill × 4 processors - TransformSortMerge × 4 processors - SortPartialTransform × 4 processors - Merge (DeserializeDataTransform × 1 processor) to (SortPartialTransform × 4) - DeserializeDataTransform × 1 processor - SyncReadParquetDataSource × 1 processor + Transform Window × 1 processor + Merge 
(TransformSortSpill × 4 processors) to (Transform Window × 1) + TransformSortSpill × 4 processors + TransformSortMerge × 4 processors + SortPartialTransform × 4 processors + Merge (DeserializeDataTransform × 1 processor) to (SortPartialTransform × 4) + DeserializeDataTransform × 1 processor + SyncReadParquetDataSource × 1 processor statement ok @@ -100,35 +91,39 @@ Filter ├── partition by: [k] ├── order by: [v] ├── frame: [Range: Preceding(None) ~ CurrentRow] - └── UnionAll + └── Sort ├── output columns: [test.k (#0), test.v (#1)] + ├── sort keys: [k ASC NULLS LAST, v DESC NULLS LAST] ├── estimated rows: 0.00 - ├── Filter - │ ├── output columns: [test.k (#0), test.v (#1)] - │ ├── filters: [is_true(test.k (#0) = 12)] - │ ├── estimated rows: 0.00 - │ └── TableScan - │ ├── table: default.test_explain_window.test - │ ├── output columns: [k (#0), v (#1)] - │ ├── read rows: 0 - │ ├── read size: 0 - │ ├── partitions total: 0 - │ ├── partitions scanned: 0 - │ ├── push downs: [filters: [is_true(test.k (#0) = 12)], limit: NONE] - │ └── estimated rows: 0.00 - └── Filter - ├── output columns: [test.k (#2), test.v (#3)] - ├── filters: [is_true(test.k (#2) = 12)] + └── UnionAll + ├── output columns: [test.k (#0), test.v (#1)] ├── estimated rows: 0.00 - └── TableScan - ├── table: default.test_explain_window.test - ├── output columns: [k (#2), v (#3)] - ├── read rows: 0 - ├── read size: 0 - ├── partitions total: 0 - ├── partitions scanned: 0 - ├── push downs: [filters: [is_true(test.k (#2) = 12)], limit: NONE] - └── estimated rows: 0.00 + ├── Filter + │ ├── output columns: [test.k (#0), test.v (#1)] + │ ├── filters: [is_true(test.k (#0) = 12)] + │ ├── estimated rows: 0.00 + │ └── TableScan + │ ├── table: default.test_explain_window.test + │ ├── output columns: [k (#0), v (#1)] + │ ├── read rows: 0 + │ ├── read size: 0 + │ ├── partitions total: 0 + │ ├── partitions scanned: 0 + │ ├── push downs: [filters: [is_true(test.k (#0) = 12)], limit: NONE] + │ └── estimated rows: 0.00 + └── 
Filter + ├── output columns: [test.k (#2), test.v (#3)] + ├── filters: [is_true(test.k (#2) = 12)] + ├── estimated rows: 0.00 + └── TableScan + ├── table: default.test_explain_window.test + ├── output columns: [k (#2), v (#3)] + ├── read rows: 0 + ├── read size: 0 + ├── partitions total: 0 + ├── partitions scanned: 0 + ├── push downs: [filters: [is_true(test.k (#2) = 12)], limit: NONE] + └── estimated rows: 0.00 # cannot push down filter in window function query T @@ -144,27 +139,31 @@ Filter ├── partition by: [v] ├── order by: [v] ├── frame: [Range: Preceding(None) ~ CurrentRow] - └── UnionAll + └── Sort ├── output columns: [test.k (#0), test.v (#1)] + ├── sort keys: [v ASC NULLS LAST, v DESC NULLS LAST] ├── estimated rows: 0.00 - ├── TableScan - │ ├── table: default.test_explain_window.test - │ ├── output columns: [k (#0), v (#1)] - │ ├── read rows: 0 - │ ├── read size: 0 - │ ├── partitions total: 0 - │ ├── partitions scanned: 0 - │ ├── push downs: [filters: [], limit: NONE] - │ └── estimated rows: 0.00 - └── TableScan - ├── table: default.test_explain_window.test - ├── output columns: [k (#2), v (#3)] - ├── read rows: 0 - ├── read size: 0 - ├── partitions total: 0 - ├── partitions scanned: 0 - ├── push downs: [filters: [], limit: NONE] - └── estimated rows: 0.00 + └── UnionAll + ├── output columns: [test.k (#0), test.v (#1)] + ├── estimated rows: 0.00 + ├── TableScan + │ ├── table: default.test_explain_window.test + │ ├── output columns: [k (#0), v (#1)] + │ ├── read rows: 0 + │ ├── read size: 0 + │ ├── partitions total: 0 + │ ├── partitions scanned: 0 + │ ├── push downs: [filters: [], limit: NONE] + │ └── estimated rows: 0.00 + └── TableScan + ├── table: default.test_explain_window.test + ├── output columns: [k (#2), v (#3)] + ├── read rows: 0 + ├── read size: 0 + ├── partitions total: 0 + ├── partitions scanned: 0 + ├── push downs: [filters: [], limit: NONE] + └── estimated rows: 0.00 # cannot push down filter in window function query T @@ -180,27 +179,31 @@ 
Filter ├── partition by: [] ├── order by: [v] ├── frame: [Range: Preceding(None) ~ CurrentRow] - └── UnionAll + └── Sort ├── output columns: [test.k (#0), test.v (#1)] + ├── sort keys: [v DESC NULLS LAST] ├── estimated rows: 0.00 - ├── TableScan - │ ├── table: default.test_explain_window.test - │ ├── output columns: [k (#0), v (#1)] - │ ├── read rows: 0 - │ ├── read size: 0 - │ ├── partitions total: 0 - │ ├── partitions scanned: 0 - │ ├── push downs: [filters: [], limit: NONE] - │ └── estimated rows: 0.00 - └── TableScan - ├── table: default.test_explain_window.test - ├── output columns: [k (#2), v (#3)] - ├── read rows: 0 - ├── read size: 0 - ├── partitions total: 0 - ├── partitions scanned: 0 - ├── push downs: [filters: [], limit: NONE] - └── estimated rows: 0.00 + └── UnionAll + ├── output columns: [test.k (#0), test.v (#1)] + ├── estimated rows: 0.00 + ├── TableScan + │ ├── table: default.test_explain_window.test + │ ├── output columns: [k (#0), v (#1)] + │ ├── read rows: 0 + │ ├── read size: 0 + │ ├── partitions total: 0 + │ ├── partitions scanned: 0 + │ ├── push downs: [filters: [], limit: NONE] + │ └── estimated rows: 0.00 + └── TableScan + ├── table: default.test_explain_window.test + ├── output columns: [k (#2), v (#3)] + ├── read rows: 0 + ├── read size: 0 + ├── partitions total: 0 + ├── partitions scanned: 0 + ├── push downs: [filters: [], limit: NONE] + └── estimated rows: 0.00 statement ok drop table if exists t @@ -221,15 +224,19 @@ Filter ├── partition by: [a] ├── order by: [] ├── frame: [Range: Preceding(None) ~ Following(None)] - └── TableScan - ├── table: default.test_explain_window.t - ├── output columns: [a (#0)] - ├── read rows: 0 - ├── read size: 0 - ├── partitions total: 0 - ├── partitions scanned: 0 - ├── push downs: [filters: [], limit: NONE] - └── estimated rows: 0.00 + └── Sort + ├── output columns: [t.a (#0)] + ├── sort keys: [a ASC NULLS LAST] + ├── estimated rows: 0.00 + └── TableScan + ├── table: default.test_explain_window.t + ├── 
output columns: [a (#0)] + ├── read rows: 0 + ├── read size: 0 + ├── partitions total: 0 + ├── partitions scanned: 0 + ├── push downs: [filters: [], limit: NONE] + └── estimated rows: 0.00 ## example from: https://community.snowflake.com/s/article/Pushdown-or-Not-Pushdown statement ok @@ -253,19 +260,23 @@ Window ├── partition by: [] ├── order by: [b] ├── frame: [Range: Preceding(None) ~ CurrentRow] -└── Filter +└── Sort ├── output columns: [tbpush.b (#0)] - ├── filters: [is_true(tbpush.b (#0) > 3)] + ├── sort keys: [b ASC NULLS LAST] ├── estimated rows: 0.00 - └── TableScan - ├── table: default.test_explain_window.tbpush - ├── output columns: [b (#0)] - ├── read rows: 0 - ├── read size: 0 - ├── partitions total: 0 - ├── partitions scanned: 0 - ├── push downs: [filters: [is_true(tbpush.b (#0) > 3)], limit: NONE] - └── estimated rows: 0.00 + └── Filter + ├── output columns: [tbpush.b (#0)] + ├── filters: [is_true(tbpush.b (#0) > 3)] + ├── estimated rows: 0.00 + └── TableScan + ├── table: default.test_explain_window.tbpush + ├── output columns: [b (#0)] + ├── read rows: 0 + ├── read size: 0 + ├── partitions total: 0 + ├── partitions scanned: 0 + ├── push downs: [filters: [is_true(tbpush.b (#0) > 3)], limit: NONE] + └── estimated rows: 0.00 query T explain select * from vwpush where b > 3; @@ -280,15 +291,19 @@ Filter ├── partition by: [] ├── order by: [b] ├── frame: [Range: Preceding(None) ~ CurrentRow] - └── TableScan - ├── table: default.test_explain_window.tbpush - ├── output columns: [b (#0)] - ├── read rows: 0 - ├── read size: 0 - ├── partitions total: 0 - ├── partitions scanned: 0 - ├── push downs: [filters: [], limit: NONE] - └── estimated rows: 0.00 + └── Sort + ├── output columns: [tbpush.b (#0)] + ├── sort keys: [b ASC NULLS LAST] + ├── estimated rows: 0.00 + └── TableScan + ├── table: default.test_explain_window.tbpush + ├── output columns: [b (#0)] + ├── read rows: 0 + ├── read size: 0 + ├── partitions total: 0 + ├── partitions scanned: 0 + ├── push 
downs: [filters: [], limit: NONE] + └── estimated rows: 0.00 query T explain select * from (select b, row_number() over (order by b) from tbpush) where b > 3; @@ -303,15 +318,19 @@ Filter ├── partition by: [] ├── order by: [b] ├── frame: [Range: Preceding(None) ~ CurrentRow] - └── TableScan - ├── table: default.test_explain_window.tbpush - ├── output columns: [b (#0)] - ├── read rows: 0 - ├── read size: 0 - ├── partitions total: 0 - ├── partitions scanned: 0 - ├── push downs: [filters: [], limit: NONE] - └── estimated rows: 0.00 + └── Sort + ├── output columns: [tbpush.b (#0)] + ├── sort keys: [b ASC NULLS LAST] + ├── estimated rows: 0.00 + └── TableScan + ├── table: default.test_explain_window.tbpush + ├── output columns: [b (#0)] + ├── read rows: 0 + ├── read size: 0 + ├── partitions total: 0 + ├── partitions scanned: 0 + ├── push downs: [filters: [], limit: NONE] + └── estimated rows: 0.00 # test push down limit to window function statement ok @@ -370,8 +389,8 @@ explain pipeline select a, dense_rank() over (partition by a order by a desc) fr CompoundBlockOperator(Project) × 1 processor LimitTransform × 1 processor Transform Window × 1 processor - Merge (TransformSortMergeLimit × 4 processors) to (Transform Window × 1) - TransformSortMergeLimit × 4 processors + Merge (TransformSortMerge × 4 processors) to (Transform Window × 1) + TransformSortMerge × 4 processors SortPartialTransform × 4 processors Merge (DeserializeDataTransform × 1 processor) to (SortPartialTransform × 4) DeserializeDataTransform × 1 processor @@ -384,8 +403,8 @@ explain pipeline select a, sum(a) over (partition by a order by a desc rows betw CompoundBlockOperator(Project) × 1 processor LimitTransform × 1 processor Transform Window × 1 processor - Merge (TransformSortMergeLimit × 4 processors) to (Transform Window × 1) - TransformSortMergeLimit × 4 processors + Merge (TransformSortMerge × 4 processors) to (Transform Window × 1) + TransformSortMerge × 4 processors SortPartialTransform × 4 
processors Merge (DeserializeDataTransform × 1 processor) to (SortPartialTransform × 4) DeserializeDataTransform × 1 processor @@ -469,19 +488,23 @@ RowFetch ├── partition by: [a] ├── order by: [a] ├── frame: [Rows: Preceding(None) ~ CurrentRow] - └── Filter + └── Sort ├── output columns: [t.a (#0), t.b (#1), t._row_id (#7)] - ├── filters: [is_true(t.a (#0) > 1)] + ├── sort keys: [a ASC NULLS LAST, a DESC NULLS LAST] ├── estimated rows: 0.00 - └── TableScan - ├── table: default.test_explain_window.t - ├── output columns: [a (#0), b (#1), _row_id (#7)] - ├── read rows: 0 - ├── read size: 0 - ├── partitions total: 0 - ├── partitions scanned: 0 - ├── push downs: [filters: [is_true(t.a (#0) > 1)], limit: NONE] - └── estimated rows: 0.00 + └── Filter + ├── output columns: [t.a (#0), t.b (#1), t._row_id (#7)] + ├── filters: [is_true(t.a (#0) > 1)] + ├── estimated rows: 0.00 + └── TableScan + ├── table: default.test_explain_window.t + ├── output columns: [a (#0), b (#1), _row_id (#7)] + ├── read rows: 0 + ├── read size: 0 + ├── partitions total: 0 + ├── partitions scanned: 0 + ├── push downs: [filters: [is_true(t.a (#0) > 1)], limit: NONE] + └── estimated rows: 0.00 statement ok drop table if exists table43764_orc @@ -504,5 +527,22 @@ CompoundBlockOperator(Project) × 1 processor DeserializeDataTransform × 1 processor SyncReadParquetDataSource × 1 processor +# same order multi window +query T +explain pipeline select *,lead(number,1, 42) over (order by number), lead(number,2,44) over (order by number), lead(number,3,44) over (order by number) from numbers(5); +---- +CompoundBlockOperator(Project) × 1 processor + Transform Window × 1 processor + CompoundBlockOperator(Map) × 1 processor + Transform Window × 1 processor + CompoundBlockOperator(Map) × 1 processor + Transform Window × 1 processor + Merge (TransformSortMerge × 4 processors) to (Transform Window × 1) + TransformSortMerge × 4 processors + SortPartialTransform × 4 processors + Merge (CompoundBlockOperator(Map) × 1 
processor) to (SortPartialTransform × 4) + CompoundBlockOperator(Map) × 1 processor + NumbersSourceTransform × 1 processor + statement ok DROP DATABASE test_explain_window; diff --git a/tests/sqllogictests/suites/query/filter.test b/tests/sqllogictests/suites/query/filter.test index 59734fe1d1d77..71ec61d51e9d3 100644 --- a/tests/sqllogictests/suites/query/filter.test +++ b/tests/sqllogictests/suites/query/filter.test @@ -33,6 +33,21 @@ select a from t where a = 0 or 3 / a > 2 order by a statement ok drop table if exists t; +# AND filter short circuit +statement ok +create table t(a varchar); + +statement ok +insert into t values('null'), ('202405'); + +query I +SELECT count(1) FROM t WHERE a <> 'null' AND a IS NOT NULL AND to_date(a || '01', '%Y%m%d') > add_years(today(), - 100); +---- +1 + +statement ok +drop table if exists t; + # Boolean comparison statement ok drop table if exists t; diff --git a/tests/sqllogictests/suites/query/subquery.test b/tests/sqllogictests/suites/query/subquery.test index 2fc6b663a2e5c..4f146fd4028cb 100644 --- a/tests/sqllogictests/suites/query/subquery.test +++ b/tests/sqllogictests/suites/query/subquery.test @@ -771,3 +771,47 @@ NULL 0 statement ok drop table if exists t; + +statement ok +drop table if exists push_log; + +statement ok +drop table if exists merge_log; + +statement ok +CREATE TABLE `push_log` ( + `name` VARCHAR NULL, + `data_count` INT NULL, + `created_at` TIMESTAMP NULL +) ENGINE = FUSE; + +statement ok +CREATE TABLE `merge_log` ( + `name` VARCHAR NULL, + `file_date` VARCHAR NULL, + `inserted_count` INT NULL, + `modified_count` INT NULL, + `created_at` TIMESTAMP NULL +) ENGINE = FUSE; + +query +SELECT + (SELECT MAX(created_at) + FROM merge_log + WHERE s.name=name + AND created_at >= s.push_time + AND created_at < s.push_time + 1) max_merge_time +FROM ( + SELECT + name, + DATE_FORMAT(created_at,'%Y-%m-%d') create_date, + STR_TO_DATE(DATE_FORMAT(created_at,'%Y-%m-%d'),'%Y-%m-%d') push_time + FROM push_log +) s; +---- + 
+statement ok +drop table push_log; + +statement ok +drop table merge_log; diff --git a/tests/sqllogictests/suites/task/task_ddl_test_milliseconds.test b/tests/sqllogictests/suites/task/task_ddl_test_milliseconds.test new file mode 100644 index 0000000000000..837182d5e50c1 --- /dev/null +++ b/tests/sqllogictests/suites/task/task_ddl_test_milliseconds.test @@ -0,0 +1,44 @@ +# Please start the UDF Server first before running this test: +# python3 tests/cloud-control-server/simple_server.py +# +statement ok +DROP TASK IF EXISTS mstask + +statement ok +CREATE TASK mstask + WAREHOUSE = 'mywh' + SCHEDULE = 500 MILLISECOND + AS SELECT 1; + +query SSSS +select name, warehouse, schedule, definition from system.tasks where name = 'mstask' +---- +mstask mywh INTERVAL 0 SECOND 500 MILLISECOND SELECT 1 + +statement error +ALTER TASK mstask SET SCHEDULE = 1000 MILLISECOND + +statement error +ALTER TASK mstask SET SCHEDULE = 499 MILLISECOND + +statement ok +ALTER TASK mstask SET SCHEDULE = 1000 SECOND + +statement ok +ALTER TASK mstask SET SCHEDULE = 501 MILLISECOND + +query SSSS +select name, warehouse, schedule, definition from system.tasks where name = 'mstask' +---- +mstask mywh INTERVAL 0 SECOND 501 MILLISECOND SELECT 1 + +statement ok +execute TASK mstask + +query SSSS +select name, warehouse, schedule, definition from system.task_history where name = 'mstask' +---- +mstask mywh INTERVAL 0 SECOND 501 MILLISECOND SELECT 1 + +statement ok +DROP TASK mstask diff --git a/tests/sqllogictests/suites/tpcds/tpcds_join_order.test b/tests/sqllogictests/suites/tpcds/tpcds_join_order.test index da4f3eca1bfda..f780c39684462 100644 --- a/tests/sqllogictests/suites/tpcds/tpcds_join_order.test +++ b/tests/sqllogictests/suites/tpcds/tpcds_join_order.test @@ -6841,27 +6841,27 @@ ORDER BY lochierarchy DESC , rank_within_parent LIMIT 100; ---- -HashJoin: INNER +HashJoin: LEFT SEMI ├── Build -│ └── Scan: default.tpcds.date_dim (#1) (read rows: 73049) +│ └── HashJoin: INNER +│ ├── Build +│ │ └── 
Scan: default.tpcds.store (#4) (read rows: 1) +│ └── Probe +│ └── HashJoin: INNER +│ ├── Build +│ │ └── Scan: default.tpcds.date_dim (#5) (read rows: 73049) +│ └── Probe +│ └── Scan: default.tpcds.store_sales (#3) (read rows: 28810) └── Probe └── HashJoin: INNER ├── Build - │ └── HashJoin: INNER - │ ├── Build - │ │ └── HashJoin: INNER - │ │ ├── Build - │ │ │ └── Scan: default.tpcds.store (#4) (read rows: 1) - │ │ └── Probe - │ │ └── HashJoin: INNER - │ │ ├── Build - │ │ │ └── Scan: default.tpcds.date_dim (#5) (read rows: 73049) - │ │ └── Probe - │ │ └── Scan: default.tpcds.store_sales (#3) (read rows: 28810) - │ └── Probe - │ └── Scan: default.tpcds.store (#2) (read rows: 1) + │ └── Scan: default.tpcds.store (#2) (read rows: 1) └── Probe - └── Scan: default.tpcds.store_sales (#0) (read rows: 28810) + └── HashJoin: INNER + ├── Build + │ └── Scan: default.tpcds.date_dim (#1) (read rows: 73049) + └── Probe + └── Scan: default.tpcds.store_sales (#0) (read rows: 28810) # Q71 query I diff --git a/tests/suites/0_stateless/18_rbac/18_0005_show_grants_with_dropped.result b/tests/suites/0_stateless/18_rbac/18_0005_show_grants_with_dropped.result index 4ff9bdeb61477..aff8c8e172fc4 100644 --- a/tests/suites/0_stateless/18_rbac/18_0005_show_grants_with_dropped.result +++ b/tests/suites/0_stateless/18_rbac/18_0005_show_grants_with_dropped.result @@ -71,8 +71,9 @@ UPDATE,DELETE default.c_r1.t1 ROLE role3 GRANT UPDATE,DELETE ON 'default'.'c_r1 SELECT,INSERT *.* NULL ROLE role1 GRANT SELECT,INSERT ON *.* TO ROLE `role1` SELECT,INSERT *.* NULL USER u1 GRANT SELECT,INSERT ON *.* TO 'u1'@'%' SELECT,INSERT *.* NULL USER u1 GRANT SELECT,INSERT ON *.* TO 'u1'@'%' +SELECT,INSERT *.* NULL USER u1 GRANT SELECT,INSERT ON *.* TO 'u1'@'%' Need Err: -Error: APIError: ResponseError with 1063: Permission denied: privilege [Grant] is required on *.* for user 'u1'@'%' with roles [public,role1,role2] -Error: APIError: ResponseError with 1063: Permission denied: privilege [Grant] is required on *.* 
for user 'u1'@'%' with roles [public,role1,role2] -Error: APIError: ResponseError with 1063: Permission denied: privilege [Grant] is required on *.* for user 'u1'@'%' with roles [public,role1,role2] +Error: APIError: ResponseError with 1063: Permission denied: privilege [Grant] is required on *.* for user 'u1'@'%' with roles [role1,role2] +Error: APIError: ResponseError with 1063: Permission denied: privilege [Grant] is required on *.* for user 'u1'@'%' with roles [role1,role2] +Error: APIError: ResponseError with 1063: Permission denied: privilege [Grant] is required on *.* for user 'u1'@'%' with roles [role1,role2] === clean up === diff --git a/tests/suites/0_stateless/18_rbac/18_0005_show_grants_with_dropped.sh b/tests/suites/0_stateless/18_rbac/18_0005_show_grants_with_dropped.sh index afc48f5e2496d..937f9a6576e82 100755 --- a/tests/suites/0_stateless/18_rbac/18_0005_show_grants_with_dropped.sh +++ b/tests/suites/0_stateless/18_rbac/18_0005_show_grants_with_dropped.sh @@ -86,6 +86,8 @@ export USER_U1_CONNECT="bendsql --user=u1 --password=123 --host=${QUERY_MYSQL_HA echo "show grants for role role1" | $USER_U1_CONNECT echo "show grants for role role2" | $USER_U1_CONNECT echo "show grants for user u1" | $USER_U1_CONNECT +echo "show grants for user u1 where name='u1' limit 1;" | $USER_U1_CONNECT +echo "show grants for user u1 where name!='u1' limit 1;" | $USER_U1_CONNECT echo "show grants" | $USER_U1_CONNECT echo "Need Err:" echo "show grants for user root" | $USER_U1_CONNECT diff --git a/tests/suites/0_stateless/18_rbac/18_0007_privilege_access.result b/tests/suites/0_stateless/18_rbac/18_0007_privilege_access.result index aaed8dcd51454..fd5bc1fc1aa85 100644 --- a/tests/suites/0_stateless/18_rbac/18_0007_privilege_access.result +++ b/tests/suites/0_stateless/18_rbac/18_0007_privilege_access.result @@ -60,6 +60,12 @@ Error: APIError: ResponseError with 1063: Permission denied: User 'a'@'%' does n Error: APIError: ResponseError with 1063: Permission denied: User 
'a'@'%' does not have the required privileges for database 'nogrant' 1 0 +=== Test: show grants on privilege check === +Error: APIError: ResponseError with 1063: Permission denied: privilege USAGE is required on udf root_func for user b. +Error: APIError: ResponseError with 1063: Permission denied: privilege READ is required on stage root_stage for user b. Or no need to show the stage privilege +Error: APIError: ResponseError with 1063: Permission denied: No privilege on database root_db for user b. +Error: APIError: ResponseError with 1063: Permission denied: No privilege on table root_table for user b. +Error: APIError: ResponseError with 1063: Permission denied: No privilege on table root_table for user b. 1 64 378 Error: APIError: ResponseError with 1063: Permission denied: privilege [Select] is required on 'default'.'default'.'t1' for user 'b'@'%' with roles [public] Error: APIError: ResponseError with 1063: Permission denied: privilege [Read] is required on STAGE s3 for user 'b'@'%' with roles [public] diff --git a/tests/suites/0_stateless/18_rbac/18_0007_privilege_access.sh b/tests/suites/0_stateless/18_rbac/18_0007_privilege_access.sh index 8fdf73fbf6818..c6b74787ada90 100755 --- a/tests/suites/0_stateless/18_rbac/18_0007_privilege_access.sh +++ b/tests/suites/0_stateless/18_rbac/18_0007_privilege_access.sh @@ -195,6 +195,25 @@ EOF echo "drop user if exists b" | $BENDSQL_CLIENT_CONNECT echo "create user b identified by '$TEST_USER_PASSWORD'" | $BENDSQL_CLIENT_CONNECT +echo "=== Test: show grants on privilege check ===" +echo "drop database if exists root_db" | $BENDSQL_CLIENT_CONNECT +echo "drop table if exists default.root_table" | $BENDSQL_CLIENT_CONNECT +echo "drop stage if exists root_stage" | $BENDSQL_CLIENT_CONNECT +echo "drop function if exists root_func" | $BENDSQL_CLIENT_CONNECT +echo "create database root_db" | $BENDSQL_CLIENT_CONNECT +echo "create table default.root_table(id int)" | $BENDSQL_CLIENT_CONNECT +echo "create stage root_stage" | 
$BENDSQL_CLIENT_CONNECT +echo "create function root_func as (a) -> (a+1);" | $BENDSQL_CLIENT_CONNECT +echo "show grants on udf root_func" | $USER_B_CONNECT +echo "show grants on stage root_stage" | $USER_B_CONNECT +echo "show grants on database root_db" | $USER_B_CONNECT +echo "show grants on table default.root_table" | $USER_B_CONNECT +echo "show grants on table root_table" | $USER_B_CONNECT +echo "drop database if exists root_db" | $BENDSQL_CLIENT_CONNECT +echo "drop table if exists default.root_table" | $BENDSQL_CLIENT_CONNECT +echo "drop stage if exists root_stage" | $BENDSQL_CLIENT_CONNECT +echo "drop function if exists root_func" | $BENDSQL_CLIENT_CONNECT + echo "drop table if exists t" | $BENDSQL_CLIENT_CONNECT echo "drop table if exists t1" | $BENDSQL_CLIENT_CONNECT echo "drop table if exists t2" | $BENDSQL_CLIENT_CONNECT