diff --git a/.config/nextest.toml b/.config/nextest.toml index 15de353595390..0e6cef6fd7388 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -34,7 +34,3 @@ test-group = 'failpoint-limited' [[profile.ci-sim.overrides]] filter = 'test(failpoint_limited::)' test-group = 'failpoint-limited' - -[[profile.ci.junit.overrides]] -filter = 'test(failpoint_limited::)' -test-group = 'failpoint-limited' \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index db421dd9ac45f..275024641e22e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "addr2line" @@ -2102,11 +2102,11 @@ dependencies = [ [[package]] name = "bit-set" -version = "0.5.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" dependencies = [ - "bit-vec", + "bit-vec 0.8.0", ] [[package]] @@ -2115,6 +2115,12 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + [[package]] name = "bitflags" version = "1.3.2" @@ -4826,9 +4832,9 @@ checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] name = "fancy-regex" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531e46835a22af56d1e3b66f04844bed63158bc094a628bec1d321d9b4c44bf2" +checksum = "6e24cb5a94bcae1e5408b0effca5cd7172ea3c5755049c5f3af4cd283a165298" dependencies = [ "bit-set", "regex-automata 0.4.8", @@ -9733,7 +9739,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7f3541ff84e39da334979ac4bf171e0f277f4f782603aeae65bf5795dc7275a" dependencies = [ "async-trait", - "bit-vec", + "bit-vec 0.6.3", "bytes", "chrono", "crc", @@ -10457,6 +10463,7 @@ dependencies = [ "async-recursion", "async-trait", "bytes", + "chrono", "criterion", "either", "foyer", @@ -10470,6 +10477,7 @@ dependencies = [ "madsim-tokio", "madsim-tonic", "memcomparable", + "mysql_async", "opendal 0.49.2", "parking_lot 0.12.1", "parquet 53.0.0", @@ -10487,6 +10495,7 @@ dependencies = [ "risingwave_pb", "risingwave_rpc_client", "risingwave_storage", + "rust_decimal", "rw_futures_util", "scopeguard", "serde_json", @@ -11340,6 +11349,7 @@ dependencies = [ "maplit", "md5", "memcomparable", + "mysql_async", "num-integer", "parking_lot 0.12.1", "parse-display", @@ -14825,9 +14835,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.5.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ "async-compression", "bitflags 2.6.0", diff --git a/Cargo.toml b/Cargo.toml index da6b0bfa87385..49e421f4d8219 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -161,6 +161,9 @@ deltalake = { version = "0.20.1", features = [ itertools = "0.13.0" jsonbb = "0.1.4" lru = { git = "https://github.com/risingwavelabs/lru-rs.git", rev = "2682b85" } +mysql_async = { 
version = "0.34", default-features = false, features = [ + "default", +] } parquet = { version = "53", features = ["async"] } thiserror-ext = "0.1.2" tikv-jemalloc-ctl = { git = "https://github.com/risingwavelabs/jemallocator.git", rev = "64a2d9" } @@ -285,6 +288,7 @@ new_without_default = "allow" # duplicated_attributes = "allow" # TODO: remove later https://github.com/rust-lang/rust-clippy/issues/12436 mixed_attributes_style = "allow" +too_long_first_doc_paragraph = "allow" [workspace.lints.rustdoc] private_intra_doc_links = "allow" diff --git a/ci/Dockerfile b/ci/Dockerfile index 95dbb5205c754..88fe2519252e9 100644 --- a/ci/Dockerfile +++ b/ci/Dockerfile @@ -77,7 +77,7 @@ RUN cargo binstall -y --locked --no-symlinks cargo-llvm-cov cargo-nextest cargo- && rm -rf "/root/.cargo/registry/cache" \ && rm -rf "/root/.cargo/git/db" \ && cargo uninstall cargo-cache -RUN cargo install cargo-dylint@3.1.0 dylint-link@3.1.0 +RUN cargo install cargo-dylint@3.2.1 dylint-link@3.2.1 # install risedev COPY < = { ), } -export const fragmentsColumn: Column = { +export const fragmentsColumn: Column = { name: "Fragments", width: 1, content: (r) => ( diff --git a/dashboard/lib/api/streaming.ts b/dashboard/lib/api/streaming.ts index 211bd1b6bbc4c..ac7ba30d1258e 100644 --- a/dashboard/lib/api/streaming.ts +++ b/dashboard/lib/api/streaming.ts @@ -15,6 +15,7 @@ * */ +import { Expose, plainToInstance } from "class-transformer" import _ from "lodash" import sortBy from "lodash/sortBy" import { @@ -53,14 +54,6 @@ export async function getRelationIdInfos(): Promise { return fragmentIds } -export async function getFragments(): Promise { - let fragmentList: TableFragments[] = (await api.get("/fragments2")).map( - TableFragments.fromJSON - ) - fragmentList = sortBy(fragmentList, (x) => x.tableId) - return fragmentList -} - export interface Relation { id: number name: string @@ -75,7 +68,43 @@ export interface Relation { databaseName?: string } -export interface StreamingJob extends Relation { +export class StreamingJob { + @Expose({ name: "jobId" }) + id!: number + @Expose({ name: "objType" }) + _objType!: string + name!: string + jobStatus!: string + @Expose({ name: "parallelism" }) + _parallelism!: any + maxParallelism!: number + + get parallelism() { + const parallelism = this._parallelism + if (typeof parallelism === "string") { + // `Adaptive` + return parallelism + } else if (typeof parallelism === "object") { + // `Fixed (64)` + let key = Object.keys(parallelism)[0] + let value = parallelism[key] + return `${key} (${value})` + } else { + // fallback + return JSON.stringify(parallelism) + } + } + + get type() { + if (this._objType == "Table") { + return "Table / MV" + } else { + return this._objType + } + } +} + +export interface StreamingRelation extends Relation { dependentRelations: number[] } @@ -98,17 +127,15 @@ export function relationTypeTitleCase(x: Relation) { return _.startCase(_.toLower(relationType(x))) } -export function relationIsStreamingJob(x: Relation): x is StreamingJob { +export function relationIsStreamingJob(x: Relation): x is StreamingRelation { const type = relationType(x) return type !== "UNKNOWN" && type !== "SOURCE" && type !== "INTERNAL" } export async function getStreamingJobs() { - let jobs = _.concat( - await getMaterializedViews(), - await getTables(), - await getIndexes(), - await getSinks() + let jobs = plainToInstance( + StreamingJob, + (await api.get("/streaming_jobs")) as any[] ) jobs = sortBy(jobs, (x) => x.id) return jobs diff --git a/dashboard/package-lock.json 
b/dashboard/package-lock.json index 78bc775791ee2..ce861243e27cf 100644 --- a/dashboard/package-lock.json +++ b/dashboard/package-lock.json @@ -17,6 +17,7 @@ "@uidotdev/usehooks": "^2.4.1", "base64url": "^3.0.1", "bootstrap-icons": "^1.9.1", + "class-transformer": "^0.5.1", "d3": "^7.6.1", "d3-axis": "^3.0.0", "d3-dag": "^0.11.4", @@ -35,6 +36,7 @@ "react-json-view": "^1.21.3", "react-syntax-highlighter": "^15.5.0", "recharts": "^2.3.2", + "reflect-metadata": "^0.2.2", "styled-components": "5.3.0", "ts-proto": "^1.169.1" }, @@ -4135,6 +4137,11 @@ "integrity": "sha512-xmDt/QIAdeZ9+nfdPsaBCpMvHNLFiLdjj59qjqn+6iPe6YmHGQ35sBnQ8uslRBXFmXkiZQOJRjvQeoGppoTjjg==", "dev": true }, + "node_modules/class-transformer": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/class-transformer/-/class-transformer-0.5.1.tgz", + "integrity": "sha512-SQa1Ws6hUbfC98vKGxZH3KFY0Y1lm5Zm0SY8XX9zbK7FJCyVEac3ATW0RIpwzW+oOfmHE5PMPufDG9hCfoEOMw==" + }, "node_modules/classcat": { "version": "5.0.4", "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.4.tgz", @@ -9801,6 +9808,11 @@ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" }, + "node_modules/reflect-metadata": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.2.2.tgz", + "integrity": "sha512-urBwgfrvVP/eAyXx4hluJivBKzuEbSQs9rKWCrCkbSxNv8mxPcUZKeuoF3Uy4mJl3Lwprp6yy5/39VWigZ4K6Q==" + }, "node_modules/reflect.getprototypeof": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.4.tgz", @@ -14811,6 +14823,11 @@ "integrity": "sha512-xmDt/QIAdeZ9+nfdPsaBCpMvHNLFiLdjj59qjqn+6iPe6YmHGQ35sBnQ8uslRBXFmXkiZQOJRjvQeoGppoTjjg==", "dev": true }, + "class-transformer": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/class-transformer/-/class-transformer-0.5.1.tgz", + "integrity": "sha512-SQa1Ws6hUbfC98vKGxZH3KFY0Y1lm5Zm0SY8XX9zbK7FJCyVEac3ATW0RIpwzW+oOfmHE5PMPufDG9hCfoEOMw==" + }, "classcat": { "version": "5.0.4", "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.4.tgz", @@ -18962,6 +18979,11 @@ } } }, + "reflect-metadata": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.2.2.tgz", + "integrity": "sha512-urBwgfrvVP/eAyXx4hluJivBKzuEbSQs9rKWCrCkbSxNv8mxPcUZKeuoF3Uy4mJl3Lwprp6yy5/39VWigZ4K6Q==" + }, "reflect.getprototypeof": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.4.tgz", diff --git a/dashboard/package.json b/dashboard/package.json index 5e01f71c3686b..79897d63b19c8 100644 --- a/dashboard/package.json +++ b/dashboard/package.json @@ -24,6 +24,7 @@ "@uidotdev/usehooks": "^2.4.1", "base64url": "^3.0.1", "bootstrap-icons": "^1.9.1", + "class-transformer": "^0.5.1", "d3": "^7.6.1", "d3-axis": "^3.0.0", "d3-dag": "^0.11.4", @@ -42,6 +43,7 @@ "react-json-view": "^1.21.3", "react-syntax-highlighter": "^15.5.0", "recharts": "^2.3.2", + "reflect-metadata": "^0.2.2", "styled-components": "5.3.0", "ts-proto": "^1.169.1" }, diff --git a/dashboard/pages/_app.tsx b/dashboard/pages/_app.tsx index 25746318c1d24..8dd4d2cc1079d 100644 --- a/dashboard/pages/_app.tsx +++ b/dashboard/pages/_app.tsx @@ -15,6 +15,7 @@ * */ import "bootstrap-icons/font/bootstrap-icons.css" +import "reflect-metadata" import "../styles/global.css" import { ChakraProvider } 
from "@chakra-ui/react" diff --git a/dashboard/pages/fragment_graph.tsx b/dashboard/pages/fragment_graph.tsx index 3ffbda24b232f..2312334e53abe 100644 --- a/dashboard/pages/fragment_graph.tsx +++ b/dashboard/pages/fragment_graph.tsx @@ -24,7 +24,12 @@ import { HStack, Input, Select, + Table, + TableContainer, + Tbody, + Td, Text, + Tr, VStack, } from "@chakra-ui/react" import * as d3 from "d3" @@ -184,7 +189,7 @@ function buildFragmentDependencyAsEdges( return nodes } -const SIDEBAR_WIDTH = 200 +const SIDEBAR_WIDTH = 225 type BackPressureDataSource = "Embedded" | "Prometheus" const backPressureDataSources: BackPressureDataSource[] = [ @@ -202,23 +207,28 @@ interface EmbeddedBackPressureInfo { } export default function Streaming() { - const { response: relationList } = useFetch(getStreamingJobs) + const { response: streamingJobList } = useFetch(getStreamingJobs) const { response: relationIdInfos } = useFetch(getRelationIdInfos) - const [relationId, setRelationId] = useQueryState("id", parseAsInteger) + const [jobId, setJobId] = useQueryState("id", parseAsInteger) const [selectedFragmentId, setSelectedFragmentId] = useState() const [tableFragments, setTableFragments] = useState() + const job = useMemo( + () => streamingJobList?.find((j) => j.id === jobId), + [streamingJobList, jobId] + ) + const toast = useErrorToast() useEffect(() => { - if (relationId) { + if (jobId) { setTableFragments(undefined) - getFragmentsByJobId(relationId).then((tf) => { + getFragmentsByJobId(jobId).then((tf) => { setTableFragments(tf) }) } - }, [relationId]) + }, [jobId]) const fragmentDependencyCallback = useCallback(() => { if (tableFragments) { @@ -232,14 +242,14 @@ export default function Streaming() { }, [tableFragments]) useEffect(() => { - if (relationList) { - if (!relationId) { - if (relationList.length > 0) { - setRelationId(relationList[0].id) + if (streamingJobList) { + if (!jobId) { + if (streamingJobList.length > 0) { + setJobId(streamingJobList[0].id) } } } - }, [relationId, relationList, setRelationId]) + }, [jobId, streamingJobList, setJobId]) // The table fragments of the selected fragment id const fragmentDependency = fragmentDependencyCallback()?.fragmentDep @@ -276,7 +286,7 @@ export default function Streaming() { const fragmentIdToRelationId = map[relationId].map for (const fragmentId in fragmentIdToRelationId) { if (parseInt(fragmentId) == searchFragIdInt) { - setRelationId(parseInt(relationId)) + setJobId(parseInt(relationId)) setSelectedFragmentId(searchFragIdInt) return } @@ -295,7 +305,7 @@ export default function Streaming() { for (const fragmentId in fragmentIdToRelationId) { let actorIds = fragmentIdToRelationId[fragmentId].ids if (actorIds.includes(searchActorIdInt)) { - setRelationId(parseInt(relationId)) + setJobId(parseInt(relationId)) setSelectedFragmentId(parseInt(fragmentId)) return } @@ -406,41 +416,70 @@ export default function Streaming() { height="full" > - Relations + Streaming Jobs { - const id = relationList?.find( + const id = streamingJobList?.find( (x) => x.name == event.target.value )?.id if (id) { - setRelationId(id) + setJobId(id) } }} placeholder="Search..." mb={2} > - {relationList && - relationList.map((r) => ( + {streamingJobList && + streamingJobList.map((r) => ( ))} + {job && ( + + Information + + + + + + + + + + + + + + + + + + + + +
+                <Tr><Td>Type</Td><Td>{job.type}</Td></Tr>
+                <Tr><Td>Status</Td><Td>{job.jobStatus}</Td></Tr>
+                <Tr><Td>Parallelism</Td><Td>{job.parallelism}</Td></Tr>
+                <Tr><Td>Max Parallelism</Td><Td>{job.maxParallelism}</Td></Tr>
+              </Tbody>
+            </Table>
+          </TableContainer>
+ )} Goto diff --git a/dashboard/tsconfig.json b/dashboard/tsconfig.json index d88f23ca2cf51..42256872b36d1 100644 --- a/dashboard/tsconfig.json +++ b/dashboard/tsconfig.json @@ -18,6 +18,8 @@ "isolatedModules": true, "jsx": "preserve", "incremental": true, + "experimentalDecorators": true, + "emitDecoratorMetadata": true, }, "include": [ "next-env.d.ts", diff --git a/docker/docker-compose-distributed-etcd.yml b/docker/docker-compose-distributed-etcd.yml deleted file mode 100644 index 3f965c5a1b702..0000000000000 --- a/docker/docker-compose-distributed-etcd.yml +++ /dev/null @@ -1,380 +0,0 @@ ---- -version: "3" -x-image: &image - image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.0.1} -services: - compactor-0: - <<: *image - command: - - compactor-node - - "--listen-addr" - - "0.0.0.0:6660" - - "--advertise-addr" - - "compactor-0:6660" - - "--prometheus-listener-addr" - - "0.0.0.0:1260" - - "--meta-address" - - "http://meta-node-0:5690" - - "--config-path" - - /risingwave.toml - expose: - - "6660" - - "1260" - ports: [ ] - depends_on: - - meta-node-0 - # - minio-0 - volumes: - - "./risingwave.toml:/risingwave.toml" - environment: - RUST_BACKTRACE: "1" - # If ENABLE_TELEMETRY is not set, telemetry will start by default - ENABLE_TELEMETRY: ${ENABLE_TELEMETRY:-true} - container_name: compactor-0 - healthcheck: - test: - - CMD-SHELL - - bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/6660; exit $$?;' - interval: 1s - timeout: 5s - retries: 5 - restart: always - deploy: - resources: - limits: - memory: 2G - reservations: - memory: 1G - compute-node-0: - <<: *image - command: - - compute-node - - "--listen-addr" - - "0.0.0.0:5688" - - "--advertise-addr" - - "compute-node-0:5688" - - "--prometheus-listener-addr" - - "0.0.0.0:1222" - - "--meta-address" - - "http://meta-node-0:5690" - - "--config-path" - - /risingwave.toml - expose: - - "5688" - - "1222" - ports: [ ] - depends_on: - - meta-node-0 - # - minio-0 - volumes: - - "./risingwave.toml:/risingwave.toml" - environment: - RUST_BACKTRACE: "1" - # If ENABLE_TELEMETRY is not set, telemetry will start by default - ENABLE_TELEMETRY: ${ENABLE_TELEMETRY:-true} - container_name: compute-node-0 - healthcheck: - test: - - CMD-SHELL - - bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/5688; exit $$?;' - interval: 1s - timeout: 5s - retries: 5 - restart: always - deploy: - resources: - limits: - memory: 26G - reservations: - memory: 26G - etcd-0: - image: "quay.io/coreos/etcd:v3.5.10" - command: - - /usr/local/bin/etcd - - "--listen-client-urls" - - "http://0.0.0.0:2388" - - "--advertise-client-urls" - - "http://etcd-0:2388" - - "--listen-peer-urls" - - "http://0.0.0.0:2389" - - "--initial-advertise-peer-urls" - - "http://etcd-0:2389" - - "--listen-metrics-urls" - - "http://0.0.0.0:2379" - - "--name" - - risedev-meta - - "--max-txn-ops" - - "999999" - - "--max-request-bytes" - - "10485760" - - "--auto-compaction-mode" - - periodic - - "--auto-compaction-retention" - - 1m - - "--snapshot-count" - - "10000" - - "--data-dir" - - /etcd-data - expose: - - "2388" - ports: - - "2388:2388" - - "2389:2389" - depends_on: [ ] - volumes: - - "etcd-0:/etcd-data" - environment: { } - container_name: etcd-0 - healthcheck: - test: - - CMD - - etcdctl - - --endpoints=http://localhost:2388 - - endpoint - - health - interval: 1s - timeout: 5s - retries: 5 - restart: always - frontend-node-0: - <<: *image - command: - - frontend-node - - "--listen-addr" - - "0.0.0.0:4566" - - "--meta-addr" - - "http://meta-node-0:5690" - - "--advertise-addr" - - 
"frontend-node-0:4566" - - "--config-path" - - /risingwave.toml - - "--prometheus-listener-addr" - - "0.0.0.0:2222" - expose: - - "4566" - ports: - - "4566:4566" - depends_on: - - meta-node-0 - volumes: - - "./risingwave.toml:/risingwave.toml" - environment: - RUST_BACKTRACE: "1" - # If ENABLE_TELEMETRY is not set, telemetry will start by default - ENABLE_TELEMETRY: ${ENABLE_TELEMETRY:-true} - container_name: frontend-node-0 - healthcheck: - test: - - CMD-SHELL - - bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/4566; exit $$?;' - interval: 1s - timeout: 5s - retries: 5 - restart: always - deploy: - resources: - limits: - memory: 2G - reservations: - memory: 1G - grafana-0: - image: "grafana/grafana-oss:latest" - command: [ ] - expose: - - "3001" - ports: - - "3001:3001" - depends_on: [ ] - volumes: - - "grafana-0:/var/lib/grafana" - - "./grafana.ini:/etc/grafana/grafana.ini" - - "./grafana-risedev-datasource.yml:/etc/grafana/provisioning/datasources/grafana-risedev-datasource.yml" - - "./grafana-risedev-dashboard.yml:/etc/grafana/provisioning/dashboards/grafana-risedev-dashboard.yml" - - "./dashboards:/dashboards" - environment: { } - container_name: grafana-0 - healthcheck: - test: - - CMD-SHELL - - bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/3001; exit $$?;' - interval: 1s - timeout: 5s - retries: 5 - restart: always - meta-node-0: - <<: *image - command: - - meta-node - - "--listen-addr" - - "0.0.0.0:5690" - - "--advertise-addr" - - "meta-node-0:5690" - - "--dashboard-host" - - "0.0.0.0:5691" - - "--prometheus-host" - - "0.0.0.0:1250" - - "--prometheus-endpoint" - - "http://prometheus-0:9500" - - "--backend" - - etcd - - "--etcd-endpoints" - - "etcd-0:2388" - - "--state-store" - - "hummock+minio://hummockadmin:hummockadmin@minio-0:9301/hummock001" - - "--data-directory" - - "hummock_001" - - "--config-path" - - /risingwave.toml - expose: - - "5690" - - "1250" - - "5691" - ports: - - "5690:5690" - - "5691:5691" - depends_on: - - "etcd-0" - volumes: - - "./risingwave.toml:/risingwave.toml" - environment: - RUST_BACKTRACE: "1" - # If ENABLE_TELEMETRY is not set, telemetry will start by default - ENABLE_TELEMETRY: ${ENABLE_TELEMETRY:-true} - RW_TELEMETRY_TYPE: ${RW_TELEMETRY_TYPE:-"docker-compose"} - container_name: meta-node-0 - healthcheck: - test: - - CMD-SHELL - - bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/5690; exit $$?;' - interval: 1s - timeout: 5s - retries: 5 - restart: always - deploy: - resources: - limits: - memory: 2G - reservations: - memory: 1G - minio-0: - image: "quay.io/minio/minio:latest" - command: - - server - - "--address" - - "0.0.0.0:9301" - - "--console-address" - - "0.0.0.0:9400" - - /data - expose: - - "9301" - - "9400" - ports: - - "9301:9301" - - "9400:9400" - depends_on: [ ] - volumes: - - "minio-0:/data" - entrypoint: " - - /bin/sh -c ' - - set -e - - mkdir -p \"/data/hummock001\" - - /usr/bin/docker-entrypoint.sh \"$$0\" \"$$@\" - - '" - environment: - MINIO_CI_CD: "1" - MINIO_PROMETHEUS_AUTH_TYPE: public - MINIO_PROMETHEUS_URL: "http://prometheus-0:9500" - MINIO_ROOT_PASSWORD: hummockadmin - MINIO_ROOT_USER: hummockadmin - MINIO_DOMAIN: "minio-0" - container_name: minio-0 - healthcheck: - test: - - CMD-SHELL - - bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/9301; exit $$?;' - interval: 1s - timeout: 5s - retries: 5 - restart: always - prometheus-0: - image: "prom/prometheus:latest" - command: - - "--config.file=/etc/prometheus/prometheus.yml" - - "--storage.tsdb.path=/prometheus" - - 
"--web.console.libraries=/usr/share/prometheus/console_libraries" - - "--web.console.templates=/usr/share/prometheus/consoles" - - "--web.listen-address=0.0.0.0:9500" - - "--storage.tsdb.retention.time=30d" - expose: - - "9500" - ports: - - "9500:9500" - depends_on: [ ] - volumes: - - "prometheus-0:/prometheus" - - "./prometheus.yaml:/etc/prometheus/prometheus.yml" - environment: { } - container_name: prometheus-0 - healthcheck: - test: - - CMD-SHELL - - sh -c 'printf "GET /-/healthy HTTP/1.0\n\n" | nc localhost 9500; exit $$?;' - interval: 1s - timeout: 5s - retries: 5 - restart: always - message_queue: - image: "redpandadata/redpanda:latest" - command: - - redpanda - - start - - "--smp" - - "1" - - "--reserve-memory" - - 0M - - "--memory" - - 4G - - "--overprovisioned" - - "--node-id" - - "0" - - "--check=false" - - "--kafka-addr" - - "PLAINTEXT://0.0.0.0:29092,OUTSIDE://0.0.0.0:9092" - - "--advertise-kafka-addr" - - "PLAINTEXT://message_queue:29092,OUTSIDE://localhost:9092" - expose: - - "29092" - - "9092" - - "9644" - ports: - - "29092:29092" - - "9092:9092" - - "9644:9644" - - "8081:8081" - depends_on: [ ] - volumes: - - "message_queue:/var/lib/redpanda/data" - environment: { } - container_name: message_queue - healthcheck: - test: curl -f localhost:9644/v1/status/ready - interval: 1s - timeout: 5s - retries: 5 - restart: always -volumes: - etcd-0: - external: false - grafana-0: - external: false - minio-0: - external: false - prometheus-0: - external: false - message_queue: - external: false diff --git a/docker/docker-compose-distributed.yml b/docker/docker-compose-distributed.yml index abe1f33ed9823..4be9fdbb57642 100644 --- a/docker/docker-compose-distributed.yml +++ b/docker/docker-compose-distributed.yml @@ -1,6 +1,6 @@ --- x-image: &image - image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.0.1} + image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.1.0-rc.1} services: compactor-0: <<: *image @@ -19,7 +19,7 @@ services: expose: - "6660" - "1260" - ports: [ ] + ports: [] depends_on: - meta-node-0 # - minio-0 @@ -61,7 +61,7 @@ services: expose: - "5688" - "1222" - ports: [ ] + ports: [] depends_on: - meta-node-0 # - minio-0 @@ -148,12 +148,12 @@ services: memory: 1G grafana-0: image: "grafana/grafana-oss:latest" - command: [ ] + command: [] expose: - "3001" ports: - "3001:3001" - depends_on: [ ] + depends_on: [] volumes: - "grafana-0:/var/lib/grafana" - "./grafana.ini:/etc/grafana/grafana.ini" @@ -241,7 +241,7 @@ services: ports: - "9301:9301" - "9400:9400" - depends_on: [ ] + depends_on: [] volumes: - "minio-0:/data" entrypoint: " @@ -284,7 +284,7 @@ services: - "9500" ports: - "9500:9500" - depends_on: [ ] + depends_on: [] volumes: - "prometheus-0:/prometheus" - "./prometheus.yaml:/etc/prometheus/prometheus.yml" @@ -326,7 +326,7 @@ services: - "9092:9092" - "9644:9644" - "8081:8081" - depends_on: [ ] + depends_on: [] volumes: - "message_queue:/var/lib/redpanda/data" environment: {} diff --git a/docker/docker-compose-etcd.yml b/docker/docker-compose-etcd.yml deleted file mode 100644 index 9d6fe0566023e..0000000000000 --- a/docker/docker-compose-etcd.yml +++ /dev/null @@ -1,279 +0,0 @@ ---- -version: "3" -x-image: &image - image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.0.1} -services: - risingwave-standalone: - <<: *image - command: "standalone --meta-opts=\" \ - --listen-addr 0.0.0.0:5690 \ - --advertise-addr 0.0.0.0:5690 \ - --dashboard-host 0.0.0.0:5691 \ - --prometheus-host 0.0.0.0:1250 \ - --prometheus-endpoint http://prometheus-0:9500 \ - --backend etcd \ - --etcd-endpoints 
etcd-0:2388 \ - --state-store hummock+minio://hummockadmin:hummockadmin@minio-0:9301/hummock001 \ - --data-directory hummock_001 \ - --config-path /risingwave.toml\" \ - --compute-opts=\" \ - --config-path /risingwave.toml \ - --listen-addr 0.0.0.0:5688 \ - --prometheus-listener-addr 0.0.0.0:1250 \ - --advertise-addr 0.0.0.0:5688 \ - --async-stack-trace verbose \ - #--parallelism 4 \ - #--total-memory-bytes 8589934592 \ - --role both \ - --meta-address http://0.0.0.0:5690\" \ - --frontend-opts=\" \ - --config-path /risingwave.toml \ - --listen-addr 0.0.0.0:4566 \ - --advertise-addr 0.0.0.0:4566 \ - --prometheus-listener-addr 0.0.0.0:1250 \ - --health-check-listener-addr 0.0.0.0:6786 \ - --meta-addr http://0.0.0.0:5690\" \ - --compactor-opts=\" \ - --listen-addr 0.0.0.0:6660 \ - --prometheus-listener-addr 0.0.0.0:1250 \ - --advertise-addr 0.0.0.0:6660 \ - --meta-address http://0.0.0.0:5690\"" - expose: - - "6660" - - "4566" - - "5688" - - "5690" - - "1250" - - "5691" - ports: - - "4566:4566" - - "5690:5690" - - "5691:5691" - - "1250:1250" - depends_on: - - etcd-0 - - minio-0 - volumes: - - "./risingwave.toml:/risingwave.toml" - environment: - RUST_BACKTRACE: "1" - # If ENABLE_TELEMETRY is not set, telemetry will start by default - ENABLE_TELEMETRY: ${ENABLE_TELEMETRY:-true} - RW_TELEMETRY_TYPE: ${RW_TELEMETRY_TYPE:-"docker-compose"} - container_name: risingwave-standalone - healthcheck: - test: - - CMD-SHELL - - bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/6660; exit $$?;' - - bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/5688; exit $$?;' - - bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/4566; exit $$?;' - - bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/5690; exit $$?;' - interval: 1s - timeout: 5s - restart: always - deploy: - resources: - limits: - memory: 28G - reservations: - memory: 28G - - etcd-0: - image: "quay.io/coreos/etcd:v3.5.10" - command: - - /usr/local/bin/etcd - - "--listen-client-urls" - - "http://0.0.0.0:2388" - - "--advertise-client-urls" - - "http://etcd-0:2388" - - "--listen-peer-urls" - - "http://0.0.0.0:2389" - - "--initial-advertise-peer-urls" - - "http://etcd-0:2389" - - "--listen-metrics-urls" - - "http://0.0.0.0:2379" - - "--name" - - risedev-meta - - "--max-txn-ops" - - "999999" - - "--max-request-bytes" - - "10485760" - - "--auto-compaction-mode" - - periodic - - "--auto-compaction-retention" - - 1m - - "--snapshot-count" - - "10000" - - "--data-dir" - - /etcd-data - expose: - - "2388" - ports: - - "2388:2388" - - "2389:2389" - depends_on: [ ] - volumes: - - "etcd-0:/etcd-data" - environment: { } - container_name: etcd-0 - healthcheck: - test: - - CMD - - etcdctl - - --endpoints=http://localhost:2388 - - endpoint - - health - interval: 1s - timeout: 5s - retries: 5 - restart: always - - grafana-0: - image: "grafana/grafana-oss:latest" - command: [ ] - expose: - - "3001" - ports: - - "3001:3001" - depends_on: [ ] - volumes: - - "grafana-0:/var/lib/grafana" - - "./grafana.ini:/etc/grafana/grafana.ini" - - "./grafana-risedev-datasource.yml:/etc/grafana/provisioning/datasources/grafana-risedev-datasource.yml" - - "./grafana-risedev-dashboard.yml:/etc/grafana/provisioning/dashboards/grafana-risedev-dashboard.yml" - - "./dashboards:/dashboards" - environment: { } - container_name: grafana-0 - healthcheck: - test: - - CMD-SHELL - - bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/3001; exit $$?;' - interval: 1s - timeout: 5s - retries: 5 - restart: always - - minio-0: - image: 
"quay.io/minio/minio:latest" - command: - - server - - "--address" - - "0.0.0.0:9301" - - "--console-address" - - "0.0.0.0:9400" - - /data - expose: - - "9301" - - "9400" - ports: - - "9301:9301" - - "9400:9400" - depends_on: [ ] - volumes: - - "minio-0:/data" - entrypoint: " - - /bin/sh -c ' - - set -e - - mkdir -p \"/data/hummock001\" - - /usr/bin/docker-entrypoint.sh \"$$0\" \"$$@\" - - '" - environment: - MINIO_CI_CD: "1" - MINIO_PROMETHEUS_AUTH_TYPE: public - MINIO_PROMETHEUS_URL: "http://prometheus-0:9500" - MINIO_ROOT_PASSWORD: hummockadmin - MINIO_ROOT_USER: hummockadmin - MINIO_DOMAIN: "minio-0" - container_name: minio-0 - healthcheck: - test: - - CMD-SHELL - - bash -c 'printf \"GET / HTTP/1.1\n\n\" > /dev/tcp/127.0.0.1/9301; exit $$?;' - interval: 1s - timeout: 5s - retries: 5 - restart: always - - prometheus-0: - image: "prom/prometheus:latest" - command: - - "--config.file=/etc/prometheus/prometheus.yml" - - "--storage.tsdb.path=/prometheus" - - "--web.console.libraries=/usr/share/prometheus/console_libraries" - - "--web.console.templates=/usr/share/prometheus/consoles" - - "--web.listen-address=0.0.0.0:9500" - - "--storage.tsdb.retention.time=30d" - expose: - - "9500" - ports: - - "9500:9500" - depends_on: [ ] - volumes: - - "prometheus-0:/prometheus" - - "./prometheus.yaml:/etc/prometheus/prometheus.yml" - environment: { } - container_name: prometheus-0 - healthcheck: - test: - - CMD-SHELL - - sh -c 'printf "GET /-/healthy HTTP/1.0\n\n" | nc localhost 9500; exit $$?;' - interval: 1s - timeout: 5s - retries: 5 - restart: always - - message_queue: - image: "redpandadata/redpanda:latest" - command: - - redpanda - - start - - "--smp" - - "1" - - "--reserve-memory" - - 0M - - "--memory" - - 4G - - "--overprovisioned" - - "--node-id" - - "0" - - "--check=false" - - "--kafka-addr" - - "PLAINTEXT://0.0.0.0:29092,OUTSIDE://0.0.0.0:9092" - - "--advertise-kafka-addr" - - "PLAINTEXT://message_queue:29092,OUTSIDE://localhost:9092" - expose: - - "29092" - - "9092" - - "9644" - ports: - - "29092:29092" - - "9092:9092" - - "9644:9644" - - "8081:8081" - depends_on: [ ] - volumes: - - "message_queue:/var/lib/redpanda/data" - environment: { } - container_name: message_queue - healthcheck: - test: curl -f localhost:9644/v1/status/ready - interval: 1s - timeout: 5s - retries: 5 - restart: always -volumes: - etcd-0: - external: false - grafana-0: - external: false - minio-0: - external: false - prometheus-0: - external: false - message_queue: - external: false diff --git a/docker/docker-compose-with-azblob.yml b/docker/docker-compose-with-azblob.yml index 1a0de9d31d169..99889d846dcb1 100644 --- a/docker/docker-compose-with-azblob.yml +++ b/docker/docker-compose-with-azblob.yml @@ -1,6 +1,6 @@ --- x-image: &image - image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.0.1} + image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.1.0-rc.1} services: risingwave-standalone: <<: *image diff --git a/docker/docker-compose-with-gcs.yml b/docker/docker-compose-with-gcs.yml index 29a3d36b3eb65..80466c7cccaba 100644 --- a/docker/docker-compose-with-gcs.yml +++ b/docker/docker-compose-with-gcs.yml @@ -1,6 +1,6 @@ --- x-image: &image - image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.0.1} + image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.1.0-rc.1} services: risingwave-standalone: <<: *image diff --git a/docker/docker-compose-with-hdfs.yml b/docker/docker-compose-with-hdfs.yml index c8616e07a0853..7b69bdf3d29bc 100644 --- a/docker/docker-compose-with-hdfs.yml +++ b/docker/docker-compose-with-hdfs.yml @@ -17,7 +17,7 @@ 
services: expose: - "6660" - "1260" - ports: [ ] + ports: [] depends_on: - meta-node-0 volumes: @@ -57,7 +57,7 @@ services: expose: - "5688" - "1222" - ports: [ ] + ports: [] depends_on: - meta-node-0 volumes: @@ -80,56 +80,6 @@ services: memory: 26G reservations: memory: 26G - etcd-0: - image: "quay.io/coreos/etcd:v3.5.10" - command: - - /usr/local/bin/etcd - - "--listen-client-urls" - - "http://0.0.0.0:2388" - - "--advertise-client-urls" - - "http://etcd-0:2388" - - "--listen-peer-urls" - - "http://0.0.0.0:2389" - - "--initial-advertise-peer-urls" - - "http://etcd-0:2389" - - "--listen-metrics-urls" - - "http://0.0.0.0:2379" - - "--name" - - risedev-meta - - "--max-txn-ops" - - "999999" - - "--max-request-bytes" - - "10485760" - - "--auto-compaction-mode" - - periodic - - "--auto-compaction-retention" - - 1m - - "--snapshot-count" - - "10000" - - "--data-dir" - - /etcd-data - expose: - - "2388" - ports: - - "2388:2388" - - "2389:2389" - depends_on: [ ] - volumes: - - "etcd-0:/etcd-data" - environment: - - HADOOP_HOME=/opt/hadoop/ - container_name: etcd-0 - healthcheck: - test: - - CMD - - etcdctl - - --endpoints=http://localhost:2388 - - endpoint - - health - interval: 1s - timeout: 5s - retries: 5 - restart: always frontend-node-0: image: "ghcr.io/risingwavelabs/risingwave:RisingWave_1.6.1_HDFS_2.7-x86_64" command: @@ -171,19 +121,19 @@ services: memory: 1G grafana-0: image: "grafana/grafana-oss:latest" - command: [ ] + command: [] expose: - "3001" ports: - "3001:3001" - depends_on: [ ] + depends_on: [] volumes: - "grafana-0:/var/lib/grafana" - "./grafana.ini:/etc/grafana/grafana.ini" - "./grafana-risedev-datasource.yml:/etc/grafana/provisioning/datasources/grafana-risedev-datasource.yml" - "./grafana-risedev-dashboard.yml:/etc/grafana/provisioning/dashboards/grafana-risedev-dashboard.yml" - "./dashboards:/dashboards" - environment: { } + environment: {} container_name: grafana-0 healthcheck: test: @@ -208,9 +158,9 @@ services: - "--prometheus-endpoint" - "http://prometheus-0:9500" - "--backend" - - etcd - - "--etcd-endpoints" - - "etcd-0:2388" + - sql + - "--sql-endpoints" + - "postgres://postgres:@postgres-0:5432/metadata" - "--state-store" - "hummock+hdfs://" - "--data-directory" @@ -231,7 +181,7 @@ services: - ":/opt/hadoop" environment: - HADOOP_HOME=/opt/hadoop/ - - RW_TELEMETRY_TYPE: ${RW_TELEMETRY_TYPE:-"docker-compose"} + - RW_TELEMETRY_TYPE=${RW_TELEMETRY_TYPE:-"docker-compose"} container_name: meta-node-0 healthcheck: test: @@ -247,6 +197,19 @@ services: memory: 2G reservations: memory: 1G + postgres-0: + image: "postgres:15-alpine" + environment: + - POSTGRES_HOST_AUTH_METHOD=trust + - POSTGRES_USER=postgres + - POSTGRES_DB=metadata + - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C + expose: + - "5432" + ports: + - "8432:5432" + volumes: + - "postgres-0:/var/lib/postgresql/data" prometheus-0: image: "prom/prometheus:latest" command: @@ -260,11 +223,11 @@ services: - "9500" ports: - "9500:9500" - depends_on: [ ] + depends_on: [] volumes: - "prometheus-0:/prometheus" - "./prometheus.yaml:/etc/prometheus/prometheus.yml" - environment: { } + environment: {} container_name: prometheus-0 healthcheck: test: @@ -302,10 +265,10 @@ services: - "9092:9092" - "9644:9644" - "8081:8081" - depends_on: [ ] + depends_on: [] volumes: - "message_queue:/var/lib/redpanda/data" - environment: { } + environment: {} container_name: message_queue healthcheck: test: curl -f localhost:9644/v1/status/ready @@ -314,7 +277,7 @@ services: retries: 5 restart: always volumes: - etcd-0: + 
postgres-0: external: false grafana-0: external: false diff --git a/docker/docker-compose-with-local-fs.yml b/docker/docker-compose-with-local-fs.yml index a4c0fc6d278c3..68483796ac800 100644 --- a/docker/docker-compose-with-local-fs.yml +++ b/docker/docker-compose-with-local-fs.yml @@ -1,6 +1,6 @@ --- x-image: &image - image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.0.1} + image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.1.0-rc.1} services: risingwave-standalone: <<: *image diff --git a/docker/docker-compose-with-obs.yml b/docker/docker-compose-with-obs.yml index ea04d95e7e76a..f4bf8dc0e74c0 100644 --- a/docker/docker-compose-with-obs.yml +++ b/docker/docker-compose-with-obs.yml @@ -1,6 +1,6 @@ --- x-image: &image - image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.0.1} + image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.1.0-rc.1} services: risingwave-standalone: <<: *image diff --git a/docker/docker-compose-with-oss.yml b/docker/docker-compose-with-oss.yml index 9e77aa6be8f9b..7d9563473182a 100644 --- a/docker/docker-compose-with-oss.yml +++ b/docker/docker-compose-with-oss.yml @@ -1,6 +1,6 @@ --- x-image: &image - image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.0.1} + image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.1.0-rc.1} services: risingwave-standalone: <<: *image diff --git a/docker/docker-compose-with-s3.yml b/docker/docker-compose-with-s3.yml index 14f126bc39c10..d7dc75aa556a6 100644 --- a/docker/docker-compose-with-s3.yml +++ b/docker/docker-compose-with-s3.yml @@ -1,6 +1,6 @@ --- x-image: &image - image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.0.1} + image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.1.0-rc.1} services: risingwave-standalone: <<: *image diff --git a/docker/docker-compose-with-sqlite.yml b/docker/docker-compose-with-sqlite.yml index d0bb26c42fe32..d4081b592c2ab 100644 --- a/docker/docker-compose-with-sqlite.yml +++ b/docker/docker-compose-with-sqlite.yml @@ -1,6 +1,6 @@ --- x-image: &image - image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.0.1} + image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.1.0-rc.1} services: risingwave-standalone: <<: *image diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index dd594f29719d0..e315878c98b77 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -1,6 +1,6 @@ --- x-image: &image - image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.0.1} + image: ${RW_IMAGE:-risingwavelabs/risingwave:v2.1.0-rc.1} services: risingwave-standalone: <<: *image diff --git a/docker/prometheus.yaml b/docker/prometheus.yaml index 23ca2884a96e4..7828eadca47f1 100644 --- a/docker/prometheus.yaml +++ b/docker/prometheus.yaml @@ -25,10 +25,6 @@ scrape_configs: static_configs: - targets: ["compactor-0:1260"] - - job_name: etcd - static_configs: - - targets: ["etcd-0:2379"] - - job_name: frontend static_configs: - targets: ["frontend-node-0:2222"] diff --git a/e2e_test/ddl/alter_set_schema.slt b/e2e_test/ddl/alter_set_schema.slt index 74dcc5a77e64a..db0f479c85c05 100644 --- a/e2e_test/ddl/alter_set_schema.slt +++ b/e2e_test/ddl/alter_set_schema.slt @@ -94,23 +94,6 @@ WHERE nspname = 'test_schema'; ---- test_subscription test_schema -statement ok -CREATE CONNECTION test_conn WITH (type = 'privatelink', provider = 'mock'); - -statement ok -ALTER CONNECTION test_conn SET SCHEMA test_schema; - -query TT -SELECT name AS connname, nspname AS schemaname -FROM rw_connections -JOIN pg_namespace ON pg_namespace.oid = rw_connections.schema_id -WHERE nspname = 'test_schema'; ----- -test_conn test_schema - -statement ok -DROP CONNECTION 
test_schema.test_conn; - statement ok DROP SINK test_schema.test_sink; diff --git a/e2e_test/ddl/connection.slt b/e2e_test/ddl/connection.slt deleted file mode 100644 index 435395e9d249a..0000000000000 --- a/e2e_test/ddl/connection.slt +++ /dev/null @@ -1,23 +0,0 @@ -# Create a connection. -statement ok -CREATE CONNECTION conn0 WITH (type = 'privatelink', provider = 'mock'); - -# Create another user with duplicate name. -statement error -CREATE CONNECTION conn0 WITH (type = 'privatelink', provider = 'mock'); - -# Create if not exists. -statement ok -CREATE CONNECTION IF NOT EXISTS conn0 WITH (type = 'privatelink', provider = 'mock'); - -# Test quoting. -statement ok -CREATE CONNECTION "conn1" WITH (type = 'privatelink', provider = 'mock'); - -# Drop connections. -statement ok -DROP CONNECTION conn0; - -# Drop connections. -statement ok -DROP CONNECTION conn1; diff --git a/e2e_test/iceberg/start_spark_connect_server.sh b/e2e_test/iceberg/start_spark_connect_server.sh index f0f3f19a1fab7..8f0c2640a1b59 100755 --- a/e2e_test/iceberg/start_spark_connect_server.sh +++ b/e2e_test/iceberg/start_spark_connect_server.sh @@ -3,7 +3,7 @@ set -ex ICEBERG_VERSION=1.4.3 -SPARK_VERSION=3.4.3 +SPARK_VERSION=3.4.4 PACKAGES="org.apache.iceberg:iceberg-spark-runtime-3.4_2.12:$ICEBERG_VERSION,org.apache.hadoop:hadoop-aws:3.3.2" PACKAGES="$PACKAGES,org.apache.spark:spark-connect_2.12:$SPARK_VERSION" diff --git a/e2e_test/sink/kafka/create_sink.slt b/e2e_test/sink/kafka/create_sink.slt index 338465c471af9..7f589e4a4b231 100644 --- a/e2e_test/sink/kafka/create_sink.slt +++ b/e2e_test/sink/kafka/create_sink.slt @@ -31,48 +31,6 @@ create sink sink_non_exist_broker from t_kafka with ( type = 'append-only', ); -# Test create sink with connection -# Create a mock connection -statement ok -create connection mock with ( - type = 'privatelink', - provider = 'mock', -); - -# Refer to a non-existant connection -statement error -create sink si_kafka_append_only_conn from t_kafka with ( - connector = 'kafka', - properties.bootstrap.server = 'message_queue:29092', - topic = 'test-rw-sink-append-only', - type = 'append-only', - force_append_only = 'true', - connection.name = 'nonexist', -); - -# Create sink with connection -statement ok -create sink si_kafka_append_only_conn from t_kafka with ( - connector = 'kafka', - properties.bootstrap.server = 'message_queue:29092', - topic = 'test-rw-sink-append-only', - type = 'append-only', - force_append_only = 'true', - connection.name = 'mock', -); - -# Try to drop connection mock, which is in use -statement error -drop connection mock; - -# Drop sink -statement ok -drop sink si_kafka_append_only_conn; - -# Drop connection -statement ok -drop connection mock; - # Connection test clean-up finished statement error sink cannot be append-only diff --git a/e2e_test/source_inline/cdc/mysql/mysql_create_drop.slt.serial b/e2e_test/source_inline/cdc/mysql/mysql_create_drop.slt.serial index 10854d97b6440..fde008079dc6e 100644 --- a/e2e_test/source_inline/cdc/mysql/mysql_create_drop.slt.serial +++ b/e2e_test/source_inline/cdc/mysql/mysql_create_drop.slt.serial @@ -45,7 +45,6 @@ create source s with ( username = 'shared-cdc', password = 'abcdef', database.name = 'testdb1', - server.id = '114514' ); sleep 2s diff --git a/e2e_test/source_inline/tvf/mysql_query.slt b/e2e_test/source_inline/tvf/mysql_query.slt new file mode 100644 index 0000000000000..56acf0598244c --- /dev/null +++ b/e2e_test/source_inline/tvf/mysql_query.slt @@ -0,0 +1,73 @@ +control substitution on + +system ok +mysql -e "DROP 
DATABASE IF EXISTS tvf; CREATE DATABASE tvf;" + +system ok +mysql -e " +USE tvf; +CREATE TABLE test ( + id bigint primary key, + v0 bit, + v1 bool, + v2 tinyint(1), + v3 tinyint(2), + v4 smallint, + v5 mediumint, + v6 integer, + v7 bigint, + v8 float, + v9 double, + v10 numeric(4, 2), + v11 decimal(4, 2), + v12 char(255), + v13 varchar(255), + v14 bit(10), + v15 tinyblob, + v16 blob, + v17 mediumblob, + v18 longblob, + v19 date, + v20 time, + v21 timestamp, + v22 json, + v23 int +); +INSERT INTO test SELECT + 1 as id, + true as v0, + true as v1, + 2 as v2, + 3 as v3, + 4 as v4, + 5 as v5, + 6 as v6, + 7 as v7, + 1.08 as v8, + 1.09 as v9, + 1.10 as v10, + 1.11 as v11, + 'char' as v12, + 'varchar' as v13, + b'1010' as v14, + x'16' as v15, + x'17' as v16, + x'18' as v17, + x'19' as v18, + '2021-01-01' as v19, + '12:34:56' as v20, + '2021-01-01 12:34:56' as v21, + JSON_OBJECT('key1', 1, 'key2', 'abc') as v22, + null as v23; +" + +query +select * from mysql_query('$MYSQL_HOST', '$MYSQL_TCP_PORT', '$RISEDEV_MYSQL_USER', '$MYSQL_PWD', 'tvf', 'select * from test;'); +---- +1 t 1 2 3 4 5 6 7 1.08 1.09 1.10 1.11 char varchar \x000a \x16 \x17 \x18 \x19 2021-01-01 12:34:56 2021-01-01 12:34:56+00:00 {"key1": 1, "key2": "abc"} NULL + +system ok +mysql -e " +USE tvf; +DROP DATABASE tvf; +" \ No newline at end of file diff --git a/e2e_test/source_legacy/basic/ddl.slt b/e2e_test/source_legacy/basic/ddl.slt index 8e63971fb5b82..e8b9e10b249fa 100644 --- a/e2e_test/source_legacy/basic/ddl.slt +++ b/e2e_test/source_legacy/basic/ddl.slt @@ -198,35 +198,6 @@ s statement ok drop table s -# Test create source with connection -statement ok -CREATE CONNECTION mock WITH (type = 'privatelink', provider = 'mock'); - -# Reference to non-existant connection -statement error -create source s ( - column1 varchar -) with ( - connector = 'kafka', - topic = 'kafka_1_partition_topic', - properties.bootstrap.server = 'message_queue:29092', - connection.name = 'nonexist', -) FORMAT PLAIN ENCODE JSON; - -statement ok -CREATE TABLE mytable ( - column1 varchar -) with ( - connector = 'kafka', - topic = 'kafka_1_partition_topic', - properties.bootstrap.server = 'message_queue:29092', - connection.name = 'mock' -) FORMAT PLAIN ENCODE JSON; - -statement ok -DROP TABLE mytable; - - # `DEBEZIUM_MONGO_JSON` requires the source table have `_id` and `payload` columns. statement error create source s ( @@ -236,7 +207,6 @@ create source s ( connector = 'kafka', topic = 'kafka_1_partition_topic', properties.bootstrap.server = 'message_queue:29092', - connection.name = 'nonexist', ) FORMAT DEBEZIUM_MONGO ENCODE JSON; # `DEBEZIUM_MONGO_JSON` requires the `_id` column is primary key. @@ -248,7 +218,6 @@ create source s ( connector = 'kafka', topic = 'kafka_1_partition_topic', properties.bootstrap.server = 'message_queue:29092', - connection.name = 'nonexist', ) FORMAT DEBEZIUM_MONGO ENCODE JSON; # `DEBEZIUM_MONGO_JSON` requires the `payload` column is jsonb type. 
@@ -260,25 +229,4 @@ create source s ( connector = 'kafka', topic = 'kafka_1_partition_topic', properties.bootstrap.server = 'message_queue:29092', - connection.name = 'nonexist', ) FORMAT DEBEZIUM_MONGO ENCODE JSON; - -statement ok -create source s ( - column1 varchar -) with ( - connector = 'kafka', - topic = 'kafka_1_partition_topic', - properties.bootstrap.server = 'message_queue:29092', - connection.name = 'mock', -) FORMAT PLAIN ENCODE JSON; - -# Drop a connection in use -statement error -drop connection mock; - -statement ok -drop source s; - -statement ok -drop connection mock; diff --git a/e2e_test/source_legacy/basic/old_row_format_syntax/ddl.slt b/e2e_test/source_legacy/basic/old_row_format_syntax/ddl.slt index 0fe67a8504b5d..79c3553c38c70 100644 --- a/e2e_test/source_legacy/basic/old_row_format_syntax/ddl.slt +++ b/e2e_test/source_legacy/basic/old_row_format_syntax/ddl.slt @@ -152,35 +152,6 @@ s statement ok drop table s -# Test create source with connection -statement ok -CREATE CONNECTION mock WITH (type = 'privatelink', provider = 'mock'); - -# Reference to non-existant connection -statement error -create source s ( - column1 varchar -) with ( - connector = 'kafka', - topic = 'kafka_1_partition_topic', - properties.bootstrap.server = 'message_queue:29092', - connection.name = 'nonexist', -) ROW FORMAT JSON; - -statement ok -CREATE TABLE mytable ( - column1 varchar -) with ( - connector = 'kafka', - topic = 'kafka_1_partition_topic', - properties.bootstrap.server = 'message_queue:29092', - connection.name = 'mock' -) ROW FORMAT JSON; - -statement ok -DROP TABLE mytable; - - # `DEBEZIUM_MONGO_JSON` requires the source table have `_id` and `payload` columns. statement error create source s ( @@ -190,7 +161,6 @@ create source s ( connector = 'kafka', topic = 'kafka_1_partition_topic', properties.bootstrap.server = 'message_queue:29092', - connection.name = 'nonexist', ) ROW FORMAT DEBEZIUM_MONGO_JSON; # `DEBEZIUM_MONGO_JSON` requires the `_id` column is primary key. @@ -202,7 +172,6 @@ create source s ( connector = 'kafka', topic = 'kafka_1_partition_topic', properties.bootstrap.server = 'message_queue:29092', - connection.name = 'nonexist', ) ROW FORMAT DEBEZIUM_MONGO_JSON; # `DEBEZIUM_MONGO_JSON` requires the `payload` column is jsonb type. 
@@ -214,7 +183,6 @@ create source s ( connector = 'kafka', topic = 'kafka_1_partition_topic', properties.bootstrap.server = 'message_queue:29092', - connection.name = 'nonexist', ) ROW FORMAT DEBEZIUM_MONGO_JSON; statement ok @@ -224,15 +192,7 @@ create source s ( connector = 'kafka', topic = 'kafka_1_partition_topic', properties.bootstrap.server = 'message_queue:29092', - connection.name = 'mock', ) ROW FORMAT JSON; -# Drop a connection in use -statement error -drop connection mock; - statement ok drop source s; - -statement ok -drop connection mock; diff --git a/e2e_test/source_legacy/cdc_inline/auto_schema_map_mysql.slt b/e2e_test/source_legacy/cdc_inline/auto_schema_map_mysql.slt index 08afa5d1988a7..e1b5016774a0c 100644 --- a/e2e_test/source_legacy/cdc_inline/auto_schema_map_mysql.slt +++ b/e2e_test/source_legacy/cdc_inline/auto_schema_map_mysql.slt @@ -174,8 +174,8 @@ SELECT c_binary_255 FROM rw_mysql_types_test order by c_boolean; ---- -0 NULL NULL NULL -8388608 -2147483647 9223372036854775806 -10 -10000 -10000 c d \x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 -1 NULL -128 -32767 -8388608 -2147483647 -9223372036854775807 -10 -10000 -10000 a b \x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +0 f NULL NULL -8388608 -2147483647 9223372036854775806 -10 -10000 -10000 c d \x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +1 t -128 -32767 -8388608 -2147483647 -9223372036854775807 -10 -10000 -10000 a b \x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 query TTTTTTTT SELECT diff --git a/lints/Cargo.lock b/lints/Cargo.lock index aa1e1e4ef9b32..cdfe23e743f56 100644 --- a/lints/Cargo.lock +++ b/lints/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by 
Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "aho-corasick" @@ -89,9 +89,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "block-buffer" @@ -162,23 +162,22 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clippy_config" -version = "0.1.81" -source = "git+https://github.com/risingwavelabs/clippy?rev=5135d0218365e85f3371405b604a7fb1459eb256#5135d0218365e85f3371405b604a7fb1459eb256" +version = "0.1.83" +source = "git+https://github.com/risingwavelabs/clippy?rev=755ee9dd292b354fed40dcb9e4cd92d1d604207b#755ee9dd292b354fed40dcb9e4cd92d1d604207b" dependencies = [ - "rustc-semver", + "itertools", "serde", "toml 0.7.8", ] [[package]] name = "clippy_utils" -version = "0.1.81" -source = "git+https://github.com/risingwavelabs/clippy?rev=5135d0218365e85f3371405b604a7fb1459eb256#5135d0218365e85f3371405b604a7fb1459eb256" +version = "0.1.83" +source = "git+https://github.com/risingwavelabs/clippy?rev=755ee9dd292b354fed40dcb9e4cd92d1d604207b#755ee9dd292b354fed40dcb9e4cd92d1d604207b" dependencies = [ "arrayvec", "clippy_config", "itertools", - "rustc-semver", "rustc_apfloat", ] @@ -190,9 +189,9 @@ checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "compiletest_rs" -version = "0.10.2" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7225fee1bcf9247bb3a1b1a2d7ecfe2f7a990e549a09d766a257a4ae30dac0d6" +checksum = "71fcc3c0c91b59c137b3cf8073cbc2f72a49b3d5505660ec88f94da3ed4bb1de" dependencies = [ "diff", "filetime", @@ -207,7 +206,7 @@ dependencies = [ "serde_derive", "serde_json", "tester", - "winapi", + "windows-sys 0.59.0", ] [[package]] @@ -289,9 +288,9 @@ dependencies = [ [[package]] name = "dylint" -version = "3.1.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6720f18fdd779ad137ab6bc448c11042b1b32eea625ea618c8b953660bba56" +checksum = "22752a1003c06a3f04f9ea66e4899f97132a1888276fee2d5fbe5f6820eee274" dependencies = [ "ansi_term", "anyhow", @@ -310,13 +309,13 @@ dependencies = [ [[package]] name = "dylint_internal" -version = "3.1.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395dade88bc1a3103ef91b442498943d0072df869736a7c0107a5753c05d006f" +checksum = "f02176c2fa705973bfce833c3f12a69196959086c776ec967a5aa23b5523d4b6" dependencies = [ "ansi_term", "anyhow", - "bitflags 2.5.0", + "bitflags 2.6.0", "cargo_metadata", "git2", "home", @@ -333,9 +332,9 @@ dependencies = [ [[package]] name = "dylint_linting" -version = "3.1.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d070f934310ccf8f04a940affcce0cd196c1068b6d19c5ae6d975f926968b66" +checksum = "0338d94b92ffeece90a8ba405c6d051f18b4483558728537a333daa3bb422616" dependencies = [ "cargo_metadata", "dylint_internal", @@ -348,9 +347,9 @@ dependencies = [ [[package]] name = "dylint_testing" -version = "3.1.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc8a64c781bde7f1e445dc1205754994012e15a2a6e659ab7166b380c1c22ab" +checksum = 
"8e1581603a3fc49b8ede8bed50f5e2d719ce607572f5e5d5d06f200f3285448a" dependencies = [ "anyhow", "cargo_metadata", @@ -411,9 +410,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "filetime" @@ -472,7 +471,7 @@ version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbf97ba92db08df386e10c8ede66a2a0369bd277090afd8710e19e38de9ec0cd" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "libc", "libgit2-sys", "log", @@ -596,9 +595,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.154" +version = "0.2.161" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" +checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" [[package]] name = "libgit2-sys" @@ -620,7 +619,7 @@ version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "libc", "redox_syscall 0.4.1", ] @@ -666,9 +665,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "log" @@ -684,11 +683,11 @@ checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "miow" -version = "0.3.7" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" +checksum = "359f76430b20a79f9e20e115b3428614e654f04fab314482fc0fda0ebd3c6044" dependencies = [ - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -703,9 +702,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "openssl-probe" @@ -757,18 +756,18 @@ checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "proc-macro2" -version = "1.0.79" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] [[package]] name = "quote" -version = "1.0.33" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -804,9 +803,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.2" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", @@ -816,9 +815,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", @@ -827,15 +826,15 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rust-embed" -version = "8.4.0" +version = "8.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19549741604902eb99a7ed0ee177a0663ee1eda51a29f71401f166e47e77806a" +checksum = "fa66af4a4fdd5e7ebc276f115e895611a34739a9c1c01028383d612d550953c0" dependencies = [ "rust-embed-impl", "rust-embed-utils", @@ -866,12 +865,6 @@ dependencies = [ "walkdir", ] -[[package]] -name = "rustc-semver" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be1bdc7edf596692617627bbfeaba522131b18e06ca4df2b6b689e3c5d5ce84" - [[package]] name = "rustc_apfloat" version = "0.2.1+llvm-462a31f5a5ab" @@ -884,23 +877,23 @@ dependencies = [ [[package]] name = "rustfix" -version = "0.6.1" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd2853d9e26988467753bd9912c3a126f642d05d229a4b53f5752ee36c56481" +checksum = "8fb2b066405a6d48a1b39c0022270503e352ae84da0c24e1d5f8ffc38e97a325" dependencies = [ - "anyhow", - "log", "serde", "serde_json", + "thiserror", + "tracing", ] [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -939,18 +932,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.193" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2", "quote", @@ -959,11 +952,12 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -996,9 +990,9 @@ checksum = 
"3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "syn" -version = "2.0.39" +version = "2.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" dependencies = [ "proc-macro2", "quote", @@ -1007,14 +1001,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.1" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", + "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1043,9 +1038,9 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] @@ -1074,9 +1069,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", @@ -1312,7 +1307,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -1332,17 +1336,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -1353,9 +1358,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -1365,9 +1370,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = 
"windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -1377,9 +1382,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -1389,9 +1400,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -1401,9 +1412,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -1413,9 +1424,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -1425,9 +1436,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" diff --git a/lints/Cargo.toml b/lints/Cargo.toml index e0b8fe5d96664..40c90cda3d86d 100644 --- a/lints/Cargo.toml +++ b/lints/Cargo.toml @@ -14,12 +14,12 @@ path = "ui/format_error.rs" # See `README.md` before bumping the version. # Remember to update the version in `ci/Dockerfile` as well. 
[dependencies] -clippy_utils = { git = "https://github.com/risingwavelabs/clippy", rev = "61e1d2fd7062e46ccf1237707ee6da5aac018f70" } -dylint_linting = "3.1.0" +clippy_utils = { git = "https://github.com/risingwavelabs/clippy", rev = "755ee9dd292b354fed40dcb9e4cd92d1d604207b" } +dylint_linting = "3.2.1" itertools = "0.12" [dev-dependencies] -dylint_testing = "3.1.0" +dylint_testing = "3.2.1" # UI test dependencies anyhow = "1" diff --git a/lints/rust-toolchain b/lints/rust-toolchain index 31dbc57d04b2b..c4f89bf3f7be5 100644 --- a/lints/rust-toolchain +++ b/lints/rust-toolchain @@ -1,5 +1,5 @@ # See `README.md` before bumping the version. [toolchain] -channel = "nightly-2024-07-19" +channel = "nightly-2024-10-11" components = ["llvm-tools-preview", "rustc-dev"] diff --git a/lints/src/format_error.rs b/lints/src/format_error.rs index 402adc4aa5af0..b4304aad82cdc 100644 --- a/lints/src/format_error.rs +++ b/lints/src/format_error.rs @@ -104,7 +104,7 @@ impl<'tcx> LateLintPass<'tcx> for FormatError { if let FormatArgsPiece::Placeholder(placeholder) = piece && let Ok(index) = placeholder.argument.index && let Some(arg) = format_args.arguments.all_args().get(index) - && let Ok(arg_expr) = find_format_arg_expr(expr, arg) + && let Some(arg_expr) = find_format_arg_expr(expr, arg) { if in_tracing_event_macro { check_fmt_arg_in_tracing_event(cx, arg_expr); diff --git a/proto/batch_plan.proto b/proto/batch_plan.proto index f881f6546fae5..b46230b2438d6 100644 --- a/proto/batch_plan.proto +++ b/proto/batch_plan.proto @@ -104,6 +104,17 @@ message PostgresQueryNode { string query = 7; } +// NOTE(kwannoel): This will only be used in batch mode. We can change the definition as needed. +message MySqlQueryNode { + repeated plan_common.ColumnDesc columns = 1; + string hostname = 2; + string port = 3; + string username = 4; + string password = 5; + string database = 6; + string query = 7; +} + message ProjectNode { repeated expr.ExprNode select_list = 1; } @@ -386,6 +397,7 @@ message PlanNode { FileScanNode file_scan = 38; IcebergScanNode iceberg_scan = 39; PostgresQueryNode postgres_query = 40; + MySqlQueryNode mysql_query = 41; // The following nodes are used for testing. 
bool block_executor = 100; bool busy_loop_executor = 101; diff --git a/proto/catalog.proto b/proto/catalog.proto index 0c67a92f23cdd..169347c199eb9 100644 --- a/proto/catalog.proto +++ b/proto/catalog.proto @@ -243,7 +243,7 @@ message Connection { uint32 database_id = 3; string name = 4; oneof info { - PrivateLinkService private_link_service = 5; + PrivateLinkService private_link_service = 5 [deprecated = true]; } uint32 owner = 6; } diff --git a/proto/expr.proto b/proto/expr.proto index 43e2002933a47..5330843512849 100644 --- a/proto/expr.proto +++ b/proto/expr.proto @@ -365,6 +365,8 @@ message TableFunction { FILE_SCAN = 19; // postgres query POSTGRES_QUERY = 20; + // mysql query + MYSQL_QUERY = 21; // User defined table function USER_DEFINED = 100; } diff --git a/risedev.yml b/risedev.yml index 0ec9b8b09af79..8e3668dcb49c2 100644 --- a/risedev.yml +++ b/risedev.yml @@ -959,6 +959,25 @@ profile: address: schemaregistry port: 8082 + local-inline-source-test: + config-path: src/config/ci-recovery.toml + steps: + - use: minio + - use: sqlite + - use: meta-node + meta-backend: sqlite + - use: compute-node + enable-tiered-cache: true + - use: frontend + - use: compactor + - use: pubsub + persist-data: true + - use: kafka + persist-data: true + - use: schema-registry + - use: mysql + - use: postgres + ci-inline-source-test: config-path: src/config/ci-recovery.toml steps: diff --git a/src/batch/Cargo.toml b/src/batch/Cargo.toml index 46c4aa7b9de6e..ee6f757e17376 100644 --- a/src/batch/Cargo.toml +++ b/src/batch/Cargo.toml @@ -19,6 +19,7 @@ assert_matches = "1" async-recursion = "1" async-trait = "0.1" bytes = "1" +chrono = "0.4" either = "1" foyer = { workspace = true } futures = { version = "0.3", default-features = false, features = ["alloc"] } @@ -29,6 +30,7 @@ hytra = "0.1.2" iceberg = { workspace = true } itertools = { workspace = true } memcomparable = "0.2" +mysql_async = { workspace = true } opendal = { workspace = true } parking_lot = { workspace = true } parquet = { workspace = true } @@ -45,6 +47,7 @@ risingwave_hummock_sdk = { workspace = true } risingwave_pb = { workspace = true } risingwave_rpc_client = { workspace = true } risingwave_storage = { workspace = true } +rust_decimal = "1" rw_futures_util = { workspace = true } scopeguard = "1" serde_json = "1" diff --git a/src/batch/src/error.rs b/src/batch/src/error.rs index e3e53ee449fac..b0723277a6f5e 100644 --- a/src/batch/src/error.rs +++ b/src/batch/src/error.rs @@ -17,6 +17,8 @@ use std::sync::Arc; pub use anyhow::anyhow; +use iceberg::Error as IcebergError; +use mysql_async::Error as MySqlError; use parquet::errors::ParquetError; use risingwave_common::array::ArrayError; use risingwave_common::error::{def_anyhow_newtype, def_anyhow_variant, BoxedError}; @@ -29,7 +31,7 @@ use risingwave_rpc_client::error::{RpcError, ToTonicStatus}; use risingwave_storage::error::StorageError; use thiserror::Error; use thiserror_ext::Construct; -use tokio_postgres; +use tokio_postgres::Error as PostgresError; use tonic::Status; use crate::worker_manager::worker_node_manager::FragmentId; @@ -192,7 +194,8 @@ def_anyhow_variant! 
{ pub BatchExternalSystemError, BatchError ExternalSystemError, - tokio_postgres::Error => "Postgres error", - iceberg::Error => "Iceberg error", + PostgresError => "Postgres error", + IcebergError => "Iceberg error", ParquetError => "Parquet error", + MySqlError => "MySQL error", } diff --git a/src/batch/src/executor/join/chunked_data.rs b/src/batch/src/executor/join/chunked_data.rs index 158f9cba9f289..be947a885cb58 100644 --- a/src/batch/src/executor/join/chunked_data.rs +++ b/src/batch/src/executor/join/chunked_data.rs @@ -37,7 +37,7 @@ pub(super) struct AllRowIdIter<'a> { chunk_offsets: &'a [usize], } -impl<'a> Iterator for AllRowIdIter<'a> { +impl Iterator for AllRowIdIter<'_> { type Item = RowId; fn next(&mut self) -> Option { diff --git a/src/batch/src/executor/join/hash_join.rs b/src/batch/src/executor/join/hash_join.rs index 863e53035626a..834320b60d2b0 100644 --- a/src/batch/src/executor/join/hash_join.rs +++ b/src/batch/src/executor/join/hash_join.rs @@ -158,7 +158,7 @@ impl ChunkedData> { } } -impl<'a> Iterator for RowIdIter<'a> { +impl Iterator for RowIdIter<'_> { type Item = RowId; fn next(&mut self) -> Option { diff --git a/src/batch/src/executor/mod.rs b/src/batch/src/executor/mod.rs index c3bd373198df7..ce84065d9d41c 100644 --- a/src/batch/src/executor/mod.rs +++ b/src/batch/src/executor/mod.rs @@ -29,6 +29,7 @@ mod managed; mod max_one_row; mod merge_sort; mod merge_sort_exchange; +mod mysql_query; mod order_by; mod postgres_query; mod project; @@ -65,6 +66,7 @@ pub use managed::*; pub use max_one_row::*; pub use merge_sort::*; pub use merge_sort_exchange::*; +pub use mysql_query::*; pub use order_by::*; pub use postgres_query::*; pub use project::*; @@ -247,6 +249,7 @@ impl<'a, C: BatchTaskContext> ExecutorBuilder<'a, C> { NodeBody::FileScan => FileScanExecutorBuilder, NodeBody::IcebergScan => IcebergScanExecutorBuilder, NodeBody::PostgresQuery => PostgresQueryExecutorBuilder, + NodeBody::MysqlQuery => MySqlQueryExecutorBuilder, // Follow NodeBody only used for test NodeBody::BlockExecutor => BlockExecutorBuilder, NodeBody::BusyLoopExecutor => BusyLoopExecutorBuilder, diff --git a/src/batch/src/executor/mysql_query.rs b/src/batch/src/executor/mysql_query.rs new file mode 100644 index 0000000000000..721c9c5e55bf1 --- /dev/null +++ b/src/batch/src/executor/mysql_query.rs @@ -0,0 +1,170 @@ +// Copyright 2024 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
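(Editorial note, not part of the patch.) The hunk below adds the new `MySqlQueryExecutor`. For orientation, this is the `mysql_async` connect-and-stream pattern it builds on, using the same calls the executor makes (`OptsBuilder`, `Pool::new`, `get_conn`, `query_iter`, `stream`); the host, credentials, and query here are placeholder assumptions, not values from the patch:

use futures_util::StreamExt;
use mysql_async::prelude::*;

// Connects to a MySQL server, runs one query, and streams rows back lazily.
async fn stream_rows_example() -> anyhow::Result<()> {
    let opts: mysql_async::Opts = mysql_async::OptsBuilder::default()
        .ip_or_hostname("127.0.0.1") // placeholder host
        .tcp_port(3306)              // placeholder port
        .user(Some("root"))          // placeholder credentials
        .pass(Some("password"))
        .db_name(Some("test_db"))    // placeholder database
        .into();
    let pool = mysql_async::Pool::new(opts);
    let mut conn = pool.get_conn().await?;
    let mut result = conn.query_iter("SELECT id, name FROM t").await?;
    // `stream::<Row>()` yields rows one by one instead of buffering the result set.
    if let Some(rows) = result.stream::<mysql_async::Row>().await? {
        futures_util::pin_mut!(rows);
        while let Some(row) = rows.next().await {
            println!("{:?}", row?);
        }
    }
    Ok(())
}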
+ +use anyhow::Context; +use futures_async_stream::try_stream; +use futures_util::stream::StreamExt; +use mysql_async; +use mysql_async::prelude::*; +use risingwave_common::catalog::{Field, Schema}; +use risingwave_common::row::OwnedRow; +use risingwave_common::util::chunk_coalesce::DataChunkBuilder; +use risingwave_connector::parser::mysql_datum_to_rw_datum; +use risingwave_pb::batch_plan::plan_node::NodeBody; + +use crate::error::{BatchError, BatchExternalSystemError}; +use crate::executor::{BoxedExecutor, BoxedExecutorBuilder, DataChunk, Executor, ExecutorBuilder}; +use crate::task::BatchTaskContext; + +/// `MySqlQuery` executor. Runs a query against a `MySql` database. +pub struct MySqlQueryExecutor { + schema: Schema, + host: String, + port: String, + username: String, + password: String, + database: String, + query: String, + identity: String, + chunk_size: usize, +} + +impl Executor for MySqlQueryExecutor { + fn schema(&self) -> &risingwave_common::catalog::Schema { + &self.schema + } + + fn identity(&self) -> &str { + &self.identity + } + + fn execute(self: Box<Self>) -> super::BoxedDataChunkStream { + self.do_execute().boxed() + } +} +pub fn mysql_row_to_owned_row( + mut row: mysql_async::Row, + schema: &Schema, +) -> Result<OwnedRow, BatchError> { + let mut datums = vec![]; + for i in 0..schema.fields.len() { + let rw_field = &schema.fields[i]; + let name = rw_field.name.as_str(); + let datum = match mysql_datum_to_rw_datum(&mut row, i, name, &rw_field.data_type) { + Ok(val) => val, + Err(e) => { + let e = BatchExternalSystemError(e); + return Err(e.into()); + } + }; + datums.push(datum); + } + Ok(OwnedRow::new(datums)) +} + +impl MySqlQueryExecutor { + pub fn new( + schema: Schema, + host: String, + port: String, + username: String, + password: String, + database: String, + query: String, + identity: String, + chunk_size: usize, + ) -> Self { + Self { + schema, + host, + port, + username, + password, + database, + query, + identity, + chunk_size, + } + } + + #[try_stream(ok = DataChunk, error = BatchError)] + async fn do_execute(self: Box<Self>) { + tracing::debug!("mysql_query_executor: started"); + let database_opts: mysql_async::Opts = mysql_async::OptsBuilder::default() + .ip_or_hostname(self.host) + .tcp_port(self.port.parse::<u16>().unwrap()) // FIXME + .user(Some(self.username)) + .pass(Some(self.password)) + .db_name(Some(self.database)) + .into(); + + let pool = mysql_async::Pool::new(database_opts); + let mut conn = pool + .get_conn() + .await + .context("failed to connect to mysql in batch executor")?; + + let query = self.query; + let mut query_iter = conn + .query_iter(query) + .await + .context("failed to execute my_sql_query in batch executor")?; + let Some(row_stream) = query_iter.stream::<mysql_async::Row>().await?
else { + bail!("failed to get row stream from mysql query") + }; + + let mut builder = DataChunkBuilder::new(self.schema.data_types(), self.chunk_size); + tracing::debug!("mysql_query_executor: query executed, start deserializing rows"); + // deserialize the rows + #[for_await] + for row in row_stream { + let row = row?; + let owned_row = mysql_row_to_owned_row(row, &self.schema)?; + if let Some(chunk) = builder.append_one_row(owned_row) { + yield chunk; + } + } + if let Some(chunk) = builder.consume_all() { + yield chunk; + } + return Ok(()); + } +} + +pub struct MySqlQueryExecutorBuilder {} + +#[async_trait::async_trait] +impl BoxedExecutorBuilder for MySqlQueryExecutorBuilder { + async fn new_boxed_executor<C: BatchTaskContext>( + source: &ExecutorBuilder<'_, C>, + _inputs: Vec<BoxedExecutor>, + ) -> crate::error::Result<BoxedExecutor> { + let mysql_query_node = try_match_expand!( + source.plan_node().get_node_body().unwrap(), + NodeBody::MysqlQuery + )?; + + Ok(Box::new(MySqlQueryExecutor::new( + Schema::from_iter(mysql_query_node.columns.iter().map(Field::from)), + mysql_query_node.hostname.clone(), + mysql_query_node.port.clone(), + mysql_query_node.username.clone(), + mysql_query_node.password.clone(), + mysql_query_node.database.clone(), + mysql_query_node.query.clone(), + source.plan_node().get_identity().clone(), + source.context.get_config().developer.chunk_size, + ))) + } +} diff --git a/src/batch/src/executor/postgres_query.rs b/src/batch/src/executor/postgres_query.rs index 2b6524a2e45e7..4ae1fcba65da9 100644 --- a/src/batch/src/executor/postgres_query.rs +++ b/src/batch/src/executor/postgres_query.rs @@ -37,6 +37,7 @@ pub struct PostgresQueryExecutor { database: String, query: String, identity: String, + chunk_size: usize, } impl Executor for PostgresQueryExecutor { @@ -115,6 +116,7 @@ impl PostgresQueryExecutor { database: String, query: String, identity: String, + chunk_size: usize, ) -> Self { Self { schema, @@ -125,6 +127,7 @@ impl PostgresQueryExecutor { database, query, identity, + chunk_size, } } @@ -151,7 +154,7 @@ impl PostgresQueryExecutor { .query_raw(&self.query, params) .await .context("postgres_query received error from remote server")?; - let mut builder = DataChunkBuilder::new(self.schema.data_types(), 1024); + let mut builder = DataChunkBuilder::new(self.schema.data_types(), self.chunk_size); tracing::debug!("postgres_query_executor: query executed, start deserializing rows"); // deserialize the rows #[for_await] @@ -191,6 +194,7 @@ impl BoxedExecutorBuilder for PostgresQueryExecutorBuilder { postgres_query_node.database.clone(), postgres_query_node.query.clone(), source.plan_node().get_identity().clone(), + source.context.get_config().developer.chunk_size, ))) } } diff --git a/src/batch/src/lib.rs b/src/batch/src/lib.rs index 9b88c3be9cd68..38eb0efd80553 100644 --- a/src/batch/src/lib.rs +++ b/src/batch/src/lib.rs @@ -20,7 +20,6 @@ #![feature(coroutines)] #![feature(proc_macro_hygiene, stmt_expr_attributes)] #![feature(iterator_try_collect)] -#![feature(is_sorted)] #![recursion_limit = "256"] #![feature(let_chains)] #![feature(int_roundings)] diff --git a/src/bench/s3_bench/main.rs b/src/bench/s3_bench/main.rs index 792c9c4743dbf..6720a492ba942 100644 --- a/src/bench/s3_bench/main.rs +++ b/src/bench/s3_bench/main.rs @@ -557,7 +557,7 @@ where { struct StringVisitor; - impl<'de> serde::de::Visitor<'de> for StringVisitor { + impl serde::de::Visitor<'_> for StringVisitor { type Value = ByteSize; fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { diff --git
a/src/bench/sink_bench/main.rs b/src/bench/sink_bench/main.rs index 05eabdb96e913..850c41c31460f 100644 --- a/src/bench/sink_bench/main.rs +++ b/src/bench/sink_bench/main.rs @@ -383,10 +383,10 @@ where sink_writer_param.vnode_bitmap = Some(Bitmap::ones(1)); } let log_sinker = sink.new_log_sinker(sink_writer_param).await.unwrap(); - if let Err(e) = log_sinker.consume_log_and_sink(&mut log_reader).await { - return Err(e.to_report_string()); + match log_sinker.consume_log_and_sink(&mut log_reader).await { + Ok(_) => Err("Stream closed".to_string()), + Err(e) => Err(e.to_report_string()), } - Err("Stream closed".to_string()) } #[derive(Debug, Deserialize)] diff --git a/src/common/common_service/Cargo.toml b/src/common/common_service/Cargo.toml index 3051ffce5d3d3..37775ff04a82b 100644 --- a/src/common/common_service/Cargo.toml +++ b/src/common/common_service/Cargo.toml @@ -30,7 +30,7 @@ thiserror-ext = { workspace = true } tokio = { version = "0.2", package = "madsim-tokio", features = ["rt", "rt-multi-thread", "sync", "macros", "time", "signal"] } tonic = { workspace = true } tower = { version = "0.5", features = ["util", "load-shed"] } -tower-http = { version = "0.5", features = ["add-extension", "compression-gzip"] } +tower-http = { version = "0.6", features = ["add-extension", "compression-gzip"] } tracing = "0.1" [target.'cfg(not(madsim))'.dependencies] diff --git a/src/common/estimate_size/src/collections/btreemap.rs b/src/common/estimate_size/src/collections/btreemap.rs index af9ab3471acec..25d0c5f82c677 100644 --- a/src/common/estimate_size/src/collections/btreemap.rs +++ b/src/common/estimate_size/src/collections/btreemap.rs @@ -224,7 +224,7 @@ pub struct OccupiedEntry<'a, K, V> { heap_size: &'a mut KvSize, } -impl<'a, K, V> OccupiedEntry<'a, K, V> +impl OccupiedEntry<'_, K, V> where K: EstimateSize + Ord, V: EstimateSize, @@ -248,7 +248,7 @@ where heap_size: &'a mut KvSize, } -impl<'a, K, V, F> Iterator for ExtractIf<'a, K, V, F> +impl Iterator for ExtractIf<'_, K, V, F> where K: EstimateSize, V: EstimateSize, diff --git a/src/common/estimate_size/src/collections/mod.rs b/src/common/estimate_size/src/collections/mod.rs index 5bffd5133eddc..c5c8a03d71761 100644 --- a/src/common/estimate_size/src/collections/mod.rs +++ b/src/common/estimate_size/src/collections/mod.rs @@ -85,7 +85,7 @@ where } } -impl<'a, V, S> Drop for MutGuard<'a, V, S> +impl Drop for MutGuard<'_, V, S> where V: EstimateSize, S: private::GenericKvSize, @@ -96,7 +96,7 @@ where } } -impl<'a, V, S> Deref for MutGuard<'a, V, S> +impl Deref for MutGuard<'_, V, S> where V: EstimateSize, S: private::GenericKvSize, @@ -108,7 +108,7 @@ where } } -impl<'a, V, S> DerefMut for MutGuard<'a, V, S> +impl DerefMut for MutGuard<'_, V, S> where V: EstimateSize, S: private::GenericKvSize, diff --git a/src/common/metrics/src/gauge_ext.rs b/src/common/metrics/src/gauge_ext.rs index d357ae5e52cb3..b76f2ac7531d0 100644 --- a/src/common/metrics/src/gauge_ext.rs +++ b/src/common/metrics/src/gauge_ext.rs @@ -30,7 +30,7 @@ impl IntGauge { } } - impl<'a> Drop for Guard<'a> { + impl Drop for Guard<'_> { fn drop(&mut self) { self.gauge.dec(); } diff --git a/src/common/metrics/src/guarded_metrics.rs b/src/common/metrics/src/guarded_metrics.rs index 9b16cc778938c..97ea311455270 100644 --- a/src/common/metrics/src/guarded_metrics.rs +++ b/src/common/metrics/src/guarded_metrics.rs @@ -191,6 +191,8 @@ impl LabelGuardedMetricsInfo { } } +/// An RAII metrics vec with labels. 
+/// /// `LabelGuardedMetricVec` enhances the [`MetricVec`] to ensure the set of labels to be /// correctly removed from the Prometheus client once being dropped. This is useful for metrics /// that are associated with an object that can be dropped, such as streaming jobs, fragments, diff --git a/src/common/proc_macro/src/lib.rs b/src/common/proc_macro/src/lib.rs index ccf7d4c282e2b..52761b5891109 100644 --- a/src/common/proc_macro/src/lib.rs +++ b/src/common/proc_macro/src/lib.rs @@ -268,6 +268,7 @@ pub fn session_config(input: TokenStream) -> TokenStream { /// This proc macro recursively extracts rustdoc comments from the fields in a struct and generates a method /// that produces docs for each field. +/// /// Unlike rustdoc, this tool focuses solely on extracting rustdoc for struct fields, without methods. /// /// Example: diff --git a/src/common/src/array/arrow/arrow_iceberg.rs b/src/common/src/array/arrow/arrow_iceberg.rs index ce15c5e3646e6..e353b57e257c9 100644 --- a/src/common/src/array/arrow/arrow_iceberg.rs +++ b/src/common/src/array/arrow/arrow_iceberg.rs @@ -138,8 +138,9 @@ impl ToArrow for IcebergArrowConvert { impl FromArrow for IcebergArrowConvert {} /// Iceberg sink with `create_table_if_not_exists` option will use this struct to convert the -/// iceberg data type to arrow data type. Specifically, it will add the field id to the -/// arrow field metadata, because iceberg-rust and icelake need the field id to be set. +/// iceberg data type to arrow data type. +/// +/// Specifically, it will add the field id to the arrow field metadata, because iceberg-rust and icelake need the field id to be set. /// /// Note: this is different from [`IcebergArrowConvert`], which is used to read from/write to /// an _existing_ iceberg table. In that case, we just need to make sure the data is compatible to the existing schema. diff --git a/src/common/src/array/bytes_array.rs b/src/common/src/array/bytes_array.rs index 0d62c3d26c6e3..2b19451822ad0 100644 --- a/src/common/src/array/bytes_array.rs +++ b/src/common/src/array/bytes_array.rs @@ -271,7 +271,7 @@ pub struct PartialBytesWriter<'a> { builder: &'a mut BytesArrayBuilder, } -impl<'a> PartialBytesWriter<'a> { +impl PartialBytesWriter<'_> { /// `write_ref` will append partial dirty data to `builder`. /// `PartialBytesWriter::write_ref` is different from `BytesWriter::write_ref` /// in that it allows us to call it multiple times. @@ -287,7 +287,7 @@ impl<'a> PartialBytesWriter<'a> { } } -impl<'a> Drop for PartialBytesWriter<'a> { +impl Drop for PartialBytesWriter<'_> { fn drop(&mut self) { // If `finish` is not called, we should rollback the data. 
self.builder.rollback_partial(); diff --git a/src/common/src/array/data_chunk_iter.rs b/src/common/src/array/data_chunk_iter.rs index 064de8532c690..24d6271b547a8 100644 --- a/src/common/src/array/data_chunk_iter.rs +++ b/src/common/src/array/data_chunk_iter.rs @@ -85,7 +85,7 @@ impl<'a> Iterator for DataChunkRefIter<'a> { } } -impl<'a> FusedIterator for DataChunkRefIter<'a> {} +impl FusedIterator for DataChunkRefIter<'_> {} pub struct DataChunkRefIterWithHoles<'a> { chunk: &'a DataChunk, @@ -132,7 +132,7 @@ mod row_ref { idx: usize, } - impl<'a> std::fmt::Debug for RowRef<'a> { + impl std::fmt::Debug for RowRef<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_list().entries(self.iter()).finish() } diff --git a/src/common/src/array/iterator.rs b/src/common/src/array/iterator.rs index 31518150e6f8d..fa39f1ab2e63a 100644 --- a/src/common/src/array/iterator.rs +++ b/src/common/src/array/iterator.rs @@ -47,8 +47,8 @@ impl<'a, A: Array> Iterator for ArrayIterator<'a, A> { } } -impl<'a, A: Array> ExactSizeIterator for ArrayIterator<'a, A> {} -unsafe impl<'a, A: Array> TrustedLen for ArrayIterator<'a, A> {} +impl ExactSizeIterator for ArrayIterator<'_, A> {} +unsafe impl TrustedLen for ArrayIterator<'_, A> {} #[cfg(test)] mod tests { diff --git a/src/common/src/array/mod.rs b/src/common/src/array/mod.rs index b34e5f9b9c470..e8d4cad8fb5b5 100644 --- a/src/common/src/array/mod.rs +++ b/src/common/src/array/mod.rs @@ -731,10 +731,10 @@ mod test_util { use crate::util::iter_util::ZipEqFast; pub fn hash_finish(hashers: &[H]) -> Vec { - return hashers + hashers .iter() .map(|hasher| hasher.finish()) - .collect::>(); + .collect::>() } pub fn test_hash(arrs: Vec, expects: Vec, hasher_builder: H) { diff --git a/src/common/src/array/utf8_array.rs b/src/common/src/array/utf8_array.rs index 8463f73e3ee58..749a9efe3da13 100644 --- a/src/common/src/array/utf8_array.rs +++ b/src/common/src/array/utf8_array.rs @@ -204,7 +204,7 @@ pub struct PartialStringWriter<'a> { bytes: PartialBytesWriter<'a>, } -impl<'a> PartialStringWriter<'a> { +impl PartialStringWriter<'_> { /// `finish` will be called while the entire record is written. /// Exactly one new record was appended and the `builder` can be safely used. pub fn finish(self) { diff --git a/src/common/src/bitmap.rs b/src/common/src/bitmap.rs index ae07105164408..22869b23bc1d8 100644 --- a/src/common/src/bitmap.rs +++ b/src/common/src/bitmap.rs @@ -325,7 +325,7 @@ impl Bitmap { /// Returns the length of vector to store `num_bits` bits. 
fn vec_len(num_bits: usize) -> usize { - (num_bits + BITS - 1) / BITS + num_bits.div_ceil(BITS) } /// Returns the number of valid bits in the bitmap, @@ -468,7 +468,7 @@ impl From for Bitmap { } } -impl<'a, 'b> BitAnd<&'b Bitmap> for &'a Bitmap { +impl<'b> BitAnd<&'b Bitmap> for &Bitmap { type Output = Bitmap; fn bitand(self, rhs: &'b Bitmap) -> Bitmap { @@ -488,7 +488,7 @@ impl<'a, 'b> BitAnd<&'b Bitmap> for &'a Bitmap { } } -impl<'a> BitAnd for &'a Bitmap { +impl BitAnd for &Bitmap { type Output = Bitmap; fn bitand(self, rhs: Bitmap) -> Self::Output { @@ -524,7 +524,7 @@ impl BitAndAssign for Bitmap { } } -impl<'a, 'b> BitOr<&'b Bitmap> for &'a Bitmap { +impl<'b> BitOr<&'b Bitmap> for &Bitmap { type Output = Bitmap; fn bitor(self, rhs: &'b Bitmap) -> Bitmap { @@ -544,7 +544,7 @@ impl<'a, 'b> BitOr<&'b Bitmap> for &'a Bitmap { } } -impl<'a> BitOr for &'a Bitmap { +impl BitOr for &Bitmap { type Output = Bitmap; fn bitor(self, rhs: Bitmap) -> Self::Output { @@ -599,7 +599,7 @@ impl BitXor for &Bitmap { } } -impl<'a> Not for &'a Bitmap { +impl Not for &Bitmap { type Output = Bitmap; fn not(self) -> Self::Output { @@ -700,7 +700,7 @@ pub struct BitmapIter<'a> { all_ones: bool, } -impl<'a> BitmapIter<'a> { +impl BitmapIter<'_> { fn next_always_load_usize(&mut self) -> Option { if self.idx >= self.num_bits { return None; @@ -724,7 +724,7 @@ impl<'a> BitmapIter<'a> { } } -impl<'a> iter::Iterator for BitmapIter<'a> { +impl iter::Iterator for BitmapIter<'_> { type Item = bool; fn next(&mut self) -> Option { @@ -778,7 +778,7 @@ pub enum BitmapOnesIter<'a> { }, } -impl<'a> iter::Iterator for BitmapOnesIter<'a> { +impl iter::Iterator for BitmapOnesIter<'_> { type Item = usize; fn next(&mut self) -> Option { diff --git a/src/common/src/cache.rs b/src/common/src/cache.rs index e86ef432eea95..678a101ae62a6 100644 --- a/src/common/src/cache.rs +++ b/src/common/src/cache.rs @@ -877,13 +877,13 @@ pub struct CleanCacheGuard<'a, K: LruKey + Clone + 'static, T: LruValue + 'stati hash: u64, } -impl<'a, K: LruKey + Clone + 'static, T: LruValue + 'static> CleanCacheGuard<'a, K, T> { +impl CleanCacheGuard<'_, K, T> { fn mark_success(mut self) -> K { self.key.take().unwrap() } } -impl<'a, K: LruKey + Clone + 'static, T: LruValue + 'static> Drop for CleanCacheGuard<'a, K, T> { +impl Drop for CleanCacheGuard<'_, K, T> { fn drop(&mut self) { if let Some(key) = self.key.as_ref() { self.cache.clear_pending_request(key, self.hash); diff --git a/src/common/src/config.rs b/src/common/src/config.rs index b2020b2ac0542..c3fc78919a3ef 100644 --- a/src/common/src/config.rs +++ b/src/common/src/config.rs @@ -116,7 +116,7 @@ pub trait OverrideConfig { fn r#override(&self, config: &mut RwConfig); } -impl<'a, T: OverrideConfig> OverrideConfig for &'a T { +impl OverrideConfig for &T { fn r#override(&self, config: &mut RwConfig) { T::r#override(self, config) } diff --git a/src/common/src/hash/key.rs b/src/common/src/hash/key.rs index 96928e69f4a83..1c6f60f6c621a 100644 --- a/src/common/src/hash/key.rs +++ b/src/common/src/hash/key.rs @@ -480,7 +480,7 @@ impl HashKeyDe for Int256 { } } -impl<'a> HashKeySer<'a> for Serial { +impl HashKeySer<'_> for Serial { fn serialize_into(self, mut buf: impl BufMut) { buf.put_i64_ne(self.as_row_id()); } diff --git a/src/common/src/lib.rs b/src/common/src/lib.rs index e3417853b0201..b44aba68b0985 100644 --- a/src/common/src/lib.rs +++ b/src/common/src/lib.rs @@ -18,7 +18,6 @@ )] #![feature(extract_if)] #![feature(trait_alias)] -#![feature(is_sorted)] #![feature(type_alias_impl_trait)] 
#![feature(test)] #![feature(trusted_len)] @@ -35,7 +34,6 @@ #![feature(iter_order_by)] #![feature(binary_heap_into_iter_sorted)] #![feature(impl_trait_in_assoc_type)] -#![feature(map_entry_replace)] #![feature(negative_impls)] #![feature(register_tool)] #![feature(btree_cursors)] @@ -93,6 +91,7 @@ pub mod test_utils; pub mod transaction; pub mod types; pub mod vnode_mapping; + pub mod test_prelude { pub use super::array::{DataChunkTestExt, StreamChunkTestExt}; pub use super::catalog::test_utils::ColumnDescTestExt; diff --git a/src/common/src/row/mod.rs b/src/common/src/row/mod.rs index 0b2181105352b..2aba5ce074c6d 100644 --- a/src/common/src/row/mod.rs +++ b/src/common/src/row/mod.rs @@ -187,7 +187,7 @@ pub trait RowExt: Row { fn display(&self) -> impl Display + '_ { struct D<'a, T: Row>(&'a T); - impl<'a, T: Row> Display for D<'a, T> { + impl Display for D<'_, T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, diff --git a/src/common/src/row/project.rs b/src/common/src/row/project.rs index 542ea6417051b..9c4ee3231b6a4 100644 --- a/src/common/src/row/project.rs +++ b/src/common/src/row/project.rs @@ -24,12 +24,12 @@ pub struct Project<'i, R> { indices: &'i [usize], } -impl<'i, R: Row> PartialEq for Project<'i, R> { +impl PartialEq for Project<'_, R> { fn eq(&self, other: &Self) -> bool { self.iter().eq(other.iter()) } } -impl<'i, R: Row> Eq for Project<'i, R> {} +impl Eq for Project<'_, R> {} impl<'i, R: Row> Row for Project<'i, R> { #[inline] diff --git a/src/common/src/types/mod.rs b/src/common/src/types/mod.rs index 54b6c0718d905..44be87116643c 100644 --- a/src/common/src/types/mod.rs +++ b/src/common/src/types/mod.rs @@ -951,7 +951,7 @@ impl ScalarImpl { } } -impl<'a> ScalarRefImpl<'a> { +impl ScalarRefImpl<'_> { /// Converts [`ScalarRefImpl`] to [`ScalarImpl`] pub fn into_scalar_impl(self) -> ScalarImpl { dispatch_scalar_ref_variants!(self, inner, { inner.to_owned_scalar().into() }) diff --git a/src/common/src/types/num256.rs b/src/common/src/types/num256.rs index 2acbcf636a037..6c96b3ddbbec8 100644 --- a/src/common/src/types/num256.rs +++ b/src/common/src/types/num256.rs @@ -42,7 +42,7 @@ pub struct Int256(pub(crate) Box); #[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd)] pub struct Int256Ref<'a>(pub &'a i256); -impl<'a> Display for Int256Ref<'a> { +impl Display for Int256Ref<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { self.write(f) } @@ -208,7 +208,7 @@ impl Int256 { } } -impl<'a> Int256Ref<'a> { +impl Int256Ref<'_> { pub fn memcmp_serialize( &self, serializer: &mut memcomparable::Serializer, diff --git a/src/common/src/types/scalar_impl.rs b/src/common/src/types/scalar_impl.rs index 43742f74c7b51..9384961e4a890 100644 --- a/src/common/src/types/scalar_impl.rs +++ b/src/common/src/types/scalar_impl.rs @@ -145,7 +145,7 @@ impl Scalar for bool { } /// Implement `ScalarRef` for `bool`. -impl<'a> ScalarRef<'a> for bool { +impl ScalarRef<'_> for bool { type ScalarType = bool; fn to_owned_scalar(&self) -> bool { @@ -167,7 +167,7 @@ impl Scalar for Decimal { } /// Implement `ScalarRef` for `Decimal`. -impl<'a> ScalarRef<'a> for Decimal { +impl ScalarRef<'_> for Decimal { type ScalarType = Decimal; fn to_owned_scalar(&self) -> Decimal { @@ -189,7 +189,7 @@ impl Scalar for Interval { } /// Implement `ScalarRef` for `Interval`. 
-impl<'a> ScalarRef<'a> for Interval { +impl ScalarRef<'_> for Interval { type ScalarType = Interval; fn to_owned_scalar(&self) -> Interval { @@ -211,7 +211,7 @@ impl Scalar for Date { } /// Implement `ScalarRef` for `Date`. -impl<'a> ScalarRef<'a> for Date { +impl ScalarRef<'_> for Date { type ScalarType = Date; fn to_owned_scalar(&self) -> Date { @@ -233,7 +233,7 @@ impl Scalar for Timestamp { } /// Implement `ScalarRef` for `Timestamp`. -impl<'a> ScalarRef<'a> for Timestamp { +impl ScalarRef<'_> for Timestamp { type ScalarType = Timestamp; fn to_owned_scalar(&self) -> Timestamp { @@ -255,7 +255,7 @@ impl Scalar for Time { } /// Implement `ScalarRef` for `Time`. -impl<'a> ScalarRef<'a> for Time { +impl ScalarRef<'_> for Time { type ScalarType = Time; fn to_owned_scalar(&self) -> Time { @@ -277,7 +277,7 @@ impl Scalar for Timestamptz { } /// Implement `ScalarRef` for `Timestamptz`. -impl<'a> ScalarRef<'a> for Timestamptz { +impl ScalarRef<'_> for Timestamptz { type ScalarType = Timestamptz; fn to_owned_scalar(&self) -> Timestamptz { @@ -322,7 +322,7 @@ impl ScalarImpl { } } -impl<'scalar> ScalarRefImpl<'scalar> { +impl ScalarRefImpl<'_> { pub fn get_ident(&self) -> &'static str { dispatch_scalar_ref_variants!(self, [I = VARIANT_NAME], { I }) } diff --git a/src/common/src/types/with_data_type.rs b/src/common/src/types/with_data_type.rs index 257d8c7f15dc1..1001a277b316d 100644 --- a/src/common/src/types/with_data_type.rs +++ b/src/common/src/types/with_data_type.rs @@ -92,7 +92,7 @@ impl_with_data_type!(rust_decimal::Decimal, DataType::Decimal); impl_with_data_type!(Decimal, DataType::Decimal); impl_with_data_type!(Serial, DataType::Serial); -impl<'a> WithDataType for &'a str { +impl WithDataType for &str { fn default_data_type() -> DataType { DataType::Varchar } @@ -109,7 +109,7 @@ impl_with_data_type!(Vec, DataType::Bytea); impl_with_data_type!(Bytes, DataType::Bytea); impl_with_data_type!(JsonbVal, DataType::Jsonb); -impl<'a> WithDataType for JsonbRef<'a> { +impl WithDataType for JsonbRef<'_> { fn default_data_type() -> DataType { DataType::Jsonb } diff --git a/src/common/src/util/hash_util.rs b/src/common/src/util/hash_util.rs index 264e4783786bb..85654580827e0 100644 --- a/src/common/src/util/hash_util.rs +++ b/src/common/src/util/hash_util.rs @@ -15,10 +15,10 @@ use std::hash::{BuildHasher, Hasher}; pub fn finalize_hashers(hashers: &[H]) -> Vec { - return hashers + hashers .iter() .map(|hasher| hasher.finish()) - .collect::>(); + .collect::>() } #[derive(Clone, Copy)] diff --git a/src/common/src/util/prost.rs b/src/common/src/util/prost.rs index 8145a37a8a202..4ea799f1befeb 100644 --- a/src/common/src/util/prost.rs +++ b/src/common/src/util/prost.rs @@ -32,7 +32,7 @@ impl TypeUrl for batch_plan::ExchangeNode { pub struct StackTraceResponseOutput<'a>(&'a StackTraceResponse); -impl<'a> Deref for StackTraceResponseOutput<'a> { +impl Deref for StackTraceResponseOutput<'_> { type Target = StackTraceResponse; fn deref(&self) -> &Self::Target { @@ -40,7 +40,7 @@ impl<'a> Deref for StackTraceResponseOutput<'a> { } } -impl<'a> Display for StackTraceResponseOutput<'a> { +impl Display for StackTraceResponseOutput<'_> { fn fmt(&self, s: &mut Formatter<'_>) -> std::fmt::Result { if !self.actor_traces.is_empty() { writeln!(s, "--- Actor Traces ---")?; diff --git a/src/common/src/util/recursive.rs b/src/common/src/util/recursive.rs index 2869b3c496335..839bcb72e5f3b 100644 --- a/src/common/src/util/recursive.rs +++ b/src/common/src/util/recursive.rs @@ -82,7 +82,7 @@ impl Tracker { } } - impl<'a> 
Drop for DepthGuard<'a> { + impl Drop for DepthGuard<'_> { fn drop(&mut self) { let mut d = self.depth.borrow_mut(); d.last_max = d.last_max.max(d.current); // update the last max depth diff --git a/src/compute/src/server.rs b/src/compute/src/server.rs index 909b4f96b7a14..da3328b0b2ced 100644 --- a/src/compute/src/server.rs +++ b/src/compute/src/server.rs @@ -256,7 +256,7 @@ pub async fn compute_node_serve( compactor_context, hummock_meta_client.clone(), storage.sstable_object_id_manager().clone(), - storage.filter_key_extractor_manager().clone(), + storage.compaction_catalog_manager_ref().clone(), ); sub_tasks.push((handle, shutdown_sender)); } diff --git a/src/connector/Cargo.toml b/src/connector/Cargo.toml index 9fd49fea88c7b..44fb2d7ba840f 100644 --- a/src/connector/Cargo.toml +++ b/src/connector/Cargo.toml @@ -73,9 +73,7 @@ jsonwebtoken = "9.2.0" maplit = "1.0.2" moka = { version = "0.12.0", features = ["future"] } mongodb = { version = "2.8.2", features = ["tokio-runtime"] } -mysql_async = { version = "0.34", default-features = false, features = [ - "default", -] } +mysql_async = { workspace = true } mysql_common = { version = "0.32", default-features = false, features = [ "chrono", ] } diff --git a/src/connector/codec/tests/integration_tests/utils.rs b/src/connector/codec/tests/integration_tests/utils.rs index 889dbeffc306f..021913edf033d 100644 --- a/src/connector/codec/tests/integration_tests/utils.rs +++ b/src/connector/codec/tests/integration_tests/utils.rs @@ -23,7 +23,7 @@ use risingwave_pb::plan_common::AdditionalColumn; /// More concise display for `DataType`, to use in tests. pub struct DataTypeTestDisplay<'a>(pub &'a DataType); -impl<'a> std::fmt::Debug for DataTypeTestDisplay<'a> { +impl std::fmt::Debug for DataTypeTestDisplay<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.0 { DataType::Struct(s) => { @@ -68,7 +68,7 @@ impl<'a> std::fmt::Debug for DataTypeTestDisplay<'a> { /// More concise display for `ScalarRefImpl`, to use in tests. pub struct ScalarRefImplTestDisplay<'a>(pub ScalarRefImpl<'a>); -impl<'a> std::fmt::Debug for ScalarRefImplTestDisplay<'a> { +impl std::fmt::Debug for ScalarRefImplTestDisplay<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.0 { ScalarRefImpl::Struct(s) => { @@ -112,7 +112,7 @@ impl<'a> std::fmt::Debug for ScalarRefImplTestDisplay<'a> { /// More concise display for `ScalarImpl`, to use in tests. pub struct ScalarImplTestDisplay<'a>(pub &'a ScalarImpl); -impl<'a> std::fmt::Debug for ScalarImplTestDisplay<'a> { +impl std::fmt::Debug for ScalarImplTestDisplay<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { ScalarRefImplTestDisplay(self.0.as_scalar_ref_impl()).fmt(f) } @@ -121,7 +121,7 @@ impl<'a> std::fmt::Debug for ScalarImplTestDisplay<'a> { /// More concise display for `DatumRef`, to use in tests. pub struct DatumRefTestDisplay<'a>(pub DatumRef<'a>); -impl<'a> std::fmt::Debug for DatumRefTestDisplay<'a> { +impl std::fmt::Debug for DatumRefTestDisplay<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.0 { Some(scalar) => ScalarRefImplTestDisplay(scalar).fmt(f), @@ -133,7 +133,7 @@ impl<'a> std::fmt::Debug for DatumRefTestDisplay<'a> { /// More concise display for `Datum`, to use in tests. 
pub struct DatumTestDisplay<'a>(pub &'a Datum); -impl<'a> std::fmt::Debug for DatumTestDisplay<'a> { +impl std::fmt::Debug for DatumTestDisplay<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { DatumRefTestDisplay(self.0.to_datum_ref()).fmt(f) } @@ -142,7 +142,7 @@ impl<'a> std::fmt::Debug for DatumTestDisplay<'a> { /// More concise display for `DatumCow`, to use in tests. pub struct DatumCowTestDisplay<'a>(pub &'a DatumCow<'a>); -impl<'a> std::fmt::Debug for DatumCowTestDisplay<'a> { +impl std::fmt::Debug for DatumCowTestDisplay<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.0 { DatumCow::Borrowed(datum_ref) => { @@ -164,7 +164,7 @@ impl<'a> std::fmt::Debug for DatumCowTestDisplay<'a> { /// More concise display for `ColumnDesc`, to use in tests. pub struct ColumnDescTestDisplay<'a>(pub &'a ColumnDesc); -impl<'a> std::fmt::Debug for ColumnDescTestDisplay<'a> { +impl std::fmt::Debug for ColumnDescTestDisplay<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let ColumnDesc { data_type, diff --git a/src/connector/src/parser/mod.rs b/src/connector/src/parser/mod.rs index 2142914aa2503..d8bd4a9dbcd56 100644 --- a/src/connector/src/parser/mod.rs +++ b/src/connector/src/parser/mod.rs @@ -44,7 +44,7 @@ use thiserror_ext::AsReport; use self::avro::AvroAccessBuilder; use self::bytes_parser::BytesAccessBuilder; -pub use self::mysql::mysql_row_to_owned_row; +pub use self::mysql::{mysql_datum_to_rw_datum, mysql_row_to_owned_row}; use self::plain_parser::PlainParser; pub use self::postgres::postgres_row_to_owned_row; use self::simd_json_parser::DebeziumJsonAccessBuilder; @@ -445,22 +445,22 @@ impl SourceStreamChunkRowWriter<'_> { } (_, &Some(AdditionalColumnType::Partition(_))) => { // the meta info does not involve spec connector - return Ok(A::output_for( + Ok(A::output_for( self.row_meta .as_ref() .map(|ele| ScalarRefImpl::Utf8(ele.split_id)), - )); + )) } (_, &Some(AdditionalColumnType::Offset(_))) => { // the meta info does not involve spec connector - return Ok(A::output_for( + Ok(A::output_for( self.row_meta .as_ref() .map(|ele| ScalarRefImpl::Utf8(ele.offset)), - )); + )) } (_, &Some(AdditionalColumnType::HeaderInner(ref header_inner))) => { - return Ok(A::output_for( + Ok(A::output_for( self.row_meta .as_ref() .and_then(|ele| { @@ -473,21 +473,19 @@ impl SourceStreamChunkRowWriter<'_> { .unwrap_or(Datum::None.into()), )) } - (_, &Some(AdditionalColumnType::Headers(_))) => { - return Ok(A::output_for( - self.row_meta - .as_ref() - .and_then(|ele| extract_headers_from_meta(ele.meta)) - .unwrap_or(None), - )) - } + (_, &Some(AdditionalColumnType::Headers(_))) => Ok(A::output_for( + self.row_meta + .as_ref() + .and_then(|ele| extract_headers_from_meta(ele.meta)) + .unwrap_or(None), + )), (_, &Some(AdditionalColumnType::Filename(_))) => { // Filename is used as partition in FS connectors - return Ok(A::output_for( + Ok(A::output_for( self.row_meta .as_ref() .map(|ele| ScalarRefImpl::Utf8(ele.split_id)), - )); + )) } (_, &Some(AdditionalColumnType::Payload(_))) => { // ingest the whole payload as a single column diff --git a/src/connector/src/parser/mysql.rs b/src/connector/src/parser/mysql.rs index fe9b77c643de7..e9a8eeba70cb3 100644 --- a/src/connector/src/parser/mysql.rs +++ b/src/connector/src/parser/mysql.rs @@ -14,128 +14,201 @@ use std::sync::LazyLock; -use chrono::NaiveDate; use mysql_async::Row as MysqlRow; use risingwave_common::catalog::Schema; use risingwave_common::log::LogSuppresser; use 
risingwave_common::row::OwnedRow; -use risingwave_common::types::{ - DataType, Date, Decimal, JsonbVal, ScalarImpl, Time, Timestamp, Timestamptz, -}; -use rust_decimal::Decimal as RustDecimal; use thiserror_ext::AsReport; use crate::parser::util::log_error; static LOG_SUPPERSSER: LazyLock = LazyLock::new(LogSuppresser::default); +use anyhow::anyhow; +use chrono::NaiveDate; +use risingwave_common::bail; +use risingwave_common::types::{ + DataType, Date, Datum, Decimal, JsonbVal, ScalarImpl, Time, Timestamp, Timestamptz, +}; +use rust_decimal::Decimal as RustDecimal; macro_rules! handle_data_type { ($row:expr, $i:expr, $name:expr, $type:ty) => {{ - let res = $row.take_opt::, _>($i).unwrap_or(Ok(None)); - match res { - Ok(val) => val.map(|v| ScalarImpl::from(v)), - Err(err) => { - log_error!($name, err, "parse column failed"); - None - } + match $row.take_opt::, _>($i) { + None => bail!("no value found at column: {}, index: {}", $name, $i), + Some(Ok(val)) => Ok(val.map(|v| ScalarImpl::from(v))), + Some(Err(e)) => Err(anyhow::Error::new(e.clone()) + .context("failed to deserialize MySQL value into rust value") + .context(format!( + "column: {}, index: {}, rust_type: {}", + $name, + $i, + stringify!($type), + ))), } }}; ($row:expr, $i:expr, $name:expr, $type:ty, $rw_type:ty) => {{ - let res = $row.take_opt::, _>($i).unwrap_or(Ok(None)); - match res { - Ok(val) => val.map(|v| ScalarImpl::from(<$rw_type>::from(v))), - Err(err) => { - log_error!($name, err, "parse column failed"); - None - } + match $row.take_opt::, _>($i) { + None => bail!("no value found at column: {}, index: {}", $name, $i), + Some(Ok(val)) => Ok(val.map(|v| ScalarImpl::from(<$rw_type>::from(v)))), + Some(Err(e)) => Err(anyhow::Error::new(e.clone()) + .context("failed to deserialize MySQL value into rust value") + .context(format!( + "column: {}, index: {}, rust_type: {}", + $name, + $i, + stringify!($ty), + ))), } }}; } +/// The decoding result can be interpreted as follows: +/// Ok(value) => The value was found and successfully decoded. +/// Err(error) => The value was found but could not be decoded, +/// either because it was not supported, +/// or there was an error during conversion. 
+pub fn mysql_datum_to_rw_datum( + mysql_row: &mut MysqlRow, + mysql_datum_index: usize, + column_name: &str, + rw_data_type: &DataType, +) -> Result { + match rw_data_type { + DataType::Boolean => { + // Bit(1) + match mysql_row.take_opt::>, _>(mysql_datum_index) { + None => bail!( + "no value found at column: {}, index: {}", + column_name, + mysql_datum_index + ), + Some(Ok(val)) => match val { + None => Ok(None), + Some(val) => match val.as_slice() { + [0] => Ok(Some(ScalarImpl::from(false))), + [1] => Ok(Some(ScalarImpl::from(true))), + _ => Err(anyhow!("invalid value for boolean: {:?}", val)), + }, + }, + Some(Err(e)) => Err(anyhow::Error::new(e.clone()) + .context("failed to deserialize MySQL value into rust value") + .context(format!( + "column: {}, index: {}, rust_type: Vec", + column_name, mysql_datum_index, + ))), + } + } + DataType::Int16 => { + handle_data_type!(mysql_row, mysql_datum_index, column_name, i16) + } + DataType::Int32 => { + handle_data_type!(mysql_row, mysql_datum_index, column_name, i32) + } + DataType::Int64 => { + handle_data_type!(mysql_row, mysql_datum_index, column_name, i64) + } + DataType::Float32 => { + handle_data_type!(mysql_row, mysql_datum_index, column_name, f32) + } + DataType::Float64 => { + handle_data_type!(mysql_row, mysql_datum_index, column_name, f64) + } + DataType::Decimal => { + handle_data_type!( + mysql_row, + mysql_datum_index, + column_name, + RustDecimal, + Decimal + ) + } + DataType::Varchar => { + handle_data_type!(mysql_row, mysql_datum_index, column_name, String) + } + DataType::Date => { + handle_data_type!(mysql_row, mysql_datum_index, column_name, NaiveDate, Date) + } + DataType::Time => { + handle_data_type!( + mysql_row, + mysql_datum_index, + column_name, + chrono::NaiveTime, + Time + ) + } + DataType::Timestamp => { + handle_data_type!( + mysql_row, + mysql_datum_index, + column_name, + chrono::NaiveDateTime, + Timestamp + ) + } + DataType::Timestamptz => { + match mysql_row.take_opt::, _>(mysql_datum_index) { + None => bail!( + "no value found at column: {}, index: {}", + column_name, + mysql_datum_index + ), + Some(Ok(val)) => Ok(val.map(|v| { + ScalarImpl::from(Timestamptz::from_micros(v.and_utc().timestamp_micros())) + })), + Some(Err(err)) => Err(anyhow::Error::new(err.clone()) + .context("failed to deserialize MySQL value into rust value") + .context(format!( + "column: {}, index: {}, rust_type: chrono::NaiveDateTime", + column_name, mysql_datum_index, + ))), + } + } + DataType::Bytea => match mysql_row.take_opt::>, _>(mysql_datum_index) { + None => bail!( + "no value found at column: {}, index: {}", + column_name, + mysql_datum_index + ), + Some(Ok(val)) => Ok(val.map(ScalarImpl::from)), + Some(Err(err)) => Err(anyhow::Error::new(err.clone()) + .context("failed to deserialize MySQL value into rust value") + .context(format!( + "column: {}, index: {}, rust_type: Vec", + column_name, mysql_datum_index, + ))), + }, + DataType::Jsonb => { + handle_data_type!( + mysql_row, + mysql_datum_index, + column_name, + serde_json::Value, + JsonbVal + ) + } + DataType::Interval + | DataType::Struct(_) + | DataType::List(_) + | DataType::Int256 + | DataType::Serial + | DataType::Map(_) => Err(anyhow!( + "unsupported data type: {}, set to null", + rw_data_type + )), + } +} + pub fn mysql_row_to_owned_row(mysql_row: &mut MysqlRow, schema: &Schema) -> OwnedRow { let mut datums = vec![]; for i in 0..schema.fields.len() { let rw_field = &schema.fields[i]; let name = rw_field.name.as_str(); - let datum = { - match rw_field.data_type { - 
DataType::Boolean => { - handle_data_type!(mysql_row, i, name, bool) - } - DataType::Int16 => { - handle_data_type!(mysql_row, i, name, i16) - } - DataType::Int32 => { - handle_data_type!(mysql_row, i, name, i32) - } - DataType::Int64 => { - handle_data_type!(mysql_row, i, name, i64) - } - DataType::Float32 => { - handle_data_type!(mysql_row, i, name, f32) - } - DataType::Float64 => { - handle_data_type!(mysql_row, i, name, f64) - } - DataType::Decimal => { - handle_data_type!(mysql_row, i, name, RustDecimal, Decimal) - } - DataType::Varchar => { - handle_data_type!(mysql_row, i, name, String) - } - DataType::Date => { - handle_data_type!(mysql_row, i, name, NaiveDate, Date) - } - DataType::Time => { - handle_data_type!(mysql_row, i, name, chrono::NaiveTime, Time) - } - DataType::Timestamp => { - handle_data_type!(mysql_row, i, name, chrono::NaiveDateTime, Timestamp) - } - DataType::Timestamptz => { - let res = mysql_row - .take_opt::, _>(i) - .unwrap_or(Ok(None)); - match res { - Ok(val) => val.map(|v| { - ScalarImpl::from(Timestamptz::from_micros( - v.and_utc().timestamp_micros(), - )) - }), - Err(err) => { - log_error!(name, err, "parse column failed"); - None - } - } - } - DataType::Bytea => { - let res = mysql_row - .take_opt::>, _>(i) - .unwrap_or(Ok(None)); - match res { - Ok(val) => val.map(|v| ScalarImpl::from(v.into_boxed_slice())), - Err(err) => { - log_error!(name, err, "parse column failed"); - None - } - } - } - DataType::Jsonb => { - handle_data_type!(mysql_row, i, name, serde_json::Value, JsonbVal) - } - DataType::Interval - | DataType::Struct(_) - | DataType::List(_) - | DataType::Int256 - | DataType::Serial - | DataType::Map(_) => { - // Interval, Struct, List, Int256 are not supported - // XXX: is this branch reachable? - if let Ok(suppressed_count) = LOG_SUPPERSSER.check() { - tracing::warn!(column = rw_field.name, ?rw_field.data_type, suppressed_count, "unsupported data type, set to null"); - } - None - } + let datum = match mysql_datum_to_rw_datum(mysql_row, i, name, &rw_field.data_type) { + Ok(val) => val, + Err(e) => { + log_error!(name, e, "parse column failed"); + None } }; datums.push(datum); diff --git a/src/connector/src/parser/parquet_parser.rs b/src/connector/src/parser/parquet_parser.rs index db2ace3d2b6dd..4657a518991fc 100644 --- a/src/connector/src/parser/parquet_parser.rs +++ b/src/connector/src/parser/parquet_parser.rs @@ -87,11 +87,10 @@ impl ParquetParser { /// # Returns /// /// A `StreamChunk` containing the converted data from the `RecordBatch`. - - // The hidden columns that must be included here are _rw_file and _rw_offset. - // Depending on whether the user specifies a primary key (pk), there may be an additional hidden column row_id. - // Therefore, the maximum number of hidden columns is three. - + /// + /// The hidden columns that must be included here are `_rw_file` and `_rw_offset`. + /// Depending on whether the user specifies a primary key (pk), there may be an additional hidden column `row_id`. + /// Therefore, the maximum number of hidden columns is three. 
     fn convert_record_batch_to_stream_chunk(
         &mut self,
         record_batch: RecordBatch,
diff --git a/src/connector/src/parser/plain_parser.rs b/src/connector/src/parser/plain_parser.rs
index e9c9436fd295f..9a7cb491e697b 100644
--- a/src/connector/src/parser/plain_parser.rs
+++ b/src/connector/src/parser/plain_parser.rs
@@ -163,7 +163,7 @@ impl PlainParser {
             row_op.with_value(self.payload_builder.generate_accessor(data).await?);
         }
 
-        writer.do_insert(|column: &SourceColumnDesc| row_op.access_field(column))?;
+        writer.do_insert(|column: &SourceColumnDesc| row_op.access_field::<false>(column))?;
 
         Ok(ParseResult::Rows)
     }
diff --git a/src/connector/src/parser/unified/kv_event.rs b/src/connector/src/parser/unified/kv_event.rs
index 6ab7925b9bb48..f58ed65a54564 100644
--- a/src/connector/src/parser/unified/kv_event.rs
+++ b/src/connector/src/parser/unified/kv_event.rs
@@ -76,13 +76,19 @@ where
         }
     }
 
-    pub fn access_field(&self, desc: &SourceColumnDesc) -> AccessResult<DatumCow<'_>> {
-        match desc.additional_column.column_type {
-            Some(AdditionalColumnType::Key(_)) => self.access_key(&[&desc.name], &desc.data_type),
+    pub fn access_field<const KEY_ONLY: bool>(
+        &self,
+        desc: &SourceColumnDesc,
+    ) -> AccessResult<DatumCow<'_>> {
+        match (&desc.additional_column.column_type, KEY_ONLY) {
+            (Some(AdditionalColumnType::Key(_)), _) => {
+                self.access_key(&[&desc.name], &desc.data_type)
+            }
             // hack here: Get the whole payload as a single column
             // use a special mark empty slice as path to represent the whole payload
-            Some(AdditionalColumnType::Payload(_)) => self.access_value(&[], &desc.data_type),
-            None => self.access_value(&[&desc.name], &desc.data_type),
+            (Some(AdditionalColumnType::Payload(_)), _) => self.access_value(&[], &desc.data_type),
+            (None, false) => self.access_value(&[&desc.name], &desc.data_type),
+            (_, true) => Ok(DatumCow::Owned(None)),
             _ => unreachable!(),
         }
     }
diff --git a/src/connector/src/parser/upsert_parser.rs b/src/connector/src/parser/upsert_parser.rs
index c7bcce9f86a86..54f33ff69ce5c 100644
--- a/src/connector/src/parser/upsert_parser.rs
+++ b/src/connector/src/parser/upsert_parser.rs
@@ -104,10 +104,16 @@ impl UpsertParser {
         } else {
             change_event_op = ChangeEventOperation::Delete;
         }
-        let f = |column: &SourceColumnDesc| row_op.access_field(column);
+
         match change_event_op {
-            ChangeEventOperation::Upsert => writer.do_insert(f)?,
-            ChangeEventOperation::Delete => writer.do_delete(f)?,
+            ChangeEventOperation::Upsert => {
+                let f = |column: &SourceColumnDesc| row_op.access_field::<false>(column);
+                writer.do_insert(f)?
+            }
+            ChangeEventOperation::Delete => {
+                let f = |column: &SourceColumnDesc| row_op.access_field::<true>(column);
+                writer.do_delete(f)?
+ } } Ok(()) } diff --git a/src/connector/src/sink/elasticsearch_opensearch/elasticsearch_converter.rs b/src/connector/src/sink/elasticsearch_opensearch/elasticsearch_converter.rs index 195c976ba2b76..80dc5e0f1ca6e 100644 --- a/src/connector/src/sink/elasticsearch_opensearch/elasticsearch_converter.rs +++ b/src/connector/src/sink/elasticsearch_opensearch/elasticsearch_converter.rs @@ -28,6 +28,7 @@ use super::elasticsearch_opensearch_config::{ use super::elasticsearch_opensearch_formatter::{BuildBulkPara, ElasticSearchOpenSearchFormatter}; use crate::sink::Result; +#[expect(clippy::large_enum_variant)] pub enum StreamChunkConverter { Es(EsStreamChunkConverter), Other, diff --git a/src/connector/src/sink/formatter/mod.rs b/src/connector/src/sink/formatter/mod.rs index bb0a41f63c33d..6b8bcfa4a8c93 100644 --- a/src/connector/src/sink/formatter/mod.rs +++ b/src/connector/src/sink/formatter/mod.rs @@ -49,6 +49,7 @@ pub trait SinkFormatter { /// For example append-only without `primary_key` (aka `downstream_pk`) set. /// * Value may be None so that messages with same key are removed during log compaction. /// For example debezium tombstone event. + #[expect(clippy::type_complexity)] fn format_chunk( &self, chunk: &StreamChunk, diff --git a/src/connector/src/sink/iceberg/prometheus/monitored_partition_writer.rs b/src/connector/src/sink/iceberg/prometheus/monitored_partition_writer.rs index c5fb3bcc906b5..8a5352772e7c1 100644 --- a/src/connector/src/sink/iceberg/prometheus/monitored_partition_writer.rs +++ b/src/connector/src/sink/iceberg/prometheus/monitored_partition_writer.rs @@ -28,6 +28,7 @@ pub struct MonitoredFanoutPartitionedWriterBuilder { } impl MonitoredFanoutPartitionedWriterBuilder { + #[expect(dead_code)] pub fn new( inner: FanoutPartitionedWriterBuilder, partition_num: LabelGuardedIntGauge<2>, diff --git a/src/connector/src/sink/iceberg/prometheus/monitored_write_writer.rs b/src/connector/src/sink/iceberg/prometheus/monitored_write_writer.rs index 634e9ac968f89..90d0bf575d56b 100644 --- a/src/connector/src/sink/iceberg/prometheus/monitored_write_writer.rs +++ b/src/connector/src/sink/iceberg/prometheus/monitored_write_writer.rs @@ -28,6 +28,7 @@ pub struct MonitoredWriteWriterBuilder { impl MonitoredWriteWriterBuilder { /// Create writer context. 
+ #[expect(dead_code)] pub fn new( inner: B, write_qps: LabelGuardedIntCounter<2>, diff --git a/src/connector/src/sink/mod.rs b/src/connector/src/sink/mod.rs index 2bbbb95582447..2111b33e29ef7 100644 --- a/src/connector/src/sink/mod.rs +++ b/src/connector/src/sink/mod.rs @@ -693,12 +693,10 @@ impl SinkCommitCoordinator for DummySinkCommitCoordinator { impl SinkImpl { pub fn new(mut param: SinkParam) -> Result { - const CONNECTION_NAME_KEY: &str = "connection.name"; const PRIVATE_LINK_TARGET_KEY: &str = "privatelink.targets"; // remove privatelink related properties if any param.properties.remove(PRIVATE_LINK_TARGET_KEY); - param.properties.remove(CONNECTION_NAME_KEY); let sink_type = param .properties diff --git a/src/connector/src/source/kafka/private_link.rs b/src/connector/src/source/kafka/private_link.rs index 6aeebde87b516..3f6f1b8e32da7 100644 --- a/src/connector/src/source/kafka/private_link.rs +++ b/src/connector/src/source/kafka/private_link.rs @@ -30,7 +30,6 @@ use crate::error::ConnectorResult; use crate::source::kafka::{KAFKA_PROPS_BROKER_KEY, KAFKA_PROPS_BROKER_KEY_ALIAS}; pub const PRIVATELINK_ENDPOINT_KEY: &str = "privatelink.endpoint"; -pub const CONNECTION_NAME_KEY: &str = "connection.name"; #[derive(Debug)] pub(super) enum PrivateLinkContextRole { diff --git a/src/connector/src/with_options.rs b/src/connector/src/with_options.rs index 37cca7ec09bed..6c857de148fad 100644 --- a/src/connector/src/with_options.rs +++ b/src/connector/src/with_options.rs @@ -18,6 +18,7 @@ use risingwave_pb::secret::PbSecretRef; use crate::sink::catalog::SinkFormatDesc; use crate::source::cdc::external::CdcTableType; +use crate::source::cdc::MYSQL_CDC_CONNECTOR; use crate::source::iceberg::ICEBERG_CONNECTOR; use crate::source::{ AZBLOB_CONNECTOR, GCS_CONNECTOR, KAFKA_CONNECTOR, OPENDAL_S3_CONNECTOR, POSIX_FS_CONNECTOR, @@ -104,6 +105,14 @@ pub trait WithPropertiesExt: Get + Sized { connector == KAFKA_CONNECTOR } + #[inline(always)] + fn is_mysql_cdc_connector(&self) -> bool { + let Some(connector) = self.get_connector() else { + return false; + }; + connector == MYSQL_CDC_CONNECTOR + } + #[inline(always)] fn is_cdc_connector(&self) -> bool { let Some(connector) = self.get_connector() else { diff --git a/src/error/src/anyhow.rs b/src/error/src/anyhow.rs index 08203c176fcbc..30a46259ffdb5 100644 --- a/src/error/src/anyhow.rs +++ b/src/error/src/anyhow.rs @@ -126,7 +126,7 @@ macro_rules! def_anyhow_newtype { ) => { #[derive(::thiserror::Error, ::std::fmt::Debug)] #[error(transparent)] - $(#[$attr])* $vis struct $name(#[from] #[backtrace] ::anyhow::Error); + $(#[$attr])* $vis struct $name(#[from] #[backtrace] pub ::anyhow::Error); impl $name { /// Unwrap the newtype to get the inner [`anyhow::Error`]. diff --git a/src/expr/core/src/window_function/kind.rs b/src/expr/core/src/window_function/kind.rs index 6bf5ea8c45e5f..e7a16d421483f 100644 --- a/src/expr/core/src/window_function/kind.rs +++ b/src/expr/core/src/window_function/kind.rs @@ -21,6 +21,7 @@ use crate::aggregate::AggType; use crate::Result; /// Kind of window functions. 
+#[expect(clippy::large_enum_variant)] #[derive(Debug, Display, FromStr /* for builtin */, Clone, PartialEq, Eq, Hash, EnumAsInner)] #[display(style = "snake_case")] pub enum WindowFuncKind { diff --git a/src/expr/core/src/window_function/session.rs b/src/expr/core/src/window_function/session.rs index 81a77058759bf..cc98c789019cf 100644 --- a/src/expr/core/src/window_function/session.rs +++ b/src/expr/core/src/window_function/session.rs @@ -196,7 +196,7 @@ struct SessionFrameGapRef<'a> { add_expr: &'a dyn Expression, } -impl<'a> SessionFrameGapRef<'a> { +impl SessionFrameGapRef<'_> { fn minimal_next_start_of(&self, end_order_value: impl ToOwnedDatum) -> Datum { let row = OwnedRow::new(vec![end_order_value.to_owned_datum()]); self.add_expr diff --git a/src/expr/impl/Cargo.toml b/src/expr/impl/Cargo.toml index 33c41c09515a5..257cf19b77e7d 100644 --- a/src/expr/impl/Cargo.toml +++ b/src/expr/impl/Cargo.toml @@ -38,7 +38,7 @@ chrono = { version = "0.4", default-features = false, features = [ ] } chrono-tz = { version = "0.10", features = ["case-insensitive"] } educe = "0.6" -fancy-regex = "0.13" +fancy-regex = "0.14" futures-async-stream = { workspace = true } futures-util = "0.3" ginepro = "0.8" diff --git a/src/frontend/Cargo.toml b/src/frontend/Cargo.toml index 800a90cce5e63..76ee2aa076e8e 100644 --- a/src/frontend/Cargo.toml +++ b/src/frontend/Cargo.toml @@ -32,7 +32,7 @@ easy-ext = "1" educe = "0.6" either = "1" enum-as-inner = "0.6" -fancy-regex = "0.13.0" +fancy-regex = "0.14.0" fixedbitset = "0.5" futures = { version = "0.3", default-features = false, features = ["alloc"] } futures-async-stream = { workspace = true } @@ -45,6 +45,9 @@ linkme = { version = "0.3", features = ["used_linker"] } maplit = "1" md5 = "0.7.0" memcomparable = "0.2" +mysql_async = { version = "0.34", default-features = false, features = [ + "default", +] } num-integer = "0.1" parking_lot = { workspace = true } parse-display = "0.10" diff --git a/src/frontend/planner_test/src/lib.rs b/src/frontend/planner_test/src/lib.rs index a6ec179011771..528fa88ef3506 100644 --- a/src/frontend/planner_test/src/lib.rs +++ b/src/frontend/planner_test/src/lib.rs @@ -427,7 +427,7 @@ impl TestCase { columns, constraints, if_not_exists, - source_schema, + format_encode, source_watermarks, append_only, on_conflict, @@ -437,7 +437,7 @@ impl TestCase { wildcard_idx, .. 
} => { - let source_schema = source_schema.map(|schema| schema.into_v2_with_warning()); + let format_encode = format_encode.map(|schema| schema.into_v2_with_warning()); create_table::handle_create_table( handler_args, @@ -446,7 +446,7 @@ impl TestCase { wildcard_idx, constraints, if_not_exists, - source_schema, + format_encode, source_watermarks, append_only, on_conflict, diff --git a/src/frontend/planner_test/tests/testdata/input/lateral_subquery.yaml b/src/frontend/planner_test/tests/testdata/input/lateral_subquery.yaml index 8b9126f18d641..acc07a17d3555 100644 --- a/src/frontend/planner_test/tests/testdata/input/lateral_subquery.yaml +++ b/src/frontend/planner_test/tests/testdata/input/lateral_subquery.yaml @@ -134,3 +134,12 @@ )d on true; expected_outputs: - batch_plan +- name: lateral join with CTE + sql: | + create table t1(x int, y int); + create table t2(x int, y int); + select * from t1, lateral ( + with cte as (select * from t2 where t2.y = t1.y) select x from cte + ); + expected_outputs: + - batch_plan diff --git a/src/frontend/planner_test/tests/testdata/input/subquery_expr_correlated.yaml b/src/frontend/planner_test/tests/testdata/input/subquery_expr_correlated.yaml index 7c87713e85c20..7194a2806a2e8 100644 --- a/src/frontend/planner_test/tests/testdata/input/subquery_expr_correlated.yaml +++ b/src/frontend/planner_test/tests/testdata/input/subquery_expr_correlated.yaml @@ -584,3 +584,11 @@ from rawdata expected_outputs: - optimized_logical_plan_for_batch +- name: subquery with CTE + sql: | + create table t1(x int, y int); + create table t2(x int, y int); + select * from t1 where t1.x = ( with cte as (select * from t2 where t2.y = t1.y) select x from cte limit 1); + expected_outputs: + - batch_plan + - stream_plan \ No newline at end of file diff --git a/src/frontend/planner_test/tests/testdata/output/lateral_subquery.yaml b/src/frontend/planner_test/tests/testdata/output/lateral_subquery.yaml index 815890d6a73b8..2e3046c7a2e4b 100644 --- a/src/frontend/planner_test/tests/testdata/output/lateral_subquery.yaml +++ b/src/frontend/planner_test/tests/testdata/output/lateral_subquery.yaml @@ -270,3 +270,18 @@ │ └─BatchValues { rows: [[1:Int32, '2024-06-20 19:01:00+00:00':Timestamptz]] } └─BatchExchange { order: [], dist: HashShard(r.src_id) } └─BatchScan { table: r, columns: [r.ts, r.src_id, r.dev_id], distribution: SomeShard } +- name: lateral join with CTE + sql: | + create table t1(x int, y int); + create table t2(x int, y int); + select * from t1, lateral ( + with cte as (select * from t2 where t2.y = t1.y) select x from cte + ); + batch_plan: |- + BatchExchange { order: [], dist: Single } + └─BatchHashJoin { type: Inner, predicate: t1.y IS NOT DISTINCT FROM t2.y, output: [t1.x, t1.y, t2.x] } + ├─BatchExchange { order: [], dist: HashShard(t1.y) } + │ └─BatchScan { table: t1, columns: [t1.x, t1.y], distribution: SomeShard } + └─BatchExchange { order: [], dist: HashShard(t2.y) } + └─BatchFilter { predicate: IsNotNull(t2.y) } + └─BatchScan { table: t2, columns: [t2.x, t2.y], distribution: SomeShard } diff --git a/src/frontend/planner_test/tests/testdata/output/subquery_expr_correlated.yaml b/src/frontend/planner_test/tests/testdata/output/subquery_expr_correlated.yaml index 07399b433c0f6..13d363a6e4877 100644 --- a/src/frontend/planner_test/tests/testdata/output/subquery_expr_correlated.yaml +++ b/src/frontend/planner_test/tests/testdata/output/subquery_expr_correlated.yaml @@ -2193,3 +2193,29 @@ └─LogicalProjectSet { select_list: [$0, JsonbEach($0)] } └─LogicalAgg { group_key: 
['{"x": {"value": 123}, "y": {"value": [1, 2, 3]}, "z": {"value": [{"a": 4, "b": 5}, {"a": 6, "b": 7}]}}':Jsonb], aggs: [] } └─LogicalValues { rows: [['{"x": {"value": 123}, "y": {"value": [1, 2, 3]}, "z": {"value": [{"a": 4, "b": 5}, {"a": 6, "b": 7}]}}':Jsonb], ['{"x": {"value": 456}, "y": {"value": [7, 8, 9]}, "z": {"value": [{"a": 0, "b": 1}, {"a": 2, "b": 3}]}}':Jsonb]], schema: Schema { fields: ['{"x": {"value": 123}, "y": {"value": [1, 2, 3]}, "z": {"value": [{"a": 4, "b": 5}, {"a": 6, "b": 7}]}}':Jsonb:Jsonb] } } +- name: subquery with CTE + sql: | + create table t1(x int, y int); + create table t2(x int, y int); + select * from t1 where t1.x = ( with cte as (select * from t2 where t2.y = t1.y) select x from cte limit 1); + batch_plan: |- + BatchExchange { order: [], dist: Single } + └─BatchHashJoin { type: Inner, predicate: t1.y IS NOT DISTINCT FROM t2.y AND t1.x = t2.x, output: [t1.x, t1.y] } + ├─BatchExchange { order: [], dist: HashShard(t1.y) } + │ └─BatchScan { table: t1, columns: [t1.x, t1.y], distribution: SomeShard } + └─BatchGroupTopN { order: [t2.y ASC], limit: 1, offset: 0, group_key: [t2.y] } + └─BatchExchange { order: [], dist: HashShard(t2.y) } + └─BatchProject { exprs: [t2.y, t2.x] } + └─BatchFilter { predicate: IsNotNull(t2.y) } + └─BatchScan { table: t2, columns: [t2.x, t2.y], distribution: SomeShard } + stream_plan: |- + StreamMaterialize { columns: [x, y, t1._row_id(hidden), t2.y(hidden)], stream_key: [t1._row_id, y, x], pk_columns: [t1._row_id, y, x], pk_conflict: NoCheck } + └─StreamExchange { dist: HashShard(t1.x, t1.y, t1._row_id) } + └─StreamHashJoin { type: Inner, predicate: t1.y IS NOT DISTINCT FROM t2.y AND t1.x = t2.x, output: [t1.x, t1.y, t1._row_id, t2.y] } + ├─StreamExchange { dist: HashShard(t1.y) } + │ └─StreamTableScan { table: t1, columns: [t1.x, t1.y, t1._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t1._row_id], pk: [_row_id], dist: UpstreamHashShard(t1._row_id) } + └─StreamGroupTopN { order: [t2.y ASC], limit: 1, offset: 0, group_key: [t2.y] } + └─StreamExchange { dist: HashShard(t2.y) } + └─StreamProject { exprs: [t2.y, t2.x, t2._row_id] } + └─StreamFilter { predicate: IsNotNull(t2.y) } + └─StreamTableScan { table: t2, columns: [t2.x, t2.y, t2._row_id], stream_scan_type: ArrangementBackfill, stream_key: [t2._row_id], pk: [_row_id], dist: UpstreamHashShard(t2._row_id) } diff --git a/src/frontend/src/binder/expr/column.rs b/src/frontend/src/binder/expr/column.rs index cf4f295e960a0..8a8ea6b87f7f5 100644 --- a/src/frontend/src/binder/expr/column.rs +++ b/src/frontend/src/binder/expr/column.rs @@ -151,14 +151,14 @@ impl Binder { } } - for (i, lateral_context) in lateral_contexts.iter().rev().enumerate() { + for (j, lateral_context) in lateral_contexts.iter().rev().enumerate() { if lateral_context.is_visible { let context = &lateral_context.context; if matches!(context.clause, Some(Clause::Insert)) { continue; } // correlated input ref from lateral context `depth` starts from 1. 
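The one-line change just below fixes how correlated depth is computed once the binder has already walked `i` upper subquery contexts and starts walking lateral contexts with index `j`. A minimal sketch of the corrected bookkeeping, with illustrative names that stand in for `i` and `j` (the real code folds this into the loop in `column.rs`):

// Sketch: depth keeps counting from where the outer loop stopped instead of
// restarting at 1 for the first lateral context.
fn correlated_depth(upper_contexts_scanned: usize, lateral_index: usize) -> usize {
    upper_contexts_scanned + lateral_index + 1
}

fn main() {
    // With two upper contexts already scanned, the first lateral context is at depth 3.
    assert_eq!(correlated_depth(2, 0), 3);
}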
- let depth = i + 1; + let depth = i + j + 1; match context.get_column_binding_index(&table_name, &column_name) { Ok(index) => { let column = &context.columns[index]; diff --git a/src/frontend/src/binder/expr/function/builtin_scalar.rs b/src/frontend/src/binder/expr/function/builtin_scalar.rs index 7781140432577..68b37a3fee4e0 100644 --- a/src/frontend/src/binder/expr/function/builtin_scalar.rs +++ b/src/frontend/src/binder/expr/function/builtin_scalar.rs @@ -432,10 +432,10 @@ impl Binder { ("current_catalog", current_database()), ("current_database", current_database()), ("current_schema", guard_by_len(0, raw(|binder, _inputs| { - return Ok(binder + Ok(binder .first_valid_schema() .map(|schema| ExprImpl::literal_varchar(schema.name())) - .unwrap_or_else(|_| ExprImpl::literal_null(DataType::Varchar))); + .unwrap_or_else(|_| ExprImpl::literal_null(DataType::Varchar))) }))), ("current_schemas", raw(|binder, mut inputs| { let no_match_err = ErrorCode::ExprError( diff --git a/src/frontend/src/binder/expr/function/mod.rs b/src/frontend/src/binder/expr/function/mod.rs index ddc21c6ee7ac7..f7a4007ffd467 100644 --- a/src/frontend/src/binder/expr/function/mod.rs +++ b/src/frontend/src/binder/expr/function/mod.rs @@ -333,6 +333,17 @@ impl Binder { .context("postgres_query error")? .into()); } + // `mysql_query` table function + if func_name.eq("mysql_query") { + reject_syntax!( + arg_list.variadic, + "`VARIADIC` is not allowed in table function call" + ); + self.ensure_table_function_allowed()?; + return Ok(TableFunction::new_mysql_query(args) + .context("mysql_query error")? + .into()); + } // UDTF if let Some(ref udf) = udf && udf.kind.is_table() diff --git a/src/frontend/src/binder/insert.rs b/src/frontend/src/binder/insert.rs index 505acec2265b7..ac4a52f2c11a4 100644 --- a/src/frontend/src/binder/insert.rs +++ b/src/frontend/src/binder/insert.rs @@ -305,7 +305,6 @@ impl Binder { /// Cast a list of `exprs` to corresponding `expected_types` IN ASSIGNMENT CONTEXT. Make sure /// you understand the difference of implicit, assignment and explicit cast before reusing it. - pub(super) fn cast_on_insert( expected_types: &Vec, exprs: Vec, diff --git a/src/frontend/src/binder/relation/mod.rs b/src/frontend/src/binder/relation/mod.rs index af4121e1b9056..861edf575f510 100644 --- a/src/frontend/src/binder/relation/mod.rs +++ b/src/frontend/src/binder/relation/mod.rs @@ -15,6 +15,7 @@ use std::collections::hash_map::Entry; use std::ops::Deref; +use either::Either; use itertools::{EitherOrBoth, Itertools}; use risingwave_common::bail; use risingwave_common::catalog::{Field, TableId, DEFAULT_SCHEMA_NAME}; @@ -134,6 +135,15 @@ impl Relation { with_ordinality: _, } => table_function .collect_correlated_indices_by_depth_and_assign_id(depth + 1, correlated_id), + Relation::Share(share) => match &mut share.input { + BoundShareInput::Query(query) => match query { + Either::Left(query) => query + .collect_correlated_indices_by_depth_and_assign_id(depth, correlated_id), + Either::Right(_) => vec![], + }, + BoundShareInput::ChangeLog(change_log) => change_log + .collect_correlated_indices_by_depth_and_assign_id(depth, correlated_id), + }, _ => vec![], } } diff --git a/src/frontend/src/binder/set_expr.rs b/src/frontend/src/binder/set_expr.rs index 68af5845bf7a4..0d7f74efa95d9 100644 --- a/src/frontend/src/binder/set_expr.rs +++ b/src/frontend/src/binder/set_expr.rs @@ -78,7 +78,6 @@ impl From for BoundSetOperation { impl BoundSetExpr { /// The schema returned by this [`BoundSetExpr`]. 
- pub fn schema(&self) -> Cow<'_, Schema> { match self { BoundSetExpr::Select(s) => Cow::Borrowed(s.schema()), diff --git a/src/frontend/src/catalog/connection_catalog.rs b/src/frontend/src/catalog/connection_catalog.rs index 54e1210979fe8..03b2ff4203c53 100644 --- a/src/frontend/src/catalog/connection_catalog.rs +++ b/src/frontend/src/catalog/connection_catalog.rs @@ -12,18 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::BTreeMap; -use std::sync::Arc; - -use anyhow::anyhow; -use risingwave_connector::source::kafka::private_link::insert_privatelink_broker_rewrite_map; -use risingwave_connector::WithPropertiesExt; -use risingwave_pb::catalog::connection::private_link_service::PrivateLinkProvider; use risingwave_pb::catalog::connection::Info; use risingwave_pb::catalog::{connection, PbConnection}; use crate::catalog::{ConnectionId, OwnedByUserCatalog}; -use crate::error::{Result, RwError}; use crate::user::UserId; #[derive(Clone, Debug, PartialEq)] @@ -64,24 +56,3 @@ impl OwnedByUserCatalog for ConnectionCatalog { self.owner } } - -pub(crate) fn resolve_private_link_connection( - connection: &Arc, - properties: &mut BTreeMap, -) -> Result<()> { - #[allow(irrefutable_let_patterns)] - if let connection::Info::PrivateLinkService(svc) = &connection.info { - if !properties.is_kafka_connector() { - return Err(RwError::from(anyhow!( - "Private link is only supported for Kafka connector" - ))); - } - // skip all checks for mock connection - if svc.get_provider()? == PrivateLinkProvider::Mock { - return Ok(()); - } - insert_privatelink_broker_rewrite_map(properties, Some(svc), None) - .map_err(RwError::from)?; - } - Ok(()) -} diff --git a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_views.rs b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_views.rs index 6ebf16ddd40ac..7c156d783a1c3 100644 --- a/src/frontend/src/catalog/system_catalog/rw_catalog/rw_views.rs +++ b/src/frontend/src/catalog/system_catalog/rw_catalog/rw_views.rs @@ -45,7 +45,7 @@ fn read_rw_view_info(reader: &SysCatalogReaderImpl) -> Result> { name: view.name().to_string(), schema_id: schema.id() as i32, owner: view.owner as i32, - definition: view.create_sql(), + definition: view.create_sql(schema.name()), acl: get_acl_items(&Object::ViewId(view.id), false, &users, username_map), }) }) diff --git a/src/frontend/src/catalog/view_catalog.rs b/src/frontend/src/catalog/view_catalog.rs index 331613be9415d..e60db583b870b 100644 --- a/src/frontend/src/catalog/view_catalog.rs +++ b/src/frontend/src/catalog/view_catalog.rs @@ -54,8 +54,12 @@ impl ViewCatalog { } /// Returns the SQL statement that can be used to create this view. 
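The `create_sql` change that follows starts qualifying the view name with its schema unless it lives in `public`. A standalone sketch of that rule, with an illustrative helper name that is not part of the patch:

// Sketch: only non-default schemas are spelled out in the regenerated definition,
// matching the `if schema == "public"` branch introduced below.
fn create_view_sql(schema: &str, name: &str, sql: &str) -> String {
    if schema == "public" {
        format!("CREATE VIEW {} AS {}", name, sql)
    } else {
        format!("CREATE VIEW {}.{} AS {}", schema, name, sql)
    }
}

fn main() {
    assert_eq!(
        create_view_sql("s1", "v", "SELECT 1"),
        "CREATE VIEW s1.v AS SELECT 1"
    );
}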
- pub fn create_sql(&self) -> String { - format!("CREATE VIEW {} AS {}", self.name, self.sql) + pub fn create_sql(&self, schema: String) -> String { + if schema == "public" { + format!("CREATE VIEW {} AS {}", self.name, self.sql) + } else { + format!("CREATE VIEW {}.{} AS {}", schema, self.name, self.sql) + } } } diff --git a/src/frontend/src/expr/table_function.rs b/src/frontend/src/expr/table_function.rs index 5f22398cc5834..cee4188e75791 100644 --- a/src/frontend/src/expr/table_function.rs +++ b/src/frontend/src/expr/table_function.rs @@ -14,7 +14,10 @@ use std::sync::{Arc, LazyLock}; +use anyhow::Context; use itertools::Itertools; +use mysql_async::consts::ColumnType as MySqlColumnType; +use mysql_async::prelude::*; use risingwave_common::array::arrow::IcebergArrowConvert; use risingwave_common::types::{DataType, ScalarImpl, StructType}; use risingwave_connector::source::iceberg::{create_parquet_stream_builder, list_s3_directory}; @@ -22,8 +25,8 @@ pub use risingwave_pb::expr::table_function::PbType as TableFunctionType; use risingwave_pb::expr::PbTableFunction; use thiserror_ext::AsReport; use tokio::runtime::Runtime; -use tokio_postgres; use tokio_postgres::types::Type as TokioPgType; +use {mysql_async, tokio_postgres}; use super::{infer_type, Expr, ExprImpl, ExprRewriter, Literal, RwResult}; use crate::catalog::function_catalog::{FunctionCatalog, FunctionKind}; @@ -298,7 +301,7 @@ impl TableFunction { tokio::spawn(async move { if let Err(e) = connection.await { tracing::error!( - "postgres_query_executor: connection error: {:?}", + "mysql_query_executor: connection error: {:?}", e.as_report() ); } @@ -350,6 +353,162 @@ impl TableFunction { } } + pub fn new_mysql_query(args: Vec) -> RwResult { + static MYSQL_ARGS_LEN: usize = 6; + let args = { + if args.len() != MYSQL_ARGS_LEN { + return Err(BindError("mysql_query function only accepts 6 arguments: mysql_query(hostname varchar, port varchar, username varchar, password varchar, database_name varchar, mysql_query varchar)".to_string()).into()); + } + let mut cast_args = Vec::with_capacity(MYSQL_ARGS_LEN); + for arg in args { + let arg = arg.cast_implicit(DataType::Varchar)?; + cast_args.push(arg); + } + cast_args + }; + let evaled_args = { + let mut evaled_args: Vec = Vec::with_capacity(MYSQL_ARGS_LEN); + for arg in &args { + match arg.try_fold_const() { + Some(Ok(value)) => { + let Some(scalar) = value else { + return Err(BindError( + "mysql_query function does not accept null arguments".to_string(), + ) + .into()); + }; + evaled_args.push(scalar.into_utf8().into()); + } + Some(Err(err)) => { + return Err(err); + } + None => { + return Err(BindError( + "mysql_query function only accepts constant arguments".to_string(), + ) + .into()); + } + } + } + evaled_args + }; + + #[cfg(madsim)] + { + return Err(crate::error::ErrorCode::BindError( + "postgres_query can't be used in the madsim mode".to_string(), + ) + .into()); + } + + #[cfg(not(madsim))] + { + let schema = tokio::task::block_in_place(|| { + RUNTIME.block_on(async { + let database_opts: mysql_async::Opts = { + let port = evaled_args[1] + .parse::() + .context("failed to parse port")?; + mysql_async::OptsBuilder::default() + .ip_or_hostname(evaled_args[0].clone()) + .tcp_port(port) + .user(Some(evaled_args[2].clone())) + .pass(Some(evaled_args[3].clone())) + .db_name(Some(evaled_args[4].clone())) + .into() + }; + + let pool = mysql_async::Pool::new(database_opts); + let mut conn = pool + .get_conn() + .await + .context("failed to connect to mysql in binder")?; + + let query = 
evaled_args[5].clone(); + let statement = conn + .prep(query) + .await + .context("failed to prepare mysql_query in binder")?; + + let mut rw_types = vec![]; + #[allow(clippy::never_loop)] + for column in statement.columns() { + let name = column.name_str().to_string(); + let data_type = match column.column_type() { + // Boolean types + MySqlColumnType::MYSQL_TYPE_BIT if column.column_length() == 1 => { + DataType::Boolean + } + + // Numeric types + // NOTE(kwannoel): Although `bool/boolean` is a synonym of TINY(1) in MySQL, + // we treat it as Int16 here. It is better to be straightforward in our conversion. + MySqlColumnType::MYSQL_TYPE_TINY => DataType::Int16, + MySqlColumnType::MYSQL_TYPE_SHORT => DataType::Int16, + MySqlColumnType::MYSQL_TYPE_INT24 => DataType::Int32, + MySqlColumnType::MYSQL_TYPE_LONG => DataType::Int32, + MySqlColumnType::MYSQL_TYPE_LONGLONG => DataType::Int64, + MySqlColumnType::MYSQL_TYPE_FLOAT => DataType::Float32, + MySqlColumnType::MYSQL_TYPE_DOUBLE => DataType::Float64, + MySqlColumnType::MYSQL_TYPE_NEWDECIMAL => DataType::Decimal, + MySqlColumnType::MYSQL_TYPE_DECIMAL => DataType::Decimal, + + // Date time types + MySqlColumnType::MYSQL_TYPE_YEAR => DataType::Int32, + MySqlColumnType::MYSQL_TYPE_DATE => DataType::Date, + MySqlColumnType::MYSQL_TYPE_NEWDATE => DataType::Date, + MySqlColumnType::MYSQL_TYPE_TIME => DataType::Time, + MySqlColumnType::MYSQL_TYPE_TIME2 => DataType::Time, + MySqlColumnType::MYSQL_TYPE_DATETIME => DataType::Timestamp, + MySqlColumnType::MYSQL_TYPE_DATETIME2 => DataType::Timestamp, + MySqlColumnType::MYSQL_TYPE_TIMESTAMP => DataType::Timestamptz, + MySqlColumnType::MYSQL_TYPE_TIMESTAMP2 => DataType::Timestamptz, + + // String types + MySqlColumnType::MYSQL_TYPE_VARCHAR + | MySqlColumnType::MYSQL_TYPE_STRING + | MySqlColumnType::MYSQL_TYPE_VAR_STRING => DataType::Varchar, + + // JSON types + MySqlColumnType::MYSQL_TYPE_JSON => DataType::Jsonb, + + // Binary types + MySqlColumnType::MYSQL_TYPE_BIT + | MySqlColumnType::MYSQL_TYPE_BLOB + | MySqlColumnType::MYSQL_TYPE_TINY_BLOB + | MySqlColumnType::MYSQL_TYPE_MEDIUM_BLOB + | MySqlColumnType::MYSQL_TYPE_LONG_BLOB => DataType::Bytea, + + MySqlColumnType::MYSQL_TYPE_UNKNOWN + | MySqlColumnType::MYSQL_TYPE_TYPED_ARRAY + | MySqlColumnType::MYSQL_TYPE_ENUM + | MySqlColumnType::MYSQL_TYPE_SET + | MySqlColumnType::MYSQL_TYPE_GEOMETRY + | MySqlColumnType::MYSQL_TYPE_NULL => { + return Err(crate::error::ErrorCode::BindError( + format!("unsupported column type: {:?}", column.column_type()) + .to_string(), + ) + .into()); + } + }; + rw_types.push((name, data_type)); + } + Ok::(DataType::Struct( + StructType::new(rw_types), + )) + }) + })?; + + Ok(TableFunction { + args, + return_type: schema, + function_type: TableFunctionType::MysqlQuery, + user_defined: None, + }) + } + } + pub fn to_protobuf(&self) -> PbTableFunction { PbTableFunction { function_type: self.function_type as i32, diff --git a/src/frontend/src/expr/type_inference/func.rs b/src/frontend/src/expr/type_inference/func.rs index 99392ad87b971..719484b959f3a 100644 --- a/src/frontend/src/expr/type_inference/func.rs +++ b/src/frontend/src/expr/type_inference/func.rs @@ -1042,7 +1042,7 @@ fn narrow_same_type<'a>( } struct TypeDisplay<'a>(&'a Option); -impl<'a> std::fmt::Display for TypeDisplay<'a> { +impl std::fmt::Display for TypeDisplay<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.0 { Some(t) => t.fmt(f), diff --git a/src/frontend/src/handler/alter_source_with_sr.rs 
b/src/frontend/src/handler/alter_source_with_sr.rs index bf8cf991d1a4f..5e889ef9f0d7e 100644 --- a/src/frontend/src/handler/alter_source_with_sr.rs +++ b/src/frontend/src/handler/alter_source_with_sr.rs @@ -24,7 +24,7 @@ use risingwave_connector::WithPropertiesExt; use risingwave_pb::catalog::StreamSourceInfo; use risingwave_pb::plan_common::{EncodeType, FormatType}; use risingwave_sqlparser::ast::{ - CompatibleSourceSchema, ConnectorSchema, CreateSourceStatement, Encode, Format, ObjectName, + CompatibleFormatEncode, CreateSourceStatement, Encode, Format, FormatEncodeOptions, ObjectName, SqlOption, Statement, }; use risingwave_sqlparser::parser::Parser; @@ -120,7 +120,7 @@ pub fn fetch_source_catalog_with_db_schema_id( /// and if the FORMAT and ENCODE are modified. pub fn check_format_encode( original_source: &SourceCatalog, - new_connector_schema: &ConnectorSchema, + new_format_encode: &FormatEncodeOptions, ) -> Result<()> { let StreamSourceInfo { format, row_encode, .. @@ -137,9 +137,7 @@ pub fn check_format_encode( .into()); }; - if new_connector_schema.format != old_format - || new_connector_schema.row_encode != old_row_encode - { + if new_format_encode.format != old_format || new_format_encode.row_encode != old_row_encode { bail_not_implemented!( "the original definition is FORMAT {:?} ENCODE {:?}, and altering them is not supported yet", &old_format, @@ -153,19 +151,18 @@ pub fn check_format_encode( /// Refresh the source registry and get the added/dropped columns. pub async fn refresh_sr_and_get_columns_diff( original_source: &SourceCatalog, - connector_schema: &ConnectorSchema, + format_encode: &FormatEncodeOptions, session: &Arc, ) -> Result<(StreamSourceInfo, Vec, Vec)> { let mut with_properties = original_source.with_properties.clone(); - validate_compatibility(connector_schema, &mut with_properties)?; + validate_compatibility(format_encode, &mut with_properties)?; if with_properties.is_cdc_connector() { bail_not_implemented!("altering a cdc source is not supported"); } let (Some(columns_from_resolve_source), source_info) = - bind_columns_from_source(session, connector_schema, Either::Right(&with_properties)) - .await? + bind_columns_from_source(session, format_encode, Either::Right(&with_properties)).await? else { // Source without schema registry is rejected. unreachable!("source without schema registry is rejected") @@ -189,18 +186,18 @@ pub async fn refresh_sr_and_get_columns_diff( Ok((source_info, added_columns, dropped_columns)) } -fn get_connector_schema_from_source(source: &SourceCatalog) -> Result { +fn get_format_encode_from_source(source: &SourceCatalog) -> Result { let [stmt]: [_; 1] = Parser::parse_sql(&source.definition) .context("unable to parse original source definition")? .try_into() .unwrap(); let Statement::CreateSource { - stmt: CreateSourceStatement { source_schema, .. }, + stmt: CreateSourceStatement { format_encode, .. 
}, } = stmt else { unreachable!() }; - Ok(source_schema.into_v2_with_warning()) + Ok(format_encode.into_v2_with_warning()) } pub async fn handler_refresh_schema( @@ -208,14 +205,14 @@ pub async fn handler_refresh_schema( name: ObjectName, ) -> Result { let (source, _, _) = fetch_source_catalog_with_db_schema_id(&handler_args.session, &name)?; - let connector_schema = get_connector_schema_from_source(&source)?; - handle_alter_source_with_sr(handler_args, name, connector_schema).await + let format_encode = get_format_encode_from_source(&source)?; + handle_alter_source_with_sr(handler_args, name, format_encode).await } pub async fn handle_alter_source_with_sr( handler_args: HandlerArgs, name: ObjectName, - connector_schema: ConnectorSchema, + format_encode: FormatEncodeOptions, ) -> Result { let session = handler_args.session; let (source, database_id, schema_id) = fetch_source_catalog_with_db_schema_id(&session, &name)?; @@ -232,9 +229,9 @@ pub async fn handle_alter_source_with_sr( bail_not_implemented!(issue = 16003, "alter shared source"); } - check_format_encode(&source, &connector_schema)?; + check_format_encode(&source, &format_encode)?; - if !schema_has_schema_registry(&connector_schema) { + if !schema_has_schema_registry(&format_encode) { return Err(ErrorCode::NotSupported( "altering a source without schema registry".to_string(), "try `ALTER SOURCE .. ADD COLUMN ...` instead".to_string(), @@ -243,7 +240,7 @@ pub async fn handle_alter_source_with_sr( } let (source_info, added_columns, dropped_columns) = - refresh_sr_and_get_columns_diff(&source, &connector_schema, &session).await?; + refresh_sr_and_get_columns_diff(&source, &format_encode, &session).await?; if !dropped_columns.is_empty() { bail_not_implemented!( @@ -258,10 +255,10 @@ pub async fn handle_alter_source_with_sr( source.info = source_info; source.columns.extend(added_columns); source.definition = - alter_definition_format_encode(&source.definition, connector_schema.row_options.clone())?; + alter_definition_format_encode(&source.definition, format_encode.row_options.clone())?; let (format_encode_options, format_encode_secret_ref) = resolve_secret_ref_in_with_options( - WithOptions::try_from(connector_schema.row_options())?, + WithOptions::try_from(format_encode.row_options())?, session.as_ref(), )? .into_parts(); @@ -299,19 +296,19 @@ pub fn alter_definition_format_encode( match &mut stmt { Statement::CreateSource { - stmt: CreateSourceStatement { source_schema, .. }, + stmt: CreateSourceStatement { format_encode, .. }, } | Statement::CreateTable { - source_schema: Some(source_schema), + format_encode: Some(format_encode), .. } => { - match source_schema { - CompatibleSourceSchema::V2(schema) => { + match format_encode { + CompatibleFormatEncode::V2(schema) => { schema.row_options = format_encode_options; } // TODO: Confirm the behavior of legacy source schema. // Legacy source schema should be rejected by the handler and never reaches here. 
- CompatibleSourceSchema::RowFormat(_schema) => unreachable!(), + CompatibleFormatEncode::RowFormat(_schema) => unreachable!(), } } _ => unreachable!(), diff --git a/src/frontend/src/handler/alter_table_column.rs b/src/frontend/src/handler/alter_table_column.rs index 88e886ad667bf..1241553aff04a 100644 --- a/src/frontend/src/handler/alter_table_column.rs +++ b/src/frontend/src/handler/alter_table_column.rs @@ -29,8 +29,8 @@ use risingwave_pb::ddl_service::TableJobType; use risingwave_pb::stream_plan::stream_node::PbNodeBody; use risingwave_pb::stream_plan::{ProjectNode, StreamFragmentGraph}; use risingwave_sqlparser::ast::{ - AlterTableOperation, ColumnDef, ColumnOption, ConnectorSchema, DataType as AstDataType, Encode, - ObjectName, Statement, StructField, + AlterTableOperation, ColumnDef, ColumnOption, DataType as AstDataType, Encode, + FormatEncodeOptions, ObjectName, Statement, StructField, }; use risingwave_sqlparser::parser::Parser; @@ -51,14 +51,14 @@ pub async fn replace_table_with_definition( table_name: ObjectName, definition: Statement, original_catalog: &Arc, - source_schema: Option, + format_encode: Option, ) -> Result<()> { let (source, table, graph, col_index_mapping, job_type) = get_replace_table_plan( session, table_name, definition, original_catalog, - source_schema, + format_encode, None, ) .await?; @@ -86,7 +86,7 @@ pub async fn get_new_table_definition_for_cdc_table( .unwrap(); let Statement::CreateTable { columns: original_columns, - source_schema, + format_encode, .. } = &mut definition else { @@ -94,7 +94,7 @@ pub async fn get_new_table_definition_for_cdc_table( }; assert!( - source_schema.is_none(), + format_encode.is_none(), "source schema should be None for CDC table" ); @@ -165,7 +165,7 @@ pub async fn get_replace_table_plan( table_name: ObjectName, definition: Statement, original_catalog: &Arc, - source_schema: Option, + format_encode: Option, new_version_columns: Option>, // only provided in auto schema change ) -> Result<( Option, @@ -196,7 +196,7 @@ pub async fn get_replace_table_plan( session, table_name, original_catalog, - source_schema, + format_encode, handler_args.clone(), col_id_gen, columns.clone(), @@ -326,19 +326,19 @@ pub async fn handle_alter_table_column( .unwrap(); let Statement::CreateTable { columns, - source_schema, + format_encode, .. 
} = &mut definition else { panic!("unexpected statement: {:?}", definition); }; - let source_schema = source_schema + let format_encode = format_encode .clone() - .map(|source_schema| source_schema.into_v2_with_warning()); + .map(|format_encode| format_encode.into_v2_with_warning()); let fail_if_has_schema_registry = || { - if let Some(source_schema) = &source_schema - && schema_has_schema_registry(source_schema) + if let Some(format_encode) = &format_encode + && schema_has_schema_registry(format_encode) { Err(ErrorCode::NotSupported( "alter table with schema registry".to_string(), @@ -460,14 +460,14 @@ pub async fn handle_alter_table_column( table_name, definition, &original_catalog, - source_schema, + format_encode, ) .await?; Ok(PgResponse::empty_result(StatementType::ALTER_TABLE)) } -pub fn schema_has_schema_registry(schema: &ConnectorSchema) -> bool { +pub fn schema_has_schema_registry(schema: &FormatEncodeOptions) -> bool { match schema.row_encode { Encode::Avro | Encode::Protobuf => true, Encode::Json => { diff --git a/src/frontend/src/handler/alter_table_with_sr.rs b/src/frontend/src/handler/alter_table_with_sr.rs index d932246759e22..b5489b28b58f8 100644 --- a/src/frontend/src/handler/alter_table_with_sr.rs +++ b/src/frontend/src/handler/alter_table_with_sr.rs @@ -16,7 +16,7 @@ use anyhow::{anyhow, Context}; use fancy_regex::Regex; use pgwire::pg_response::StatementType; use risingwave_common::bail_not_implemented; -use risingwave_sqlparser::ast::{ConnectorSchema, ObjectName, Statement}; +use risingwave_sqlparser::ast::{FormatEncodeOptions, ObjectName, Statement}; use risingwave_sqlparser::parser::Parser; use thiserror_ext::AsReport; @@ -29,15 +29,15 @@ use super::{HandlerArgs, RwPgResponse}; use crate::error::{ErrorCode, Result}; use crate::TableCatalog; -fn get_connector_schema_from_table(table: &TableCatalog) -> Result> { +fn get_format_encode_from_table(table: &TableCatalog) -> Result> { let [stmt]: [_; 1] = Parser::parse_sql(&table.definition) .context("unable to parse original table definition")? .try_into() .unwrap(); - let Statement::CreateTable { source_schema, .. } = stmt else { + let Statement::CreateTable { format_encode, .. 
} = stmt else { unreachable!() }; - Ok(source_schema.map(|schema| schema.into_v2_with_warning())) + Ok(format_encode.map(|schema| schema.into_v2_with_warning())) } pub async fn handle_refresh_schema( @@ -51,9 +51,9 @@ pub async fn handle_refresh_schema( bail_not_implemented!("alter table with incoming sinks"); } - let connector_schema = { - let connector_schema = get_connector_schema_from_table(&original_table)?; - if !connector_schema + let format_encode = { + let format_encode = get_format_encode_from_table(&original_table)?; + if !format_encode .as_ref() .is_some_and(schema_has_schema_registry) { @@ -63,12 +63,12 @@ pub async fn handle_refresh_schema( ) .into()); } - connector_schema.unwrap() + format_encode.unwrap() }; let definition = alter_definition_format_encode( &original_table.definition, - connector_schema.row_options.clone(), + format_encode.row_options.clone(), )?; let [definition]: [_; 1] = Parser::parse_sql(&definition) @@ -81,7 +81,7 @@ pub async fn handle_refresh_schema( table_name, definition, &original_table, - Some(connector_schema), + Some(format_encode), ) .await; diff --git a/src/frontend/src/handler/create_connection.rs b/src/frontend/src/handler/create_connection.rs index 987f0e9fdd897..d7ef3aa10b883 100644 --- a/src/frontend/src/handler/create_connection.rs +++ b/src/frontend/src/handler/create_connection.rs @@ -16,23 +16,16 @@ use std::collections::BTreeMap; use pgwire::pg_response::{PgResponse, StatementType}; use risingwave_connector::source::kafka::PRIVATELINK_CONNECTION; -use risingwave_pb::catalog::connection::private_link_service::PrivateLinkProvider; use risingwave_pb::ddl_service::create_connection_request; use risingwave_sqlparser::ast::CreateConnectionStatement; use super::RwPgResponse; use crate::binder::Binder; use crate::error::ErrorCode::ProtocolError; -use crate::error::{Result, RwError}; +use crate::error::{ErrorCode, Result, RwError}; use crate::handler::HandlerArgs; pub(crate) const CONNECTION_TYPE_PROP: &str = "type"; -pub(crate) const CONNECTION_PROVIDER_PROP: &str = "provider"; -pub(crate) const CONNECTION_SERVICE_NAME_PROP: &str = "service.name"; -pub(crate) const CONNECTION_TAGS_PROP: &str = "tags"; - -pub(crate) const CLOUD_PROVIDER_MOCK: &str = "mock"; // fake privatelink provider for testing -pub(crate) const CLOUD_PROVIDER_AWS: &str = "aws"; #[inline(always)] fn get_connection_property_required( @@ -48,58 +41,19 @@ fn get_connection_property_required( ))) }) } - -fn resolve_private_link_properties( - with_properties: &BTreeMap, -) -> Result { - let provider = - match get_connection_property_required(with_properties, CONNECTION_PROVIDER_PROP)?.as_str() - { - CLOUD_PROVIDER_MOCK => PrivateLinkProvider::Mock, - CLOUD_PROVIDER_AWS => PrivateLinkProvider::Aws, - provider => { - return Err(RwError::from(ProtocolError(format!( - "Unsupported privatelink provider {}", - provider - )))); - } - }; - match provider { - PrivateLinkProvider::Mock => Ok(create_connection_request::PrivateLink { - provider: provider.into(), - service_name: String::new(), - tags: None, - }), - PrivateLinkProvider::Aws => { - let service_name = - get_connection_property_required(with_properties, CONNECTION_SERVICE_NAME_PROP)?; - Ok(create_connection_request::PrivateLink { - provider: provider.into(), - service_name, - tags: with_properties.get(CONNECTION_TAGS_PROP).cloned(), - }) - } - PrivateLinkProvider::Unspecified => Err(RwError::from(ProtocolError( - "Privatelink provider unspecified".to_string(), - ))), - } -} - fn resolve_create_connection_payload( 
with_properties: &BTreeMap, ) -> Result { let connection_type = get_connection_property_required(with_properties, CONNECTION_TYPE_PROP)?; - let create_connection_payload = match connection_type.as_str() { - PRIVATELINK_CONNECTION => create_connection_request::Payload::PrivateLink( - resolve_private_link_properties(with_properties)?, - ), - _ => { - return Err(RwError::from(ProtocolError(format!( - "Connection type \"{connection_type}\" is not supported" - )))); - } - }; - Ok(create_connection_payload) + match connection_type.as_str() { + PRIVATELINK_CONNECTION => Err(RwError::from(ErrorCode::Deprecated( + "CREATE CONNECTION to Private Link".to_string(), + "RisingWave Cloud Portal (Please refer to the doc https://docs.risingwave.com/cloud/create-a-connection/)".to_string(), + ))), + _ => Err(RwError::from(ProtocolError(format!( + "Connection type \"{connection_type}\" is not supported" + )))), + } } pub async fn handle_create_connection( diff --git a/src/frontend/src/handler/create_sink.rs b/src/frontend/src/handler/create_sink.rs index fb35c5efc2e99..ea9f9e98f9b71 100644 --- a/src/frontend/src/handler/create_sink.rs +++ b/src/frontend/src/handler/create_sink.rs @@ -22,9 +22,7 @@ use maplit::{convert_args, hashmap}; use pgwire::pg_response::{PgResponse, StatementType}; use risingwave_common::array::arrow::arrow_schema_iceberg::DataType as ArrowDataType; use risingwave_common::array::arrow::IcebergArrowConvert; -use risingwave_common::catalog::{ - ColumnCatalog, ConnectionId, DatabaseId, Schema, SchemaId, TableId, UserId, -}; +use risingwave_common::catalog::{ColumnCatalog, DatabaseId, Schema, SchemaId, TableId, UserId}; use risingwave_common::secret::LocalSecretManager; use risingwave_common::types::DataType; use risingwave_common::{bail, catalog}; @@ -38,7 +36,7 @@ use risingwave_pb::ddl_service::{ReplaceTablePlan, TableJobType}; use risingwave_pb::stream_plan::stream_node::{NodeBody, PbNodeBody}; use risingwave_pb::stream_plan::{MergeNode, StreamFragmentGraph, StreamNode}; use risingwave_sqlparser::ast::{ - ConnectorSchema, CreateSink, CreateSinkStatement, EmitMode, Encode, ExplainOptions, Format, + CreateSink, CreateSinkStatement, EmitMode, Encode, ExplainOptions, Format, FormatEncodeOptions, Query, Statement, }; use risingwave_sqlparser::parser::Parser; @@ -92,12 +90,7 @@ pub async fn gen_sink_plan( let mut with_options = handler_args.with_options.clone(); - let connection_id = { - let conn_id = - resolve_privatelink_in_with_option(&mut with_options, &sink_schema_name, session)?; - conn_id.map(ConnectionId) - }; - + resolve_privatelink_in_with_option(&mut with_options)?; let mut resolved_with_options = resolve_secret_ref_in_with_options(with_options, session)?; let partition_info = get_partition_compute_info(&resolved_with_options).await?; @@ -266,7 +259,7 @@ pub async fn gen_sink_plan( SchemaId::new(sink_schema_id), DatabaseId::new(sink_database_id), UserId::new(session.user_id()), - connection_id, + None, // deprecated: private link connection id dependent_relations.into_iter().collect_vec(), ); @@ -650,7 +643,7 @@ pub(crate) async fn reparse_table_for_sink( .unwrap(); let Statement::CreateTable { name, - source_schema, + format_encode, .. 
} = &definition else { @@ -658,9 +651,9 @@ pub(crate) async fn reparse_table_for_sink( }; let table_name = name.clone(); - let source_schema = source_schema + let format_encode = format_encode .clone() - .map(|source_schema| source_schema.into_v2_with_warning()); + .map(|format_encode| format_encode.into_v2_with_warning()); // Create handler args as if we're creating a new table with the altered definition. let handler_args = HandlerArgs::new(session.clone(), &definition, Arc::from(""))?; @@ -683,7 +676,7 @@ pub(crate) async fn reparse_table_for_sink( session, table_name, table_catalog, - source_schema, + format_encode, handler_args, col_id_gen, columns, @@ -814,7 +807,10 @@ pub(crate) fn derive_default_column_project_for_sink( /// Transforms the (format, encode, options) from sqlparser AST into an internal struct `SinkFormatDesc`. /// This is an analogy to (part of) [`crate::handler::create_source::bind_columns_from_source`] /// which transforms sqlparser AST `SourceSchemaV2` into `StreamSourceInfo`. -fn bind_sink_format_desc(session: &SessionImpl, value: ConnectorSchema) -> Result { +fn bind_sink_format_desc( + session: &SessionImpl, + value: FormatEncodeOptions, +) -> Result { use risingwave_connector::sink::catalog::{SinkEncode, SinkFormat}; use risingwave_connector::sink::encoder::TimestamptzHandlingMode; use risingwave_sqlparser::ast::{Encode as E, Format as F}; @@ -929,7 +925,7 @@ static CONNECTORS_COMPATIBLE_FORMATS: LazyLock Result<()> { +pub fn validate_compatibility(connector: &str, format_desc: &FormatEncodeOptions) -> Result<()> { let compatible_formats = CONNECTORS_COMPATIBLE_FORMATS .get(connector) .ok_or_else(|| { diff --git a/src/frontend/src/handler/create_source.rs b/src/frontend/src/handler/create_source.rs index 3616997d384cb..9c37799422a59 100644 --- a/src/frontend/src/handler/create_source.rs +++ b/src/frontend/src/handler/create_source.rs @@ -21,6 +21,7 @@ use either::Either; use itertools::Itertools; use maplit::{convert_args, hashmap}; use pgwire::pg_response::{PgResponse, StatementType}; +use rand::Rng; use risingwave_common::array::arrow::{arrow_schema_iceberg, IcebergArrowConvert}; use risingwave_common::bail_not_implemented; use risingwave_common::catalog::{ @@ -62,8 +63,8 @@ use risingwave_pb::catalog::{PbSchemaRegistryNameStrategy, StreamSourceInfo, Wat use risingwave_pb::plan_common::additional_column::ColumnType as AdditionalColumnType; use risingwave_pb::plan_common::{EncodeType, FormatType}; use risingwave_sqlparser::ast::{ - get_delimiter, AstString, ColumnDef, ConnectorSchema, CreateSourceStatement, Encode, Format, - ObjectName, ProtobufSchema, SourceWatermark, TableConstraint, + get_delimiter, AstString, ColumnDef, CreateSourceStatement, Encode, Format, + FormatEncodeOptions, ObjectName, ProtobufSchema, SourceWatermark, TableConstraint, }; use risingwave_sqlparser::parser::{IncludeOption, IncludeOptionItem}; use thiserror_ext::AsReport; @@ -162,9 +163,9 @@ async fn extract_avro_table_schema( if let risingwave_connector::parser::EncodingProperties::Avro(avro_props) = &parser_config.encoding_config && matches!(avro_props.schema_location, SchemaLocation::File { .. 
}) - && !format_encode_options + && format_encode_options .get("with_deprecated_file_header") - .is_some_and(|v| v == "true") + .is_none_or(|v| v != "true") { bail_not_implemented!(issue = 12871, "avro without schema registry"); } @@ -299,7 +300,7 @@ fn get_name_strategy_or_default(name_strategy: Option) -> Result, ) -> Result<(Option>, StreamSourceInfo)> { const MESSAGE_NAME_KEY: &str = "message"; @@ -313,7 +314,7 @@ pub(crate) async fn bind_columns_from_source( let is_kafka: bool = options_with_secret.is_kafka_connector(); let (format_encode_options, format_encode_secret_refs) = resolve_secret_ref_in_with_options( - WithOptions::try_from(source_schema.row_options())?, + WithOptions::try_from(format_encode.row_options())?, session, )? .into_parts(); @@ -345,18 +346,18 @@ pub(crate) async fn bind_columns_from_source( } let mut stream_source_info = StreamSourceInfo { - format: format_to_prost(&source_schema.format) as i32, - row_encode: row_encode_to_prost(&source_schema.row_encode) as i32, + format: format_to_prost(&format_encode.format) as i32, + row_encode: row_encode_to_prost(&format_encode.row_encode) as i32, format_encode_options, format_encode_secret_refs, ..Default::default() }; - if source_schema.format == Format::Debezium { + if format_encode.format == Format::Debezium { try_consume_string_from_options(&mut format_encode_options_to_consume, DEBEZIUM_IGNORE_KEY); } - let columns = match (&source_schema.format, &source_schema.row_encode) { + let columns = match (&format_encode.format, &format_encode.row_encode) { (Format::Native, Encode::Native) | (Format::Plain, Encode::Bytes) | (Format::DebeziumMongo, Encode::Json) => None, @@ -476,7 +477,7 @@ pub(crate) async fn bind_columns_from_source( Encode::Json, ) => { if matches!( - source_schema.format, + format_encode.format, Format::Plain | Format::Upsert | Format::Debezium ) { // Parse the value but throw it away. @@ -524,8 +525,8 @@ pub(crate) async fn bind_columns_from_source( if !format_encode_options_to_consume.is_empty() { let err_string = format!( "Get unknown format_encode_options for {:?} {:?}: {}", - source_schema.format, - source_schema.row_encode, + format_encode.format, + format_encode.row_encode, format_encode_options_to_consume .keys() .map(|k| k.to_string()) @@ -539,10 +540,10 @@ pub(crate) async fn bind_columns_from_source( fn bind_columns_from_source_for_cdc( session: &SessionImpl, - source_schema: &ConnectorSchema, + format_encode: &FormatEncodeOptions, ) -> Result<(Option>, StreamSourceInfo)> { let (format_encode_options, format_encode_secret_refs) = resolve_secret_ref_in_with_options( - WithOptions::try_from(source_schema.row_options())?, + WithOptions::try_from(format_encode.row_options())?, session, )? .into_parts(); @@ -553,7 +554,7 @@ fn bind_columns_from_source_for_cdc( format_encode_secret_refs.clone(), )?; - match (&source_schema.format, &source_schema.row_encode) { + match (&format_encode.format, &format_encode.row_encode) { (Format::Plain, Encode::Json) => (), (format, encoding) => { // Note: parser will also check this. 
Just be extra safe here @@ -568,8 +569,8 @@ fn bind_columns_from_source_for_cdc( let schema_config = get_json_schema_location(&mut format_encode_options_to_consume)?; let stream_source_info = StreamSourceInfo { - format: format_to_prost(&source_schema.format) as i32, - row_encode: row_encode_to_prost(&source_schema.row_encode) as i32, + format: format_to_prost(&format_encode.format) as i32, + row_encode: row_encode_to_prost(&format_encode.row_encode) as i32, format_encode_options, use_schema_registry: json_schema_infer_use_schema_registry(&schema_config), cdc_source_job: true, @@ -580,8 +581,8 @@ fn bind_columns_from_source_for_cdc( if !format_encode_options_to_consume.is_empty() { let err_string = format!( "Get unknown format_encode_options for {:?} {:?}: {}", - source_schema.format, - source_schema.row_encode, + format_encode.format, + format_encode.row_encode, format_encode_options_to_consume .keys() .map(|k| k.to_string()) @@ -596,7 +597,7 @@ fn bind_columns_from_source_for_cdc( // check the additional column compatibility with the format and encode fn check_additional_column_compatibility( column_def: &IncludeOptionItem, - source_schema: Option<&ConnectorSchema>, + format_encode: Option<&FormatEncodeOptions>, ) -> Result<()> { // only allow header column have inner field if column_def.inner_field.is_some() @@ -612,7 +613,7 @@ fn check_additional_column_compatibility( } // Payload column only allowed when encode is JSON - if let Some(schema) = source_schema + if let Some(schema) = format_encode && column_def .column_type .real_value() @@ -629,7 +630,7 @@ fn check_additional_column_compatibility( /// add connector-spec columns to the end of column catalog pub fn handle_addition_columns( - source_schema: Option<&ConnectorSchema>, + format_encode: Option<&FormatEncodeOptions>, with_properties: &BTreeMap, mut additional_columns: IncludeOption, columns: &mut Vec, @@ -647,7 +648,7 @@ pub fn handle_addition_columns( } while let Some(item) = additional_columns.pop() { - check_additional_column_compatibility(&item, source_schema)?; + check_additional_column_compatibility(&item, format_encode)?; let data_type = item .header_inner_expect_type @@ -678,7 +679,7 @@ pub fn handle_addition_columns( /// Bind columns from both source and sql defined. pub(crate) fn bind_all_columns( - source_schema: &ConnectorSchema, + format_encode: &FormatEncodeOptions, cols_from_source: Option>, cols_from_sql: Vec, col_defs_from_sql: &[ColumnDef], @@ -707,7 +708,7 @@ pub(crate) fn bind_all_columns( // TODO(yuhao): https://github.com/risingwavelabs/risingwave/issues/12209 Err(RwError::from(ProtocolError( format!("User-defined schema from SQL is not allowed with FORMAT {} ENCODE {}. \ - Please refer to https://www.risingwave.dev/docs/current/sql-create-source/ for more information.", source_schema.format, source_schema.row_encode)))) + Please refer to https://www.risingwave.dev/docs/current/sql-create-source/ for more information.", format_encode.format, format_encode.row_encode)))) } } else { if wildcard_idx.is_some() { @@ -717,7 +718,7 @@ pub(crate) fn bind_all_columns( ))); } let non_generated_sql_defined_columns = non_generated_sql_columns(col_defs_from_sql); - match (&source_schema.format, &source_schema.row_encode) { + match (&format_encode.format, &format_encode.row_encode) { (Format::DebeziumMongo, Encode::Json) => { let mut columns = vec![ ColumnCatalog { @@ -817,7 +818,7 @@ example: /// Bind column from source. Add key column to table columns if necessary. /// Return `pk_names`. 
pub(crate) async fn bind_source_pk( - source_schema: &ConnectorSchema, + format_encode: &FormatEncodeOptions, source_info: &StreamSourceInfo, columns: &mut [ColumnCatalog], sql_defined_pk_names: Vec, @@ -849,7 +850,7 @@ pub(crate) async fn bind_source_pk( }) .collect_vec(); - let res = match (&source_schema.format, &source_schema.row_encode) { + let res = match (&format_encode.format, &format_encode.row_encode) { (Format::Native, Encode::Native) | (Format::None, Encode::None) | (Format::Plain, _) => { sql_defined_pk_names } @@ -1149,7 +1150,7 @@ pub fn validate_license(connector: &str) -> Result<()> { } pub fn validate_compatibility( - source_schema: &ConnectorSchema, + format_encode: &FormatEncodeOptions, props: &mut BTreeMap, ) -> Result<()> { let mut connector = props @@ -1183,9 +1184,9 @@ pub fn validate_compatibility( validate_license(&connector)?; if connector != KAFKA_CONNECTOR { - let res = match (&source_schema.format, &source_schema.row_encode) { + let res = match (&format_encode.format, &format_encode.row_encode) { (Format::Plain, Encode::Protobuf) | (Format::Plain, Encode::Avro) => { - let mut options = WithOptions::try_from(source_schema.row_options())?; + let mut options = WithOptions::try_from(format_encode.row_options())?; let (_, use_schema_registry) = get_schema_location(options.inner_mut())?; use_schema_registry } @@ -1201,17 +1202,17 @@ pub fn validate_compatibility( } let compatible_encodes = compatible_formats - .get(&source_schema.format) + .get(&format_encode.format) .ok_or_else(|| { RwError::from(ProtocolError(format!( "connector {} does not support format {:?}", - connector, source_schema.format + connector, format_encode.format ))) })?; - if !compatible_encodes.contains(&source_schema.row_encode) { + if !compatible_encodes.contains(&format_encode.row_encode) { return Err(RwError::from(ProtocolError(format!( "connector {} does not support format {:?} with encode {:?}", - connector, source_schema.format, source_schema.row_encode + connector, format_encode.format, format_encode.row_encode )))); } @@ -1267,7 +1268,7 @@ pub fn validate_compatibility( /// /// One should only call this function after all properties of all columns are resolved, like /// generated column descriptors. -pub(super) async fn check_source_schema( +pub(super) async fn check_format_encode( props: &WithOptionsSecResolved, row_id_index: Option, columns: &[ColumnCatalog], @@ -1424,11 +1425,11 @@ pub async fn check_iceberg_source( pub fn bind_connector_props( handler_args: &HandlerArgs, - source_schema: &ConnectorSchema, + format_encode: &FormatEncodeOptions, is_create_source: bool, ) -> Result { let mut with_properties = handler_args.with_options.clone().into_connector_props(); - validate_compatibility(source_schema, &mut with_properties)?; + validate_compatibility(format_encode, &mut with_properties)?; let create_cdc_source_job = with_properties.is_shareable_cdc_connector(); if !is_create_source && with_properties.is_shareable_only_cdc_connector() { @@ -1465,6 +1466,14 @@ pub fn bind_connector_props( .to_string(), ); } + if with_properties.is_mysql_cdc_connector() { + // Generate a random server id for mysql cdc source if needed + // `server.id` (in the range from 1 to 2^32 - 1). 
This value MUST be unique across whole replication + // group (that is, different from any other server id being used by any master or slave) + with_properties + .entry("server.id".to_string()) + .or_insert(rand::thread_rng().gen_range(1..u32::MAX).to_string()); + } Ok(with_properties) } @@ -1472,7 +1481,7 @@ pub fn bind_connector_props( pub async fn bind_create_source_or_table_with_connector( handler_args: HandlerArgs, full_name: ObjectName, - source_schema: ConnectorSchema, + format_encode: FormatEncodeOptions, with_properties: WithOptions, sql_columns_defs: &[ColumnDef], constraints: Vec, @@ -1500,11 +1509,11 @@ pub async fn bind_create_source_or_table_with_connector( .into()); } if is_create_source { - match source_schema.format { + match format_encode.format { Format::Upsert => { return Err(ErrorCode::BindError(format!( "can't CREATE SOURCE with FORMAT UPSERT\n\nHint: use CREATE TABLE instead\n\n{}", - hint_upsert(&source_schema.row_encode) + hint_upsert(&format_encode.row_encode) )) .into()); } @@ -1520,7 +1529,7 @@ pub async fn bind_create_source_or_table_with_connector( let columns_from_sql = bind_sql_columns(sql_columns_defs)?; let mut columns = bind_all_columns( - &source_schema, + &format_encode, columns_from_resolve_source, columns_from_sql, sql_columns_defs, @@ -1529,7 +1538,7 @@ pub async fn bind_create_source_or_table_with_connector( // add additional columns before bind pk, because `format upsert` requires the key column handle_addition_columns( - Some(&source_schema), + Some(&format_encode), &with_properties, include_column_options, &mut columns, @@ -1550,13 +1559,12 @@ pub async fn bind_create_source_or_table_with_connector( // resolve privatelink connection for Kafka let mut with_properties = with_properties; - let connection_id = - resolve_privatelink_in_with_option(&mut with_properties, &schema_name, session)?; + resolve_privatelink_in_with_option(&mut with_properties)?; let with_properties = resolve_secret_ref_in_with_options(with_properties, session)?; let pk_names = bind_source_pk( - &source_schema, + &format_encode, &source_info, &mut columns, sql_pk_names, @@ -1605,7 +1613,7 @@ pub async fn bind_create_source_or_table_with_connector( sql_columns_defs.to_vec(), &pk_col_ids, )?; - check_source_schema(&with_properties, row_id_index, &columns).await?; + check_format_encode(&with_properties, row_id_index, &columns).await?; let definition = handler_args.normalized_sql.clone(); @@ -1627,7 +1635,7 @@ pub async fn bind_create_source_or_table_with_connector( watermark_descs, associated_table_id, definition, - connection_id, + connection_id: None, // deprecated: private link connection id created_at_epoch: None, initialized_at_epoch: None, version: INITIAL_SOURCE_VERSION_ID, @@ -1659,8 +1667,8 @@ pub async fn handle_create_source( ))); } - let source_schema = stmt.source_schema.into_v2_with_warning(); - let with_properties = bind_connector_props(&handler_args, &source_schema, true)?; + let format_encode = stmt.format_encode.into_v2_with_warning(); + let with_properties = bind_connector_props(&handler_args, &format_encode, true)?; let create_cdc_source_job = with_properties.is_shareable_cdc_connector(); let is_shared = create_cdc_source_job @@ -1673,9 +1681,9 @@ pub async fn handle_create_source( && session.config().streaming_use_shared_source()); let (columns_from_resolve_source, mut source_info) = if create_cdc_source_job { - bind_columns_from_source_for_cdc(&session, &source_schema)? + bind_columns_from_source_for_cdc(&session, &format_encode)? 
} else { - bind_columns_from_source(&session, &source_schema, Either::Left(&with_properties)).await? + bind_columns_from_source(&session, &format_encode, Either::Left(&with_properties)).await? }; if is_shared { // Note: this field should be called is_shared. Check field doc for more details. @@ -1687,7 +1695,7 @@ pub async fn handle_create_source( let (source_catalog, database_id, schema_id) = bind_create_source_or_table_with_connector( handler_args.clone(), stmt.source_name, - source_schema, + format_encode, with_properties, &stmt.columns, stmt.constraints, diff --git a/src/frontend/src/handler/create_table.rs b/src/frontend/src/handler/create_table.rs index 2c1916174e0b7..eab38a44c4ff4 100644 --- a/src/frontend/src/handler/create_table.rs +++ b/src/frontend/src/handler/create_table.rs @@ -43,8 +43,8 @@ use risingwave_pb::plan_common::{ }; use risingwave_pb::stream_plan::StreamFragmentGraph; use risingwave_sqlparser::ast::{ - CdcTableInfo, ColumnDef, ColumnOption, ConnectorSchema, DataType as AstDataType, - ExplainOptions, Format, ObjectName, OnConflict, SourceWatermark, TableConstraint, + CdcTableInfo, ColumnDef, ColumnOption, DataType as AstDataType, ExplainOptions, Format, + FormatEncodeOptions, ObjectName, OnConflict, SourceWatermark, TableConstraint, }; use risingwave_sqlparser::parser::IncludeOption; use thiserror_ext::AsReport; @@ -468,7 +468,7 @@ pub(crate) async fn gen_create_table_plan_with_source( column_defs: Vec, wildcard_idx: Option, constraints: Vec, - source_schema: ConnectorSchema, + format_encode: FormatEncodeOptions, source_watermarks: Vec, mut col_id_gen: ColumnIdGenerator, append_only: bool, @@ -477,28 +477,28 @@ pub(crate) async fn gen_create_table_plan_with_source( include_column_options: IncludeOption, ) -> Result<(PlanRef, Option, PbTable)> { if append_only - && source_schema.format != Format::Plain - && source_schema.format != Format::Native + && format_encode.format != Format::Plain + && format_encode.format != Format::Native { return Err(ErrorCode::BindError(format!( "Append only table does not support format {}.", - source_schema.format + format_encode.format )) .into()); } let session = &handler_args.session; - let with_properties = bind_connector_props(&handler_args, &source_schema, false)?; + let with_properties = bind_connector_props(&handler_args, &format_encode, false)?; let (columns_from_resolve_source, source_info) = - bind_columns_from_source(session, &source_schema, Either::Left(&with_properties)).await?; + bind_columns_from_source(session, &format_encode, Either::Left(&with_properties)).await?; let overwrite_options = OverwriteOptions::new(&mut handler_args); let rate_limit = overwrite_options.source_rate_limit; let (source_catalog, database_id, schema_id) = bind_create_source_or_table_with_connector( handler_args.clone(), table_name, - source_schema, + format_encode, with_properties, &column_defs, constraints, @@ -940,7 +940,7 @@ fn derive_with_options_for_cdc_table( pub(super) async fn handle_create_table_plan( handler_args: HandlerArgs, explain_options: ExplainOptions, - source_schema: Option, + format_encode: Option, cdc_table_info: Option, table_name: ObjectName, column_defs: Vec, @@ -953,16 +953,16 @@ pub(super) async fn handle_create_table_plan( include_column_options: IncludeOption, ) -> Result<(PlanRef, Option, PbTable, TableJobType)> { let col_id_gen = ColumnIdGenerator::new_initial(); - let source_schema = check_create_table_with_source( + let format_encode = check_create_table_with_source( &handler_args.with_options, - source_schema, + 
format_encode, &include_column_options, &cdc_table_info, )?; let ((plan, source, table), job_type) = - match (source_schema, cdc_table_info.as_ref()) { - (Some(source_schema), None) => ( + match (format_encode, cdc_table_info.as_ref()) { + (Some(format_encode), None) => ( gen_create_table_plan_with_source( handler_args, explain_options, @@ -970,7 +970,7 @@ pub(super) async fn handle_create_table_plan( column_defs, wildcard_idx, constraints, - source_schema, + format_encode, source_watermarks, col_id_gen, append_only, @@ -1015,12 +1015,12 @@ pub(super) async fn handle_create_table_plan( session.get_database_and_schema_id_for_create(schema_name.clone())?; // cdc table cannot be append-only - let (source_schema, source_name) = + let (format_encode, source_name) = Binder::resolve_schema_qualified_name(db_name, cdc_table.source_name.clone())?; let source = { let catalog_reader = session.env().catalog_reader().read_guard(); - let schema_name = source_schema + let schema_name = format_encode .clone() .unwrap_or(DEFAULT_SCHEMA_NAME.to_string()); let (source, _) = catalog_reader.get_source_by_name( @@ -1235,7 +1235,7 @@ pub async fn handle_create_table( wildcard_idx: Option, constraints: Vec, if_not_exists: bool, - source_schema: Option, + format_encode: Option, source_watermarks: Vec, append_only: bool, on_conflict: Option, @@ -1263,7 +1263,7 @@ pub async fn handle_create_table( let (plan, source, table, job_type) = handle_create_table_plan( handler_args, ExplainOptions::default(), - source_schema, + format_encode, cdc_table_info, table_name.clone(), column_defs, @@ -1298,13 +1298,13 @@ pub async fn handle_create_table( pub fn check_create_table_with_source( with_options: &WithOptions, - source_schema: Option, + format_encode: Option, include_column_options: &IncludeOption, cdc_table_info: &Option, -) -> Result> { +) -> Result> { // skip check for cdc table if cdc_table_info.is_some() { - return Ok(source_schema); + return Ok(format_encode); } let defined_source = with_options.contains_key(UPSTREAM_SOURCE_KEY); if !include_column_options.is_empty() && !defined_source { @@ -1314,11 +1314,11 @@ pub fn check_create_table_with_source( .into()); } if defined_source { - source_schema.as_ref().ok_or_else(|| { + format_encode.as_ref().ok_or_else(|| { ErrorCode::InvalidInputSyntax("Please specify a source schema using FORMAT".to_owned()) })?; } - Ok(source_schema) + Ok(format_encode) } #[allow(clippy::too_many_arguments)] @@ -1326,7 +1326,7 @@ pub async fn generate_stream_graph_for_table( _session: &Arc, table_name: ObjectName, original_catalog: &Arc, - source_schema: Option, + format_encode: Option, handler_args: HandlerArgs, col_id_gen: ColumnIdGenerator, column_defs: Vec, @@ -1341,8 +1341,8 @@ pub async fn generate_stream_graph_for_table( ) -> Result<(StreamFragmentGraph, Table, Option, TableJobType)> { use risingwave_pb::catalog::table::OptionalAssociatedSourceId; - let ((plan, source, table), job_type) = match (source_schema, cdc_table_info.as_ref()) { - (Some(source_schema), None) => ( + let ((plan, source, table), job_type) = match (format_encode, cdc_table_info.as_ref()) { + (Some(format_encode), None) => ( gen_create_table_plan_with_source( handler_args, ExplainOptions::default(), @@ -1350,7 +1350,7 @@ pub async fn generate_stream_graph_for_table( column_defs, wildcard_idx, constraints, - source_schema, + format_encode, source_watermarks, col_id_gen, append_only, @@ -1463,12 +1463,12 @@ fn get_source_and_resolved_table_name( let (database_id, schema_id) = 
session.get_database_and_schema_id_for_create(schema_name.clone())?; - let (source_schema, source_name) = + let (format_encode, source_name) = Binder::resolve_schema_qualified_name(db_name, cdc_table.source_name.clone())?; let source = { let catalog_reader = session.env().catalog_reader().read_guard(); - let schema_name = source_schema.unwrap_or(DEFAULT_SCHEMA_NAME.to_string()); + let schema_name = format_encode.unwrap_or(DEFAULT_SCHEMA_NAME.to_string()); let (source, _) = catalog_reader.get_source_by_name( db_name, SchemaPath::Name(schema_name.as_str()), diff --git a/src/frontend/src/handler/explain.rs b/src/frontend/src/handler/explain.rs index f9bcc19379256..1740c161c3fbe 100644 --- a/src/frontend/src/handler/explain.rs +++ b/src/frontend/src/handler/explain.rs @@ -57,7 +57,7 @@ async fn do_handle_explain( name, columns, constraints, - source_schema, + format_encode, source_watermarks, append_only, on_conflict, @@ -67,12 +67,12 @@ async fn do_handle_explain( wildcard_idx, .. } => { - let source_schema = source_schema.map(|s| s.into_v2_with_warning()); + let format_encode = format_encode.map(|s| s.into_v2_with_warning()); let (plan, _source, _table, _job_type) = handle_create_table_plan( handler_args, explain_options, - source_schema, + format_encode, cdc_table_info, name.clone(), columns, diff --git a/src/frontend/src/handler/mod.rs b/src/frontend/src/handler/mod.rs index e0bd5a5efae2e..d9a190ca319e7 100644 --- a/src/frontend/src/handler/mod.rs +++ b/src/frontend/src/handler/mod.rs @@ -336,7 +336,7 @@ pub async fn handle( or_replace, temporary, if_not_exists, - source_schema, + format_encode, source_watermarks, append_only, on_conflict, @@ -363,7 +363,7 @@ pub async fn handle( ) .await; } - let source_schema = source_schema.map(|s| s.into_v2_with_warning()); + let format_encode = format_encode.map(|s| s.into_v2_with_warning()); create_table::handle_create_table( handler_args, name, @@ -371,7 +371,7 @@ pub async fn handle( wildcard_idx, constraints, if_not_exists, - source_schema, + format_encode, source_watermarks, append_only, on_conflict, @@ -948,9 +948,9 @@ pub async fn handle( } Statement::AlterSource { name, - operation: AlterSourceOperation::FormatEncode { connector_schema }, + operation: AlterSourceOperation::FormatEncode { format_encode }, } => { - alter_source_with_sr::handle_alter_source_with_sr(handler_args, name, connector_schema) + alter_source_with_sr::handle_alter_source_with_sr(handler_args, name, format_encode) .await } Statement::AlterSource { diff --git a/src/frontend/src/handler/show.rs b/src/frontend/src/handler/show.rs index 0da3e76ce3fe6..d8ec8b44d827e 100644 --- a/src/frontend/src/handler/show.rs +++ b/src/frontend/src/handler/show.rs @@ -633,7 +633,7 @@ pub fn handle_show_create_object( let view = schema .get_view_by_name(&object_name) .ok_or_else(|| CatalogError::NotFound("view", name.to_string()))?; - view.create_sql() + view.create_sql(schema.name()) } ShowCreateType::Table => { let table = schema diff --git a/src/frontend/src/handler/util.rs b/src/frontend/src/handler/util.rs index 9ff2cc92b5525..169716cd504a2 100644 --- a/src/frontend/src/handler/util.rs +++ b/src/frontend/src/handler/util.rs @@ -35,8 +35,8 @@ use risingwave_common::types::{ use risingwave_common::util::epoch::Epoch; use risingwave_common::util::iter_util::ZipEqFast; use risingwave_sqlparser::ast::{ - CompatibleSourceSchema, ConnectorSchema, Expr, Ident, ObjectName, OrderByExpr, Query, Select, - SelectItem, SetExpr, TableFactor, TableWithJoins, + CompatibleFormatEncode, Expr, 
FormatEncodeOptions, Ident, ObjectName, OrderByExpr, Query, + Select, SelectItem, SetExpr, TableFactor, TableWithJoins, }; use thiserror_ext::AsReport; @@ -194,16 +194,16 @@ pub fn to_pg_field(f: &Field) -> PgFieldDescriptor { } #[easy_ext::ext(SourceSchemaCompatExt)] -impl CompatibleSourceSchema { - /// Convert `self` to [`ConnectorSchema`] and warn the user if the syntax is deprecated. - pub fn into_v2_with_warning(self) -> ConnectorSchema { +impl CompatibleFormatEncode { + /// Convert `self` to [`FormatEncodeOptions`] and warn the user if the syntax is deprecated. + pub fn into_v2_with_warning(self) -> FormatEncodeOptions { match self { - CompatibleSourceSchema::RowFormat(inner) => { + CompatibleFormatEncode::RowFormat(inner) => { // TODO: should be warning current::notice_to_user("RisingWave will stop supporting the syntax \"ROW FORMAT\" in future versions, which will be changed to \"FORMAT ... ENCODE ...\" syntax."); - inner.into_source_schema_v2() + inner.into_format_encode_v2() } - CompatibleSourceSchema::V2(inner) => inner, + CompatibleFormatEncode::V2(inner) => inner, } } } diff --git a/src/frontend/src/handler/variable.rs b/src/frontend/src/handler/variable.rs index 96fd232215ccd..e22ab045565d3 100644 --- a/src/frontend/src/handler/variable.rs +++ b/src/frontend/src/handler/variable.rs @@ -54,7 +54,7 @@ pub fn handle_set( status: &'a mut ParameterStatus, } - impl<'a> ConfigReporter for Reporter<'a> { + impl ConfigReporter for Reporter<'_> { fn report_status(&mut self, key: &str, new_val: String) { if key == "APPLICATION_NAME" { self.status.application_name = Some(new_val); diff --git a/src/frontend/src/lib.rs b/src/frontend/src/lib.rs index 8dbd09a19a629..5c006e191157e 100644 --- a/src/frontend/src/lib.rs +++ b/src/frontend/src/lib.rs @@ -33,7 +33,6 @@ #![feature(error_generic_member_access)] #![feature(iterator_try_collect)] #![feature(used_with_arg)] -#![feature(entry_insert)] #![recursion_limit = "256"] #[cfg(test)] diff --git a/src/frontend/src/optimizer/logical_optimization.rs b/src/frontend/src/optimizer/logical_optimization.rs index 766cde4ecfc7e..e413188154569 100644 --- a/src/frontend/src/optimizer/logical_optimization.rs +++ b/src/frontend/src/optimizer/logical_optimization.rs @@ -136,6 +136,8 @@ static TABLE_FUNCTION_CONVERT: LazyLock = LazyLock::new(|| { TableFunctionToFileScanRule::create(), // Apply postgres query rule next TableFunctionToPostgresQueryRule::create(), + // Apply mysql query rule next + TableFunctionToMySqlQueryRule::create(), // Apply project set rule last TableFunctionToProjectSetRule::create(), ], @@ -159,6 +161,14 @@ static TABLE_FUNCTION_TO_POSTGRES_QUERY: LazyLock = LazyLock: ) }); +static TABLE_FUNCTION_TO_MYSQL_QUERY: LazyLock = LazyLock::new(|| { + OptimizationStage::new( + "Table Function To MySQL", + vec![TableFunctionToMySqlQueryRule::create()], + ApplyOrder::TopDown, + ) +}); + static VALUES_EXTRACT_PROJECT: LazyLock = LazyLock::new(|| { OptimizationStage::new( "Values Extract Project", @@ -713,6 +723,7 @@ impl LogicalOptimizer { // Table function should be converted into `file_scan` before `project_set`. plan = plan.optimize_by_rules(&TABLE_FUNCTION_TO_FILE_SCAN); plan = plan.optimize_by_rules(&TABLE_FUNCTION_TO_POSTGRES_QUERY); + plan = plan.optimize_by_rules(&TABLE_FUNCTION_TO_MYSQL_QUERY); // In order to unnest a table function, we need to convert it into a `project_set` first. 
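// Ordering note (an assumption drawn from the surrounding comments, not new behavior):
// the TABLE_FUNCTION_TO_MYSQL_QUERY stage added above is applied while the original
// `mysql_query(...)` TableFunction is still present in the plan, i.e. before the
// TABLE_FUNCTION_CONVERT stage below rewrites remaining table functions into
// `project_set`, mirroring how the file_scan and postgres_query stages are ordered.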
plan = plan.optimize_by_rules(&TABLE_FUNCTION_CONVERT); diff --git a/src/frontend/src/optimizer/plan_node/batch_mysql_query.rs b/src/frontend/src/optimizer/plan_node/batch_mysql_query.rs new file mode 100644 index 0000000000000..308b1e82c63f3 --- /dev/null +++ b/src/frontend/src/optimizer/plan_node/batch_mysql_query.rs @@ -0,0 +1,96 @@ +// Copyright 2024 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use pretty_xmlish::XmlNode; +use risingwave_pb::batch_plan::plan_node::NodeBody; +use risingwave_pb::batch_plan::MySqlQueryNode; + +use super::batch::prelude::*; +use super::utils::{childless_record, column_names_pretty, Distill}; +use super::{ + generic, ExprRewritable, PlanBase, PlanRef, ToBatchPb, ToDistributedBatch, ToLocalBatch, +}; +use crate::error::Result; +use crate::optimizer::plan_node::expr_visitable::ExprVisitable; +use crate::optimizer::property::{Distribution, Order}; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct BatchMySqlQuery { + pub base: PlanBase, + pub core: generic::MySqlQuery, +} + +impl BatchMySqlQuery { + pub fn new(core: generic::MySqlQuery) -> Self { + let base = PlanBase::new_batch_with_core(&core, Distribution::Single, Order::any()); + + Self { base, core } + } + + pub fn column_names(&self) -> Vec<&str> { + self.schema().names_str() + } + + pub fn clone_with_dist(&self) -> Self { + let base = self.base.clone_with_new_distribution(Distribution::Single); + Self { + base, + core: self.core.clone(), + } + } +} + +impl_plan_tree_node_for_leaf! { BatchMySqlQuery } + +impl Distill for BatchMySqlQuery { + fn distill<'a>(&self) -> XmlNode<'a> { + let fields = vec![("columns", column_names_pretty(self.schema()))]; + childless_record("BatchMySqlQuery", fields) + } +} + +impl ToLocalBatch for BatchMySqlQuery { + fn to_local(&self) -> Result { + Ok(self.clone_with_dist().into()) + } +} + +impl ToDistributedBatch for BatchMySqlQuery { + fn to_distributed(&self) -> Result { + Ok(self.clone_with_dist().into()) + } +} + +impl ToBatchPb for BatchMySqlQuery { + fn to_batch_prost_body(&self) -> NodeBody { + NodeBody::MysqlQuery(MySqlQueryNode { + columns: self + .core + .columns() + .iter() + .map(|c| c.to_protobuf()) + .collect(), + hostname: self.core.hostname.clone(), + port: self.core.port.clone(), + username: self.core.username.clone(), + password: self.core.password.clone(), + database: self.core.database.clone(), + query: self.core.query.clone(), + }) + } +} + +impl ExprRewritable for BatchMySqlQuery {} + +impl ExprVisitable for BatchMySqlQuery {} diff --git a/src/frontend/src/optimizer/plan_node/generic/join.rs b/src/frontend/src/optimizer/plan_node/generic/join.rs index f7ce096e73eb2..4663205ea6f09 100644 --- a/src/frontend/src/optimizer/plan_node/generic/join.rs +++ b/src/frontend/src/optimizer/plan_node/generic/join.rs @@ -492,7 +492,6 @@ impl Join { /// predicate. /// /// `InputRef`s in the right pushed condition are indexed by the right child's output schema. 
- pub fn push_down_into_join( predicate: &mut Condition, left_col_num: usize, @@ -534,7 +533,6 @@ pub fn push_down_into_join( /// pushed part will be removed from the original join predicate. /// /// `InputRef`s in the right pushed condition are indexed by the right child's output schema. - pub fn push_down_join_condition( on_condition: &mut Condition, left_col_num: usize, diff --git a/src/frontend/src/optimizer/plan_node/generic/mod.rs b/src/frontend/src/optimizer/plan_node/generic/mod.rs index 6a076025b906c..c35a367e8ccec 100644 --- a/src/frontend/src/optimizer/plan_node/generic/mod.rs +++ b/src/frontend/src/optimizer/plan_node/generic/mod.rs @@ -92,6 +92,9 @@ pub use file_scan::*; mod postgres_query; pub use postgres_query::*; +mod mysql_query; +pub use mysql_query::*; + pub trait DistillUnit { fn distill_with_name<'a>(&self, name: impl Into>) -> XmlNode<'a>; } diff --git a/src/frontend/src/optimizer/plan_node/generic/mysql_query.rs b/src/frontend/src/optimizer/plan_node/generic/mysql_query.rs new file mode 100644 index 0000000000000..03bbfa0b229eb --- /dev/null +++ b/src/frontend/src/optimizer/plan_node/generic/mysql_query.rs @@ -0,0 +1,67 @@ +// Copyright 2024 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use educe::Educe; +use risingwave_common::catalog::{ColumnDesc, ColumnId, Schema}; + +use super::GenericPlanNode; +use crate::optimizer::optimizer_context::OptimizerContextRef; +use crate::optimizer::property::FunctionalDependencySet; + +#[derive(Debug, Clone, Educe)] +#[educe(PartialEq, Eq, Hash)] +pub struct MySqlQuery { + pub schema: Schema, + pub hostname: String, + pub port: String, + pub username: String, + pub password: String, + pub database: String, + pub query: String, + + #[educe(PartialEq(ignore))] + #[educe(Hash(ignore))] + pub ctx: OptimizerContextRef, +} + +impl GenericPlanNode for MySqlQuery { + fn schema(&self) -> Schema { + self.schema.clone() + } + + fn stream_key(&self) -> Option> { + None + } + + fn ctx(&self) -> OptimizerContextRef { + self.ctx.clone() + } + + fn functional_dependency(&self) -> FunctionalDependencySet { + FunctionalDependencySet::new(self.schema.len()) + } +} + +impl MySqlQuery { + pub fn columns(&self) -> Vec { + self.schema + .fields + .iter() + .enumerate() + .map(|(i, f)| { + ColumnDesc::named(f.name.clone(), ColumnId::new(i as i32), f.data_type.clone()) + }) + .collect() + } +} diff --git a/src/frontend/src/optimizer/plan_node/generic/over_window.rs b/src/frontend/src/optimizer/plan_node/generic/over_window.rs index fc10df60421bb..c65dabd3ee09c 100644 --- a/src/frontend/src/optimizer/plan_node/generic/over_window.rs +++ b/src/frontend/src/optimizer/plan_node/generic/over_window.rs @@ -45,7 +45,7 @@ struct PlanWindowFunctionDisplay<'a> { pub input_schema: &'a Schema, } -impl<'a> std::fmt::Debug for PlanWindowFunctionDisplay<'a> { +impl std::fmt::Debug for PlanWindowFunctionDisplay<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let window_function = self.window_function; if 
f.alternate() { diff --git a/src/frontend/src/optimizer/plan_node/logical_agg.rs b/src/frontend/src/optimizer/plan_node/logical_agg.rs index 4e2474287c969..7f2b527979242 100644 --- a/src/frontend/src/optimizer/plan_node/logical_agg.rs +++ b/src/frontend/src/optimizer/plan_node/logical_agg.rs @@ -440,7 +440,6 @@ impl LogicalAgg { /// `MergeProject` z /// / \ /// x y - fn build_approx_percentile_aggs( &self, input: PlanRef, diff --git a/src/frontend/src/optimizer/plan_node/logical_mysql_query.rs b/src/frontend/src/optimizer/plan_node/logical_mysql_query.rs new file mode 100644 index 0000000000000..1512fe60120a3 --- /dev/null +++ b/src/frontend/src/optimizer/plan_node/logical_mysql_query.rs @@ -0,0 +1,115 @@ +// Copyright 2024 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use pretty_xmlish::XmlNode; +use risingwave_common::bail; +use risingwave_common::catalog::Schema; + +use super::generic::GenericPlanRef; +use super::utils::{childless_record, Distill}; +use super::{ + generic, BatchMySqlQuery, ColPrunable, ExprRewritable, Logical, LogicalProject, PlanBase, + PlanRef, PredicatePushdown, ToBatch, ToStream, +}; +use crate::error::Result; +use crate::optimizer::plan_node::expr_visitable::ExprVisitable; +use crate::optimizer::plan_node::utils::column_names_pretty; +use crate::optimizer::plan_node::{ + ColumnPruningContext, LogicalFilter, PredicatePushdownContext, RewriteStreamContext, + ToStreamContext, +}; +use crate::utils::{ColIndexMapping, Condition}; +use crate::OptimizerContextRef; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct LogicalMySqlQuery { + pub base: PlanBase, + pub core: generic::MySqlQuery, +} + +impl LogicalMySqlQuery { + pub fn new( + ctx: OptimizerContextRef, + schema: Schema, + hostname: String, + port: String, + username: String, + password: String, + database: String, + query: String, + ) -> Self { + let core = generic::MySqlQuery { + schema, + hostname, + port, + username, + password, + database, + query, + ctx, + }; + + let base = PlanBase::new_logical_with_core(&core); + + LogicalMySqlQuery { base, core } + } +} + +impl_plan_tree_node_for_leaf! {LogicalMySqlQuery} +impl Distill for LogicalMySqlQuery { + fn distill<'a>(&self) -> XmlNode<'a> { + let fields = vec![("columns", column_names_pretty(self.schema()))]; + childless_record("LogicalMySqlQuery", fields) + } +} + +impl ColPrunable for LogicalMySqlQuery { + fn prune_col(&self, required_cols: &[usize], _ctx: &mut ColumnPruningContext) -> PlanRef { + LogicalProject::with_out_col_idx(self.clone().into(), required_cols.iter().cloned()).into() + } +} + +impl ExprRewritable for LogicalMySqlQuery {} + +impl ExprVisitable for LogicalMySqlQuery {} + +impl PredicatePushdown for LogicalMySqlQuery { + fn predicate_pushdown( + &self, + predicate: Condition, + _ctx: &mut PredicatePushdownContext, + ) -> PlanRef { + // No pushdown. 
+ LogicalFilter::create(self.clone().into(), predicate) + } +} + +impl ToBatch for LogicalMySqlQuery { + fn to_batch(&self) -> Result { + Ok(BatchMySqlQuery::new(self.core.clone()).into()) + } +} + +impl ToStream for LogicalMySqlQuery { + fn to_stream(&self, _ctx: &mut ToStreamContext) -> Result { + bail!("mysql_query function is not supported in streaming mode") + } + + fn logical_rewrite_for_stream( + &self, + _ctx: &mut RewriteStreamContext, + ) -> Result<(PlanRef, ColIndexMapping)> { + bail!("mysql_query function is not supported in streaming mode") + } +} diff --git a/src/frontend/src/optimizer/plan_node/logical_over_window.rs b/src/frontend/src/optimizer/plan_node/logical_over_window.rs index 7273e7d418515..0e4e56ac2a200 100644 --- a/src/frontend/src/optimizer/plan_node/logical_over_window.rs +++ b/src/frontend/src/optimizer/plan_node/logical_over_window.rs @@ -151,7 +151,7 @@ impl<'a> LogicalOverWindowBuilder<'a> { } } -impl<'a> ExprRewriter for LogicalOverWindowBuilder<'a> { +impl ExprRewriter for LogicalOverWindowBuilder<'_> { fn rewrite_window_function(&mut self, window_func: WindowFunction) -> ExprImpl { let dummy = Literal::new(None, window_func.return_type()).into(); match self.try_rewrite_window_function(window_func) { @@ -226,7 +226,7 @@ impl<'a> OverWindowProjectBuilder<'a> { } } -impl<'a> ExprVisitor for OverWindowProjectBuilder<'a> { +impl ExprVisitor for OverWindowProjectBuilder<'_> { fn visit_window_function(&mut self, window_function: &WindowFunction) { if let Err(e) = self.try_visit_window_function(window_function) { self.error = Some(e); diff --git a/src/frontend/src/optimizer/plan_node/logical_postgres_query.rs b/src/frontend/src/optimizer/plan_node/logical_postgres_query.rs index 9082bd86a3f37..d3d793fb8ba01 100644 --- a/src/frontend/src/optimizer/plan_node/logical_postgres_query.rs +++ b/src/frontend/src/optimizer/plan_node/logical_postgres_query.rs @@ -103,13 +103,13 @@ impl ToBatch for LogicalPostgresQuery { impl ToStream for LogicalPostgresQuery { fn to_stream(&self, _ctx: &mut ToStreamContext) -> Result { - bail!("file_scan function is not supported in streaming mode") + bail!("postgres_query function is not supported in streaming mode") } fn logical_rewrite_for_stream( &self, _ctx: &mut RewriteStreamContext, ) -> Result<(PlanRef, ColIndexMapping)> { - bail!("file_scan function is not supported in streaming mode") + bail!("postgres_query function is not supported in streaming mode") } } diff --git a/src/frontend/src/optimizer/plan_node/logical_scan.rs b/src/frontend/src/optimizer/plan_node/logical_scan.rs index e2aeaa6b9517a..ccb90889cb946 100644 --- a/src/frontend/src/optimizer/plan_node/logical_scan.rs +++ b/src/frontend/src/optimizer/plan_node/logical_scan.rs @@ -419,13 +419,13 @@ impl PredicatePushdown for LogicalScan { self.clone_with_predicate(predicate.and(self.predicate().clone())) .into() } else { - return LogicalFilter::create( + LogicalFilter::create( self.clone_with_predicate(predicate.and(self.predicate().clone())) .into(), Condition { conjunctions: non_pushable_predicate, }, - ); + ) } } } diff --git a/src/frontend/src/optimizer/plan_node/logical_sys_scan.rs b/src/frontend/src/optimizer/plan_node/logical_sys_scan.rs index a85afa8afa039..cf711f74a039f 100644 --- a/src/frontend/src/optimizer/plan_node/logical_sys_scan.rs +++ b/src/frontend/src/optimizer/plan_node/logical_sys_scan.rs @@ -309,13 +309,13 @@ impl PredicatePushdown for LogicalSysScan { self.clone_with_predicate(predicate.and(self.predicate().clone())) .into() } else { - return 
LogicalFilter::create( + LogicalFilter::create( self.clone_with_predicate(predicate.and(self.predicate().clone())) .into(), Condition { conjunctions: non_pushable_predicate, }, - ); + ) } } } diff --git a/src/frontend/src/optimizer/plan_node/merge_eq_nodes.rs b/src/frontend/src/optimizer/plan_node/merge_eq_nodes.rs index 36d0c1bec73ac..b136d3468472f 100644 --- a/src/frontend/src/optimizer/plan_node/merge_eq_nodes.rs +++ b/src/frontend/src/optimizer/plan_node/merge_eq_nodes.rs @@ -92,7 +92,7 @@ impl VisitPlan for Counter { where F: FnMut(&mut Self), { - if !self.counts.get(&plan.id()).is_some_and(|c| *c > 1) { + if self.counts.get(&plan.id()).is_none_or(|c| *c <= 1) { f(self); } } diff --git a/src/frontend/src/optimizer/plan_node/mod.rs b/src/frontend/src/optimizer/plan_node/mod.rs index 9b814ab8289c2..432475b52809b 100644 --- a/src/frontend/src/optimizer/plan_node/mod.rs +++ b/src/frontend/src/optimizer/plan_node/mod.rs @@ -935,10 +935,14 @@ mod batch_file_scan; mod batch_iceberg_scan; mod batch_kafka_scan; mod batch_postgres_query; + +mod batch_mysql_query; mod derive; mod logical_file_scan; mod logical_iceberg_scan; mod logical_postgres_query; + +mod logical_mysql_query; mod stream_cdc_table_scan; mod stream_share; mod stream_temporal_join; @@ -961,6 +965,7 @@ pub use batch_limit::BatchLimit; pub use batch_log_seq_scan::BatchLogSeqScan; pub use batch_lookup_join::BatchLookupJoin; pub use batch_max_one_row::BatchMaxOneRow; +pub use batch_mysql_query::BatchMySqlQuery; pub use batch_nested_loop_join::BatchNestedLoopJoin; pub use batch_over_window::BatchOverWindow; pub use batch_postgres_query::BatchPostgresQuery; @@ -997,6 +1002,7 @@ pub use logical_kafka_scan::LogicalKafkaScan; pub use logical_limit::LogicalLimit; pub use logical_max_one_row::LogicalMaxOneRow; pub use logical_multi_join::{LogicalMultiJoin, LogicalMultiJoinBuilder}; +pub use logical_mysql_query::LogicalMySqlQuery; pub use logical_now::LogicalNow; pub use logical_over_window::LogicalOverWindow; pub use logical_postgres_query::LogicalPostgresQuery; @@ -1112,6 +1118,7 @@ macro_rules! for_all_plan_nodes { , { Logical, ChangeLog } , { Logical, FileScan } , { Logical, PostgresQuery } + , { Logical, MySqlQuery } , { Batch, SimpleAgg } , { Batch, HashAgg } , { Batch, SortAgg } @@ -1144,6 +1151,7 @@ macro_rules! for_all_plan_nodes { , { Batch, IcebergScan } , { Batch, FileScan } , { Batch, PostgresQuery } + , { Batch, MySqlQuery } , { Stream, Project } , { Stream, Filter } , { Stream, TableScan } @@ -1226,6 +1234,7 @@ macro_rules! for_logical_plan_nodes { , { Logical, ChangeLog } , { Logical, FileScan } , { Logical, PostgresQuery } + , { Logical, MySqlQuery } } }; } @@ -1267,6 +1276,7 @@ macro_rules! for_batch_plan_nodes { , { Batch, IcebergScan } , { Batch, FileScan } , { Batch, PostgresQuery } + , { Batch, MySqlQuery } } }; } diff --git a/src/frontend/src/optimizer/plan_visitor/plan_correlated_id_finder.rs b/src/frontend/src/optimizer/plan_visitor/plan_correlated_id_finder.rs index e2d042791af5d..4a62fe5aa4f8a 100644 --- a/src/frontend/src/optimizer/plan_visitor/plan_correlated_id_finder.rs +++ b/src/frontend/src/optimizer/plan_visitor/plan_correlated_id_finder.rs @@ -43,7 +43,6 @@ impl PlanCorrelatedIdFinder { impl PlanVisitor for PlanCorrelatedIdFinder { /// `correlated_input_ref` can only appear in `LogicalProject`, `LogicalFilter`, /// `LogicalJoin` or the `filter` clause of `PlanAggCall` of `LogicalAgg` now. 
- type Result = (); type DefaultBehavior = impl DefaultBehavior; diff --git a/src/frontend/src/optimizer/rule/apply_union_transpose_rule.rs b/src/frontend/src/optimizer/rule/apply_union_transpose_rule.rs index 0bfe6e268e14e..e66f4c8e11bc8 100644 --- a/src/frontend/src/optimizer/rule/apply_union_transpose_rule.rs +++ b/src/frontend/src/optimizer/rule/apply_union_transpose_rule.rs @@ -39,7 +39,6 @@ use crate::optimizer::PlanRef; /// / \ / \ /// Domain T1 Domain T2 /// ``` - pub struct ApplyUnionTransposeRule {} impl Rule for ApplyUnionTransposeRule { fn apply(&self, plan: PlanRef) -> Option { diff --git a/src/frontend/src/optimizer/rule/mod.rs b/src/frontend/src/optimizer/rule/mod.rs index 56d79bf7b408b..7468f1c96524c 100644 --- a/src/frontend/src/optimizer/rule/mod.rs +++ b/src/frontend/src/optimizer/rule/mod.rs @@ -161,6 +161,7 @@ mod pull_up_correlated_predicate_agg_rule; mod source_to_iceberg_scan_rule; mod source_to_kafka_scan_rule; mod table_function_to_file_scan_rule; +mod table_function_to_mysql_query_rule; mod table_function_to_postgres_query_rule; mod values_extract_project_rule; @@ -169,6 +170,7 @@ pub use pull_up_correlated_predicate_agg_rule::*; pub use source_to_iceberg_scan_rule::*; pub use source_to_kafka_scan_rule::*; pub use table_function_to_file_scan_rule::*; +pub use table_function_to_mysql_query_rule::*; pub use table_function_to_postgres_query_rule::*; pub use values_extract_project_rule::*; @@ -234,6 +236,7 @@ macro_rules! for_all_rules { , { TableFunctionToProjectSetRule } , { TableFunctionToFileScanRule } , { TableFunctionToPostgresQueryRule } + , { TableFunctionToMySqlQueryRule } , { ApplyLimitTransposeRule } , { CommonSubExprExtractRule } , { BatchProjectMergeRule } diff --git a/src/frontend/src/optimizer/rule/pull_up_correlated_predicate_agg_rule.rs b/src/frontend/src/optimizer/rule/pull_up_correlated_predicate_agg_rule.rs index 4a59dcda785b8..0cd3dfdd5716a 100644 --- a/src/frontend/src/optimizer/rule/pull_up_correlated_predicate_agg_rule.rs +++ b/src/frontend/src/optimizer/rule/pull_up_correlated_predicate_agg_rule.rs @@ -59,7 +59,6 @@ use crate::utils::{Condition, IndexSet}; /// | /// Filter /// ``` - pub struct PullUpCorrelatedPredicateAggRule {} impl Rule for PullUpCorrelatedPredicateAggRule { fn apply(&self, plan: PlanRef) -> Option { diff --git a/src/frontend/src/optimizer/rule/table_function_to_mysql_query_rule.rs b/src/frontend/src/optimizer/rule/table_function_to_mysql_query_rule.rs new file mode 100644 index 0000000000000..0ad534825790c --- /dev/null +++ b/src/frontend/src/optimizer/rule/table_function_to_mysql_query_rule.rs @@ -0,0 +1,91 @@ +// Copyright 2024 RisingWave Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
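For context on the rule implemented in this new file, a minimal sketch of the positional argument mapping it performs; the SQL call shown in the comment and the `destructure_mysql_query_args` helper are illustrative assumptions for this note, not code added by the patch. The rule constant-folds six varchar arguments of the `mysql_query` table function and uses them, in this order, to build a `LogicalMySqlQuery`.

// Assumed user-facing shape of the call this rule rewrites (illustrative only):
//   SELECT * FROM mysql_query('host', '3306', 'user', 'pass', 'db', 'SELECT 1 AS x');
//
// Hypothetical helper mirroring the positional mapping used by the rule:
// hostname, port, username, password, database, query.
fn destructure_mysql_query_args(
    args: Vec<String>,
) -> Option<(String, String, String, String, String, String)> {
    // The rule expects exactly six constant varchar arguments; fewer or more is rejected upstream.
    let [hostname, port, username, password, database, query]: [String; 6] =
        args.try_into().ok()?;
    Some((hostname, port, username, password, database, query))
}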
+ +use itertools::Itertools; +use risingwave_common::catalog::{Field, Schema}; +use risingwave_common::types::{DataType, ScalarImpl}; +use risingwave_common::util::iter_util::ZipEqDebug; + +use super::{BoxedRule, Rule}; +use crate::expr::{Expr, TableFunctionType}; +use crate::optimizer::plan_node::generic::GenericPlanRef; +// use crate::optimizer::plan_node::{LogicalMySqlQuery, LogicalTableFunction}; +use crate::optimizer::plan_node::{LogicalMySqlQuery, LogicalTableFunction}; +use crate::optimizer::PlanRef; + +/// Transform a special `TableFunction` (with `MYSQL_QUERY` table function type) into a `LogicalMySqlQuery` +pub struct TableFunctionToMySqlQueryRule {} +impl Rule for TableFunctionToMySqlQueryRule { + fn apply(&self, plan: PlanRef) -> Option { + let logical_table_function: &LogicalTableFunction = plan.as_logical_table_function()?; + if logical_table_function.table_function.function_type != TableFunctionType::MysqlQuery { + return None; + } + assert!(!logical_table_function.with_ordinality); + let table_function_return_type = logical_table_function.table_function().return_type(); + + if let DataType::Struct(st) = table_function_return_type.clone() { + let fields = st + .types() + .zip_eq_debug(st.names()) + .map(|(data_type, name)| Field::with_name(data_type.clone(), name.to_string())) + .collect_vec(); + + let schema = Schema::new(fields); + + assert_eq!(logical_table_function.table_function().args.len(), 6); + let mut eval_args = vec![]; + for arg in &logical_table_function.table_function().args { + assert_eq!(arg.return_type(), DataType::Varchar); + let value = arg.try_fold_const().unwrap().unwrap(); + match value { + Some(ScalarImpl::Utf8(s)) => { + eval_args.push(s.to_string()); + } + _ => { + unreachable!("must be a varchar") + } + } + } + let hostname = eval_args[0].clone(); + let port = eval_args[1].clone(); + let username = eval_args[2].clone(); + let password = eval_args[3].clone(); + let database = eval_args[4].clone(); + let query = eval_args[5].clone(); + + Some( + LogicalMySqlQuery::new( + logical_table_function.ctx(), + schema, + hostname, + port, + username, + password, + database, + query, + ) + .into(), + ) + } else { + unreachable!("TableFunction return type should be struct") + } + } +} + +impl TableFunctionToMySqlQueryRule { + pub fn create() -> BoxedRule { + Box::new(TableFunctionToMySqlQueryRule {}) + } +} diff --git a/src/frontend/src/scheduler/streaming_manager.rs b/src/frontend/src/scheduler/streaming_manager.rs index dfe04acaa24dd..6e9c07eb8f98e 100644 --- a/src/frontend/src/scheduler/streaming_manager.rs +++ b/src/frontend/src/scheduler/streaming_manager.rs @@ -92,7 +92,7 @@ pub struct StreamingJobGuard<'a> { tracker: &'a StreamingJobTracker, } -impl<'a> Drop for StreamingJobGuard<'a> { +impl Drop for StreamingJobGuard<'_> { fn drop(&mut self) { self.tracker.delete_job(&self.task_id); } diff --git a/src/frontend/src/utils/condition.rs b/src/frontend/src/utils/condition.rs index 5bb53a84ae410..f3f9738e2cae1 100644 --- a/src/frontend/src/utils/condition.rs +++ b/src/frontend/src/utils/condition.rs @@ -663,14 +663,14 @@ impl Condition { new_conds: &ScalarImpl, eq_conds: &[Option], ) -> bool { - return !eq_conds.is_empty() + !eq_conds.is_empty() && eq_conds.iter().all(|l| { if let Some(l) = l { l != new_conds } else { true } - }); + }) } fn merge_lower_bound_conjunctions(lb: Vec>) -> Bound { diff --git a/src/frontend/src/utils/index_set.rs b/src/frontend/src/utils/index_set.rs index 000fb2ef924a1..e34d2f1f66cb4 100644 --- a/src/frontend/src/utils/index_set.rs 
+++ b/src/frontend/src/utils/index_set.rs @@ -106,7 +106,7 @@ impl BitAnd for IndexSet { } } -impl<'a> BitAnd for &'a IndexSet { +impl BitAnd for &IndexSet { type Output = IndexSet; fn bitand(self, rhs: Self) -> Self::Output { @@ -126,7 +126,7 @@ impl BitOr for IndexSet { } } -impl<'a> BitOr for &'a IndexSet { +impl BitOr for &IndexSet { type Output = IndexSet; fn bitor(self, rhs: Self) -> Self::Output { diff --git a/src/frontend/src/utils/pretty_serde.rs b/src/frontend/src/utils/pretty_serde.rs index 37959ce3eb975..705267c3163b6 100644 --- a/src/frontend/src/utils/pretty_serde.rs +++ b/src/frontend/src/utils/pretty_serde.rs @@ -30,7 +30,7 @@ use serde::{Serialize, Serializer}; pub struct PrettySerde<'a>(pub Pretty<'a>); -impl<'a> Serialize for PrettySerde<'a> { +impl Serialize for PrettySerde<'_> { fn serialize(&self, serializer: S) -> Result where S: Serializer, diff --git a/src/frontend/src/utils/with_options.rs b/src/frontend/src/utils/with_options.rs index e306103c02e39..9d61021dab4fe 100644 --- a/src/frontend/src/utils/with_options.rs +++ b/src/frontend/src/utils/with_options.rs @@ -16,7 +16,7 @@ use std::collections::BTreeMap; use std::num::NonZeroU32; use risingwave_connector::source::kafka::private_link::{ - insert_privatelink_broker_rewrite_map, CONNECTION_NAME_KEY, PRIVATELINK_ENDPOINT_KEY, + insert_privatelink_broker_rewrite_map, PRIVATELINK_ENDPOINT_KEY, }; pub use risingwave_connector::WithOptionsSecResolved; use risingwave_connector::WithPropertiesExt; @@ -28,7 +28,6 @@ use risingwave_sqlparser::ast::{ }; use super::OverwriteOptions; -use crate::catalog::connection_catalog::resolve_private_link_connection; use crate::catalog::ConnectionId; use crate::error::{ErrorCode, Result as RwResult, RwError}; use crate::session::SessionImpl; @@ -186,8 +185,6 @@ pub(crate) fn resolve_secret_ref_in_with_options( pub(crate) fn resolve_privatelink_in_with_option( with_options: &mut WithOptions, - schema_name: &Option, - session: &SessionImpl, ) -> RwResult> { let is_kafka = with_options.is_kafka_connector(); let privatelink_endpoint = with_options.remove(PRIVATELINK_ENDPOINT_KEY); @@ -201,28 +198,8 @@ pub(crate) fn resolve_privatelink_in_with_option( } insert_privatelink_broker_rewrite_map(with_options.inner_mut(), None, Some(endpoint)) .map_err(RwError::from)?; - return Ok(None); } - - let connection_name = with_options - .remove(CONNECTION_NAME_KEY) - .map(|s| s.to_lowercase()); - let connection_id = match connection_name { - Some(connection_name) => { - let connection = session - .get_connection_by_name(schema_name.clone(), &connection_name) - .map_err(|_| ErrorCode::ItemNotFound(connection_name))?; - if !is_kafka { - return Err(RwError::from(ErrorCode::ProtocolError( - "Connection is only supported in kafka connector".to_string(), - ))); - } - resolve_private_link_connection(&connection, with_options.inner_mut())?; - Some(connection.id) - } - None => None, - }; - Ok(connection_id) + Ok(None) } impl TryFrom<&[SqlOption]> for WithOptions { diff --git a/src/jni_core/src/lib.rs b/src/jni_core/src/lib.rs index 3f776c3a2e984..d700d979896cc 100644 --- a/src/jni_core/src/lib.rs +++ b/src/jni_core/src/lib.rs @@ -118,7 +118,7 @@ pub struct SliceGuard<'env, 'array> { slice: &'array [u8], } -impl<'env, 'array> Deref for SliceGuard<'env, 'array> { +impl Deref for SliceGuard<'_, '_> { type Target = [u8]; fn deref(&self) -> &Self::Target { @@ -132,7 +132,7 @@ pub struct Pointer<'a, T> { _phantom: PhantomData<&'a T>, } -impl<'a, T> Default for Pointer<'a, T> { +impl Default for Pointer<'_, T> { fn 
default() -> Self { Self { pointer: 0, @@ -187,7 +187,7 @@ impl<'a> Deref for EnvParam<'a> { } } -impl<'a> DerefMut for EnvParam<'a> { +impl DerefMut for EnvParam<'_> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.env } @@ -306,7 +306,7 @@ impl JavaBindingIterator<'static> { } } -impl<'a> Deref for JavaBindingIterator<'a> { +impl Deref for JavaBindingIterator<'_> { type Target = OwnedRow; fn deref(&self) -> &Self::Target { diff --git a/src/meta/Cargo.toml b/src/meta/Cargo.toml index defe4e14c8fd8..01aed8dd76b6b 100644 --- a/src/meta/Cargo.toml +++ b/src/meta/Cargo.toml @@ -91,7 +91,7 @@ uuid = { version = "1", features = ["v4"] } [target.'cfg(not(madsim))'.dependencies] axum = { workspace = true } -tower-http = { version = "0.5", features = [ +tower-http = { version = "0.6", features = [ "add-extension", "cors", "fs", diff --git a/src/meta/model/src/connection.rs b/src/meta/model/src/connection.rs index a6cfa4aefb58c..dce0daa462fc5 100644 --- a/src/meta/model/src/connection.rs +++ b/src/meta/model/src/connection.rs @@ -26,6 +26,8 @@ pub struct Model { #[sea_orm(primary_key, auto_increment = false)] pub connection_id: ConnectionId, pub name: String, + + // todo: Private link service has been deprecated, consider using a new field for the connection info pub info: PrivateLinkService, } diff --git a/src/meta/node/src/server.rs b/src/meta/node/src/server.rs index ff91fe9aa9eed..2ea5480eba6f6 100644 --- a/src/meta/node/src/server.rs +++ b/src/meta/node/src/server.rs @@ -87,7 +87,6 @@ use crate::controller::SqlMetaStore; use crate::hummock::HummockManager; use crate::manager::sink_coordination::SinkCoordinatorManager; use crate::manager::{IdleManager, MetaOpts, MetaSrvEnv}; -use crate::rpc::cloud_provider::AwsEc2Client; use crate::rpc::election::sql::{MySqlDriver, PostgresDriver, SqlBackendElectionClient}; use crate::rpc::metrics::{ start_fragment_info_monitor, start_worker_info_monitor, GLOBAL_META_METRICS, @@ -530,17 +529,8 @@ pub async fn start_service_as_election_leader( compactor_manager.clone(), )); - let mut aws_cli = None; - if let Some(my_vpc_id) = &env.opts.vpc_id - && let Some(security_group_id) = &env.opts.security_group_id - { - let cli = AwsEc2Client::new(my_vpc_id, security_group_id).await; - aws_cli = Some(cli); - } - let ddl_srv = DdlServiceImpl::new( env.clone(), - aws_cli.clone(), metadata_manager.clone(), stream_manager.clone(), source_manager.clone(), @@ -584,7 +574,7 @@ pub async fn start_service_as_election_leader( let session_params_srv = SessionParamsServiceImpl::new(env.session_params_manager_impl_ref()); let serving_srv = ServingServiceImpl::new(serving_vnode_mapping.clone(), metadata_manager.clone()); - let cloud_srv = CloudServiceImpl::new(metadata_manager.clone(), aws_cli); + let cloud_srv = CloudServiceImpl::new(); let event_log_srv = EventLogServiceImpl::new(env.event_log_manager_ref()); let cluster_limit_srv = ClusterLimitServiceImpl::new(env.clone(), metadata_manager.clone()); @@ -702,6 +692,7 @@ pub async fn start_service_as_election_leader( .add_service(MetaMemberServiceServer::new(meta_member_srv)) .add_service(DdlServiceServer::new(ddl_srv).max_decoding_message_size(usize::MAX)) .add_service(UserServiceServer::new(user_srv)) + .add_service(CloudServiceServer::new(cloud_srv)) .add_service(ScaleServiceServer::new(scale_srv).max_decoding_message_size(usize::MAX)) .add_service(HealthServer::new(health_srv)) .add_service(BackupServiceServer::new(backup_srv)) @@ -709,7 +700,6 @@ pub async fn start_service_as_election_leader( 
.add_service(SessionParamServiceServer::new(session_params_srv)) .add_service(TelemetryInfoServiceServer::new(telemetry_srv)) .add_service(ServingServiceServer::new(serving_srv)) - .add_service(CloudServiceServer::new(cloud_srv)) .add_service(SinkCoordinationServiceServer::new(sink_coordination_srv)) .add_service(EventLogServiceServer::new(event_log_srv)) .add_service(ClusterLimitServiceServer::new(cluster_limit_srv)); diff --git a/src/meta/service/src/cloud_service.rs b/src/meta/service/src/cloud_service.rs index e913b91826b6f..553a8189116c3 100644 --- a/src/meta/service/src/cloud_service.rs +++ b/src/meta/service/src/cloud_service.rs @@ -18,14 +18,10 @@ use std::sync::LazyLock; use async_trait::async_trait; use regex::Regex; use risingwave_connector::error::ConnectorResult; -use risingwave_connector::source::kafka::private_link::insert_privatelink_broker_rewrite_map; use risingwave_connector::source::{ ConnectorProperties, SourceEnumeratorContext, SourceProperties, SplitEnumerator, }; use risingwave_connector::{dispatch_source_prop, WithOptionsSecResolved}; -use risingwave_meta::manager::MetadataManager; -use risingwave_meta_model::ConnectionId; -use risingwave_pb::catalog::connection::Info::PrivateLinkService; use risingwave_pb::cloud_service::cloud_service_server::CloudService; use risingwave_pb::cloud_service::rw_cloud_validate_source_response::{Error, ErrorType}; use risingwave_pb::cloud_service::{ @@ -33,20 +29,11 @@ use risingwave_pb::cloud_service::{ }; use thiserror_ext::AsReport; use tonic::{Request, Response, Status}; - -use crate::rpc::cloud_provider::AwsEc2Client; - -pub struct CloudServiceImpl { - metadata_manager: MetadataManager, - aws_client: Option, -} +pub struct CloudServiceImpl {} impl CloudServiceImpl { - pub fn new(metadata_manager: MetadataManager, aws_client: Option) -> Self { - Self { - metadata_manager, - aws_client, - } + pub fn new() -> Self { + Self {} } } @@ -76,70 +63,7 @@ impl CloudService for CloudServiceImpl { "unexpected source type, only kafka source is supported", )); } - let mut source_cfg: BTreeMap = req.source_config.into_iter().collect(); - // if connection_id provided, check whether endpoint service is available and resolve - // broker rewrite map currently only support aws privatelink connection - if let Some(connection_id_str) = source_cfg.get("connection.id") { - let connection_id = connection_id_str.parse::().map_err(|e| { - Status::invalid_argument(format!( - "connection.id is not an integer: {}", - e.as_report() - )) - })?; - - let connection = self - .metadata_manager - .catalog_controller - .get_connection_by_id(connection_id) - .await; - - if let Err(e) = connection { - return Ok(new_rwc_validate_fail_response( - ErrorType::PrivatelinkConnectionNotFound, - e.to_report_string(), - )); - } - if let Some(PrivateLinkService(service)) = connection.unwrap().info { - if self.aws_client.is_none() { - return Ok(new_rwc_validate_fail_response( - ErrorType::AwsClientNotConfigured, - "AWS client is not configured".to_string(), - )); - } - let cli = self.aws_client.as_ref().unwrap(); - let privatelink_status = cli - .is_vpc_endpoint_ready(service.endpoint_id.as_str()) - .await; - match privatelink_status { - Err(e) => { - return Ok(new_rwc_validate_fail_response( - ErrorType::PrivatelinkUnavailable, - e.to_report_string(), - )); - } - Ok(false) => { - return Ok(new_rwc_validate_fail_response( - ErrorType::PrivatelinkUnavailable, - format!("Private link endpoint {} is not ready", service.endpoint_id,), - )); - } - _ => (), - }; - if let Err(e) = - 
insert_privatelink_broker_rewrite_map(&mut source_cfg, Some(&service), None) - { - return Ok(new_rwc_validate_fail_response( - ErrorType::PrivatelinkResolveErr, - e.to_report_string(), - )); - } - } else { - return Ok(new_rwc_validate_fail_response( - ErrorType::PrivatelinkResolveErr, - format!("connection {} has no info available", connection_id), - )); - } - } + let source_cfg: BTreeMap = req.source_config.into_iter().collect(); // XXX: We can't use secret in cloud validate source. let source_cfg = WithOptionsSecResolved::without_secrets(source_cfg); diff --git a/src/meta/service/src/ddl_service.rs b/src/meta/service/src/ddl_service.rs index 1578813e2ead9..71b45b1887eff 100644 --- a/src/meta/service/src/ddl_service.rs +++ b/src/meta/service/src/ddl_service.rs @@ -25,12 +25,8 @@ use risingwave_connector::sink::catalog::SinkId; use risingwave_meta::manager::{EventLogManagerRef, MetadataManager}; use risingwave_meta::rpc::ddl_controller::fill_table_stream_graph_info; use risingwave_meta::rpc::metrics::MetaMetrics; -use risingwave_pb::catalog::connection::private_link_service::{ - PbPrivateLinkProvider, PrivateLinkProvider, -}; -use risingwave_pb::catalog::connection::PbPrivateLinkService; use risingwave_pb::catalog::table::OptionalAssociatedSourceId; -use risingwave_pb::catalog::{connection, Comment, Connection, CreateType, Secret, Table}; +use risingwave_pb::catalog::{Comment, CreateType, Secret, Table}; use risingwave_pb::common::worker_node::State; use risingwave_pb::common::WorkerType; use risingwave_pb::ddl_service::ddl_service_server::DdlService; @@ -44,12 +40,11 @@ use tonic::{Request, Response, Status}; use crate::barrier::BarrierManagerRef; use crate::manager::sink_coordination::SinkCoordinatorManager; use crate::manager::{MetaSrvEnv, StreamingJob}; -use crate::rpc::cloud_provider::AwsEc2Client; use crate::rpc::ddl_controller::{ DdlCommand, DdlController, DropMode, ReplaceTableInfo, StreamingJobId, }; use crate::stream::{GlobalStreamManagerRef, SourceManagerRef}; -use crate::{MetaError, MetaResult}; +use crate::MetaError; #[derive(Clone)] pub struct DdlServiceImpl { @@ -58,7 +53,6 @@ pub struct DdlServiceImpl { metadata_manager: MetadataManager, sink_manager: SinkCoordinatorManager, ddl_controller: DdlController, - aws_client: Arc>, meta_metrics: Arc, } @@ -66,7 +60,6 @@ impl DdlServiceImpl { #[allow(clippy::too_many_arguments)] pub async fn new( env: MetaSrvEnv, - aws_client: Option, metadata_manager: MetadataManager, stream_manager: GlobalStreamManagerRef, source_manager: SourceManagerRef, @@ -74,22 +67,19 @@ impl DdlServiceImpl { sink_manager: SinkCoordinatorManager, meta_metrics: Arc, ) -> Self { - let aws_cli_ref = Arc::new(aws_client); let ddl_controller = DdlController::new( env.clone(), metadata_manager.clone(), stream_manager, source_manager, barrier_manager, - aws_cli_ref.clone(), ) .await; Self { env, metadata_manager, - ddl_controller, - aws_client: aws_cli_ref, sink_manager, + ddl_controller, meta_metrics, } } @@ -231,11 +221,6 @@ impl DdlService for DdlServiceImpl { let req = request.into_inner(); let source = req.get_source()?.clone(); - // validate connection before starting the DDL procedure - if let Some(connection_id) = source.connection_id { - self.validate_connection(connection_id).await?; - } - match req.fragment_graph { None => { let version = self @@ -297,11 +282,6 @@ impl DdlService for DdlServiceImpl { let fragment_graph = req.get_fragment_graph()?.clone(); let affected_table_change = req.get_affected_table_change().cloned().ok(); - // validate connection 
before starting the DDL procedure - if let Some(connection_id) = sink.connection_id { - self.validate_connection(connection_id).await?; - } - let stream_job = match &affected_table_change { None => StreamingJob::Sink(sink, None), Some(change) => { @@ -748,63 +728,10 @@ impl DdlService for DdlServiceImpl { } match req.payload.unwrap() { - create_connection_request::Payload::PrivateLink(link) => { - // currently we only support AWS - let private_link_svc = match link.get_provider()? { - PbPrivateLinkProvider::Mock => PbPrivateLinkService { - provider: link.provider, - service_name: String::new(), - endpoint_id: String::new(), - endpoint_dns_name: String::new(), - dns_entries: HashMap::new(), - }, - PbPrivateLinkProvider::Aws => { - if let Some(aws_cli) = self.aws_client.as_ref() { - let tags_env = self - .env - .opts - .privatelink_endpoint_default_tags - .as_ref() - .map(|tags| { - tags.iter() - .map(|(key, val)| (key.as_str(), val.as_str())) - .collect() - }); - aws_cli - .create_aws_private_link( - &link.service_name, - link.tags.as_deref(), - tags_env, - ) - .await? - } else { - return Err(Status::from(MetaError::unavailable( - "AWS client is not configured", - ))); - } - } - PbPrivateLinkProvider::Unspecified => { - return Err(Status::invalid_argument("Privatelink provider unspecified")); - } - }; - let connection = Connection { - id: 0, - schema_id: req.schema_id, - database_id: req.database_id, - name: req.name, - owner: req.owner_id, - info: Some(connection::Info::PrivateLinkService(private_link_svc)), - }; - - // save private link info to catalog - let version = self - .ddl_controller - .run_command(DdlCommand::CreateConnection(connection)) - .await?; - - Ok(Response::new(CreateConnectionResponse { version })) + create_connection_request::Payload::PrivateLink(_) => { + panic!("Private Link Connection has been deprecated") } - } + }; } async fn list_connections( @@ -1088,33 +1015,6 @@ impl DdlService for DdlServiceImpl { } } -impl DdlServiceImpl { - async fn validate_connection(&self, connection_id: u32) -> MetaResult<()> { - let connection = self - .metadata_manager - .catalog_controller - .get_connection_by_id(connection_id as _) - .await?; - if let Some(connection::Info::PrivateLinkService(svc)) = &connection.info { - // skip all checks for mock connection - if svc.get_provider()? == PrivateLinkProvider::Mock { - return Ok(()); - } - - // check whether private link is ready - if let Some(aws_cli) = self.aws_client.as_ref() { - if !aws_cli.is_vpc_endpoint_ready(&svc.endpoint_id).await? 
{ - return Err(MetaError::from(anyhow!( - "Private link endpoint {} is not ready", - svc.endpoint_id - ))); - } - } - } - Ok(()) - } -} - fn add_auto_schema_change_fail_event_log( meta_metrics: &Arc, table_id: u32, diff --git a/src/meta/service/src/stream_service.rs b/src/meta/service/src/stream_service.rs index 8715ed315be80..1833c19aeaecd 100644 --- a/src/meta/service/src/stream_service.rs +++ b/src/meta/service/src/stream_service.rs @@ -17,6 +17,7 @@ use std::collections::{HashMap, HashSet}; use itertools::Itertools; use risingwave_common::catalog::{DatabaseId, TableId}; use risingwave_connector::source::SplitMetaData; +use risingwave_meta::controller::fragment::StreamingJobInfo; use risingwave_meta::manager::{LocalNotification, MetadataManager}; use risingwave_meta::model; use risingwave_meta::model::ActorId; @@ -238,27 +239,35 @@ impl StreamManagerService for StreamServiceImpl { &self, _request: Request, ) -> Result, Status> { - let job_states = self + let job_infos = self .metadata_manager .catalog_controller - .list_streaming_job_states() + .list_streaming_job_infos() .await?; - let states = job_states + let states = job_infos .into_iter() - .map(|(table_id, state, parallelism, max_parallelism)| { - let parallelism = match parallelism { - StreamingParallelism::Adaptive => model::TableParallelism::Adaptive, - StreamingParallelism::Custom => model::TableParallelism::Custom, - StreamingParallelism::Fixed(n) => model::TableParallelism::Fixed(n as _), - }; - - list_table_fragment_states_response::TableFragmentState { - table_id: table_id as _, - state: PbState::from(state) as _, - parallelism: Some(parallelism.into()), - max_parallelism: max_parallelism as _, - } - }) + .map( + |StreamingJobInfo { + job_id, + job_status, + parallelism, + max_parallelism, + .. + }| { + let parallelism = match parallelism { + StreamingParallelism::Adaptive => model::TableParallelism::Adaptive, + StreamingParallelism::Custom => model::TableParallelism::Custom, + StreamingParallelism::Fixed(n) => model::TableParallelism::Fixed(n as _), + }; + + list_table_fragment_states_response::TableFragmentState { + table_id: job_id as _, + state: PbState::from(job_status) as _, + parallelism: Some(parallelism.into()), + max_parallelism: max_parallelism as _, + } + }, + ) .collect_vec(); Ok(Response::new(ListTableFragmentStatesResponse { states })) diff --git a/src/meta/src/barrier/mod.rs b/src/meta/src/barrier/mod.rs index cbf31fc97ae49..5207a6375fd0a 100644 --- a/src/meta/src/barrier/mod.rs +++ b/src/meta/src/barrier/mod.rs @@ -1363,7 +1363,6 @@ impl DatabaseCheckpointControl { impl GlobalBarrierWorker { /// Set barrier manager status. 
- async fn failure_recovery(&mut self, err: MetaError) { self.clear_on_err(&err).await; diff --git a/src/meta/src/controller/catalog.rs b/src/meta/src/controller/catalog.rs index d0067d495c79e..1e53183c490ab 100644 --- a/src/meta/src/controller/catalog.rs +++ b/src/meta/src/controller/catalog.rs @@ -32,9 +32,9 @@ use risingwave_meta_model::{ actor, connection, database, fragment, function, index, object, object_dependency, schema, secret, sink, source, streaming_job, subscription, table, user_privilege, view, ActorId, ActorUpstreamActors, ColumnCatalogArray, ConnectionId, CreateType, DatabaseId, FragmentId, - FunctionId, I32Array, IndexId, JobStatus, ObjectId, PrivateLinkService, Property, SchemaId, - SecretId, SinkId, SourceId, StreamNode, StreamSourceInfo, StreamingParallelism, SubscriptionId, - TableId, UserId, ViewId, + FunctionId, I32Array, IndexId, JobStatus, ObjectId, Property, SchemaId, SecretId, SinkId, + SourceId, StreamNode, StreamSourceInfo, StreamingParallelism, SubscriptionId, TableId, UserId, + ViewId, }; use risingwave_pb::catalog::subscription::SubscriptionState; use risingwave_pb::catalog::table::PbTableType; @@ -111,7 +111,8 @@ pub struct ReleaseContext { /// Dropped source list, need to unregister from source manager. pub(crate) source_ids: Vec, /// Dropped connection list, need to delete from vpc endpoints. - pub(crate) connections: Vec, + #[allow(dead_code)] + pub(crate) connections: Vec, /// Dropped fragments that are fetching data from the target source. pub(crate) source_fragments: HashMap>, @@ -369,7 +370,7 @@ impl CatalogController { .all(&txn) .await? .into_iter() - .map(|conn| conn.info) + .map(|conn| conn.connection_id) .collect_vec(); // Find affect users with privileges on the database and the objects in the database. 
diff --git a/src/meta/src/controller/fragment.rs b/src/meta/src/controller/fragment.rs index 811fe287c3fb7..78e6bde07b87b 100644 --- a/src/meta/src/controller/fragment.rs +++ b/src/meta/src/controller/fragment.rs @@ -23,11 +23,12 @@ use risingwave_common::hash::{VnodeCount, VnodeCountCompat, WorkerSlotId}; use risingwave_common::util::stream_graph_visitor::visit_stream_node; use risingwave_meta_model::actor::ActorStatus; use risingwave_meta_model::fragment::DistributionType; +use risingwave_meta_model::object::ObjectType; use risingwave_meta_model::prelude::{Actor, ActorDispatcher, Fragment, Sink, StreamingJob}; use risingwave_meta_model::{ - actor, actor_dispatcher, fragment, object, sink, streaming_job, ActorId, ActorUpstreamActors, - ConnectorSplits, DatabaseId, ExprContext, FragmentId, I32Array, JobStatus, ObjectId, SinkId, - SourceId, StreamNode, StreamingParallelism, TableId, VnodeBitmap, WorkerId, + actor, actor_dispatcher, fragment, object, sink, source, streaming_job, table, ActorId, + ActorUpstreamActors, ConnectorSplits, DatabaseId, ExprContext, FragmentId, I32Array, JobStatus, + ObjectId, SinkId, SourceId, StreamNode, StreamingParallelism, TableId, VnodeBitmap, WorkerId, }; use risingwave_meta_model_migration::{Alias, SelectStatement}; use risingwave_pb::common::PbActorLocation; @@ -50,9 +51,10 @@ use risingwave_pb::stream_plan::{ use sea_orm::sea_query::Expr; use sea_orm::ActiveValue::Set; use sea_orm::{ - ColumnTrait, DbErr, EntityTrait, JoinType, ModelTrait, PaginatorTrait, QueryFilter, - QuerySelect, RelationTrait, SelectGetableTuple, Selector, TransactionTrait, Value, + ColumnTrait, DbErr, EntityTrait, FromQueryResult, JoinType, ModelTrait, PaginatorTrait, + QueryFilter, QuerySelect, RelationTrait, SelectGetableTuple, Selector, TransactionTrait, Value, }; +use serde::{Deserialize, Serialize}; use tracing::debug; use crate::controller::catalog::{CatalogController, CatalogControllerInner}; @@ -78,6 +80,17 @@ pub struct FragmentParallelismInfo { pub vnode_count: usize, } +#[derive(Clone, Debug, FromQueryResult, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] // for dashboard +pub struct StreamingJobInfo { + pub job_id: ObjectId, + pub obj_type: ObjectType, + pub name: String, + pub job_status: JobStatus, + pub parallelism: StreamingParallelism, + pub max_parallelism: i32, +} + impl CatalogControllerInner { /// List all fragment vnode mapping info for all CREATED streaming jobs. 
pub async fn all_running_fragment_mappings( @@ -707,19 +720,35 @@ impl CatalogController { ) } - pub async fn list_streaming_job_states( - &self, - ) -> MetaResult> { + pub async fn list_streaming_job_infos(&self) -> MetaResult> { let inner = self.inner.read().await; let job_states = StreamingJob::find() .select_only() + .column(streaming_job::Column::JobId) + .join(JoinType::InnerJoin, streaming_job::Relation::Object.def()) + .column(object::Column::ObjType) + .join(JoinType::LeftJoin, table::Relation::Object1.def().rev()) + .join(JoinType::LeftJoin, source::Relation::Object.def().rev()) + .join(JoinType::LeftJoin, sink::Relation::Object.def().rev()) + .column_as( + Expr::if_null( + Expr::col((table::Entity, table::Column::Name)), + Expr::if_null( + Expr::col((source::Entity, source::Column::Name)), + Expr::if_null( + Expr::col((sink::Entity, sink::Column::Name)), + Expr::val(""), + ), + ), + ), + "name", + ) .columns([ - streaming_job::Column::JobId, streaming_job::Column::JobStatus, streaming_job::Column::Parallelism, streaming_job::Column::MaxParallelism, ]) - .into_tuple() + .into_model() .all(&inner.db) .await?; Ok(job_states) diff --git a/src/meta/src/dashboard/mod.rs b/src/meta/src/dashboard/mod.rs index 2bef0505ce443..32a3388cbcc90 100644 --- a/src/meta/src/dashboard/mod.rs +++ b/src/meta/src/dashboard/mod.rs @@ -76,6 +76,7 @@ pub(super) mod handlers { use thiserror_ext::AsReport; use super::*; + use crate::controller::fragment::StreamingJobInfo; pub struct DashboardError(anyhow::Error); pub type Result = std::result::Result; @@ -191,20 +192,17 @@ pub(super) mod handlers { Ok(Json(views)) } - pub async fn list_fragments( + pub async fn list_streaming_jobs( Extension(srv): Extension, - ) -> Result>> { - let table_fragments = srv + ) -> Result>> { + let streaming_jobs = srv .metadata_manager .catalog_controller - .table_fragments() + .list_streaming_job_infos() .await - .map_err(err)? - .values() - .cloned() - .collect_vec(); + .map_err(err)?; - Ok(Json(table_fragments)) + Ok(Json(streaming_jobs)) } /// In the ddl backpressure graph, we want to compute the backpressure between relations. 
@@ -508,7 +506,7 @@ impl DashboardService { let api_router = Router::new() .route("/clusters/:ty", get(list_clusters)) - .route("/fragments2", get(list_fragments)) + .route("/streaming_jobs", get(list_streaming_jobs)) .route("/fragments/job_id/:job_id", get(list_fragments_by_job_id)) .route("/relation_id_infos", get(get_relation_id_infos)) .route( diff --git a/src/meta/src/error.rs b/src/meta/src/error.rs index 8bfe188d4a3fa..f1c3bb0ffdd8a 100644 --- a/src/meta/src/error.rs +++ b/src/meta/src/error.rs @@ -116,9 +116,6 @@ pub enum MetaErrorInner { SinkError, ), - #[error("AWS SDK error: {0}")] - Aws(#[source] BoxedError), - #[error(transparent)] Internal( #[from] @@ -132,6 +129,9 @@ pub enum MetaErrorInner { #[error("Integrity check failed")] IntegrityCheckFailed, + + #[error("{0} has been deprecated, please use {1} instead.")] + Deprecated(String, String), } impl MetaError { @@ -156,15 +156,6 @@ impl MetaError { } } -impl From> for MetaError -where - E: std::error::Error + Sync + Send + 'static, -{ - fn from(e: aws_sdk_ec2::error::SdkError) -> Self { - MetaErrorInner::Aws(e.into()).into() - } -} - impl From for tonic::Status { fn from(err: MetaError) -> Self { use tonic::Code; diff --git a/src/meta/src/hummock/manager/compaction/compaction_group_manager.rs b/src/meta/src/hummock/manager/compaction/compaction_group_manager.rs index cc5b80cc29ca7..02b63ab47de62 100644 --- a/src/meta/src/hummock/manager/compaction/compaction_group_manager.rs +++ b/src/meta/src/hummock/manager/compaction/compaction_group_manager.rs @@ -489,6 +489,7 @@ impl CompactionGroupManager { CompactionGroupTransaction::new(&mut self.compaction_groups) } + #[expect(clippy::type_complexity)] pub fn start_owned_compaction_groups_txn>( inner: P, ) -> BTreeMapTransactionInner< @@ -591,7 +592,7 @@ fn update_compaction_config(target: &mut CompactionConfig, items: &[MutableConfi } } -impl<'a> CompactionGroupTransaction<'a> { +impl CompactionGroupTransaction<'_> { /// Inserts compaction group configs if they do not exist. pub fn try_create_compaction_groups( &mut self, diff --git a/src/meta/src/hummock/manager/compaction/mod.rs b/src/meta/src/hummock/manager/compaction/mod.rs index 80c6e5ca81c99..cf2c448f10021 100644 --- a/src/meta/src/hummock/manager/compaction/mod.rs +++ b/src/meta/src/hummock/manager/compaction/mod.rs @@ -143,7 +143,7 @@ fn init_selectors() -> HashMap HummockVersionTransaction<'a> { +impl HummockVersionTransaction<'_> { fn apply_compact_task(&mut self, compact_task: &CompactTask) { let mut version_delta = self.new_delta(); let trivial_move = CompactStatus::is_trivial_move_task(compact_task); @@ -1097,7 +1097,6 @@ impl HummockManager { /// /// Return Ok(false) indicates either the task is not found, /// or the task is not owned by `context_id` when `context_id` is not None. 
- pub async fn report_compact_tasks(&self, report_tasks: Vec) -> Result> { let mut guard = self.compaction.write().await; let deterministic_mode = self.env.opts.compaction_deterministic_test; diff --git a/src/meta/src/hummock/manager/transaction.rs b/src/meta/src/hummock/manager/transaction.rs index 57a228f35805f..054ae657d594d 100644 --- a/src/meta/src/hummock/manager/transaction.rs +++ b/src/meta/src/hummock/manager/transaction.rs @@ -199,7 +199,7 @@ impl<'a> HummockVersionTransaction<'a> { } } -impl<'a> InMemValTransaction for HummockVersionTransaction<'a> { +impl InMemValTransaction for HummockVersionTransaction<'_> { fn commit(self) { if let Some((version, deltas)) = self.pre_applied_version { *self.orig_version = version; @@ -258,7 +258,7 @@ pub(super) struct SingleDeltaTransaction<'a, 'b> { delta: Option, } -impl<'a, 'b> SingleDeltaTransaction<'a, 'b> { +impl SingleDeltaTransaction<'_, '_> { pub(super) fn latest_version(&self) -> &HummockVersion { self.version_txn.latest_version() } @@ -278,7 +278,7 @@ impl<'a, 'b> SingleDeltaTransaction<'a, 'b> { } } -impl<'a, 'b> Deref for SingleDeltaTransaction<'a, 'b> { +impl Deref for SingleDeltaTransaction<'_, '_> { type Target = HummockVersionDelta; fn deref(&self) -> &Self::Target { @@ -286,13 +286,13 @@ impl<'a, 'b> Deref for SingleDeltaTransaction<'a, 'b> { } } -impl<'a, 'b> DerefMut for SingleDeltaTransaction<'a, 'b> { +impl DerefMut for SingleDeltaTransaction<'_, '_> { fn deref_mut(&mut self) -> &mut Self::Target { self.delta.as_mut().expect("should exist") } } -impl<'a, 'b> Drop for SingleDeltaTransaction<'a, 'b> { +impl Drop for SingleDeltaTransaction<'_, '_> { fn drop(&mut self) { if let Some(delta) = self.delta.take() { self.version_txn.pre_apply(delta); @@ -317,7 +317,7 @@ impl<'a> HummockVersionStatsTransaction<'a> { } } -impl<'a> InMemValTransaction for HummockVersionStatsTransaction<'a> { +impl InMemValTransaction for HummockVersionStatsTransaction<'_> { fn commit(self) { if self.stats.has_new_value() { let stats = self.stats.clone(); @@ -337,7 +337,7 @@ where } } -impl<'a> Deref for HummockVersionStatsTransaction<'a> { +impl Deref for HummockVersionStatsTransaction<'_> { type Target = HummockVersionStats; fn deref(&self) -> &Self::Target { @@ -345,7 +345,7 @@ impl<'a> Deref for HummockVersionStatsTransaction<'a> { } } -impl<'a> DerefMut for HummockVersionStatsTransaction<'a> { +impl DerefMut for HummockVersionStatsTransaction<'_> { fn deref_mut(&mut self) -> &mut Self::Target { self.stats.deref_mut() } diff --git a/src/meta/src/hummock/test_utils.rs b/src/meta/src/hummock/test_utils.rs index 006b31475461d..b6631a672e385 100644 --- a/src/meta/src/hummock/test_utils.rs +++ b/src/meta/src/hummock/test_utils.rs @@ -336,6 +336,7 @@ pub async fn setup_compute_env_with_metric( compactor_streams_change_tx, ) .await; + let fake_host_address = HostAddress { host: "127.0.0.1".to_string(), port, diff --git a/src/meta/src/lib.rs b/src/meta/src/lib.rs index 8d65cf00f1a4d..7b80eddb347d5 100644 --- a/src/meta/src/lib.rs +++ b/src/meta/src/lib.rs @@ -26,12 +26,10 @@ #![cfg_attr(coverage, feature(coverage_attribute))] #![feature(custom_test_frameworks)] #![test_runner(risingwave_test_runner::test_runner::run_failpont_tests)] -#![feature(is_sorted)] #![feature(impl_trait_in_assoc_type)] #![feature(const_option)] #![feature(anonymous_lifetime_in_impl_trait)] #![feature(duration_millis_float)] -#![feature(option_get_or_insert_default)] pub mod backup_restore; pub mod barrier; diff --git a/src/meta/src/manager/diagnose.rs 
b/src/meta/src/manager/diagnose.rs index de8f983056a6a..f2d2cd58dd494 100644 --- a/src/meta/src/manager/diagnose.rs +++ b/src/meta/src/manager/diagnose.rs @@ -28,7 +28,7 @@ use risingwave_pb::meta::event_log::Event; use risingwave_pb::meta::EventLog; use risingwave_pb::monitor_service::StackTraceResponse; use risingwave_rpc_client::ComputeClientPool; -use risingwave_sqlparser::ast::{CompatibleSourceSchema, Statement, Value}; +use risingwave_sqlparser::ast::{CompatibleFormatEncode, Statement, Value}; use risingwave_sqlparser::parser::Parser; use serde_json::json; use thiserror_ext::AsReport; @@ -731,28 +731,28 @@ fn redact_all_sql_options(sql: &str) -> Option { let options = match statement { Statement::CreateTable { with_options, - source_schema, + format_encode, .. } => { - let connector_schema = match source_schema { - Some(CompatibleSourceSchema::V2(cs)) => Some(&mut cs.row_options), + let format_encode = match format_encode { + Some(CompatibleFormatEncode::V2(cs)) => Some(&mut cs.row_options), _ => None, }; - (Some(with_options), connector_schema) + (Some(with_options), format_encode) } Statement::CreateSource { stmt } => { - let connector_schema = match &mut stmt.source_schema { - CompatibleSourceSchema::V2(cs) => Some(&mut cs.row_options), + let format_encode = match &mut stmt.format_encode { + CompatibleFormatEncode::V2(cs) => Some(&mut cs.row_options), _ => None, }; - (Some(&mut stmt.with_properties.0), connector_schema) + (Some(&mut stmt.with_properties.0), format_encode) } Statement::CreateSink { stmt } => { - let connector_schema = match &mut stmt.sink_schema { + let format_encode = match &mut stmt.sink_schema { Some(cs) => Some(&mut cs.row_options), _ => None, }; - (Some(&mut stmt.with_properties.0), connector_schema) + (Some(&mut stmt.with_properties.0), format_encode) } _ => (None, None), }; diff --git a/src/meta/src/manager/metadata.rs b/src/meta/src/manager/metadata.rs index ce90747d859f1..1f6be9d15ec81 100644 --- a/src/meta/src/manager/metadata.rs +++ b/src/meta/src/manager/metadata.rs @@ -629,7 +629,7 @@ impl MetadataManager { pub async fn count_streaming_job(&self) -> MetaResult { self.catalog_controller - .list_streaming_job_states() + .list_streaming_job_infos() .await .map(|x| x.len()) } diff --git a/src/meta/src/model/mod.rs b/src/meta/src/model/mod.rs index 829ebb5890258..10d7f08448ade 100644 --- a/src/meta/src/model/mod.rs +++ b/src/meta/src/model/mod.rs @@ -113,7 +113,7 @@ impl<'a, T> VarTransaction<'a, T> { } } -impl<'a, T> Deref for VarTransaction<'a, T> { +impl Deref for VarTransaction<'_, T> { type Target = T; fn deref(&self) -> &Self::Target { @@ -124,7 +124,7 @@ impl<'a, T> Deref for VarTransaction<'a, T> { } } -impl<'a, T: Clone> DerefMut for VarTransaction<'a, T> { +impl DerefMut for VarTransaction<'_, T> { fn deref_mut(&mut self) -> &mut Self::Target { if self.new_value.is_none() { self.new_value.replace(self.orig_value_ref.clone()); @@ -133,7 +133,7 @@ impl<'a, T: Clone> DerefMut for VarTransaction<'a, T> { } } -impl<'a, T> InMemValTransaction for VarTransaction<'a, T> +impl InMemValTransaction for VarTransaction<'_, T> where T: PartialEq, { @@ -202,7 +202,7 @@ impl<'a, K: Ord, V: Clone> BTreeMapTransactionValueGuard<'a, K, V> { } } -impl<'a, K: Ord, V: Clone> Deref for BTreeMapTransactionValueGuard<'a, K, V> { +impl Deref for BTreeMapTransactionValueGuard<'_, K, V> { type Target = V; fn deref(&self) -> &Self::Target { @@ -216,7 +216,7 @@ impl<'a, K: Ord, V: Clone> Deref for BTreeMapTransactionValueGuard<'a, K, V> { } } -impl<'a, K: Ord, V: Clone> 
DerefMut for BTreeMapTransactionValueGuard<'a, K, V> { +impl DerefMut for BTreeMapTransactionValueGuard<'_, K, V> { fn deref_mut(&mut self) -> &mut Self::Target { let is_occupied = matches!( self.staging_entry.as_ref().unwrap(), @@ -478,7 +478,7 @@ impl<'a, K: Ord + Debug, V: Clone> BTreeMapEntryTransaction<'a, K, V> { } } -impl<'a, K, V> Deref for BTreeMapEntryTransaction<'a, K, V> { +impl Deref for BTreeMapEntryTransaction<'_, K, V> { type Target = V; fn deref(&self) -> &Self::Target { @@ -486,13 +486,13 @@ impl<'a, K, V> Deref for BTreeMapEntryTransaction<'a, K, V> { } } -impl<'a, K, V> DerefMut for BTreeMapEntryTransaction<'a, K, V> { +impl DerefMut for BTreeMapEntryTransaction<'_, K, V> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.new_value } } -impl<'a, K: Ord, V: PartialEq> InMemValTransaction for BTreeMapEntryTransaction<'a, K, V> { +impl InMemValTransaction for BTreeMapEntryTransaction<'_, K, V> { fn commit(self) { self.tree_ref.insert(self.key, self.new_value); } diff --git a/src/meta/src/rpc/cloud_provider.rs b/src/meta/src/rpc/cloud_provider.rs deleted file mode 100644 index fce20d5eea096..0000000000000 --- a/src/meta/src/rpc/cloud_provider.rs +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2024 RisingWave Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::collections::HashMap; - -use anyhow::anyhow; -use aws_config::retry::RetryConfig; -use aws_sdk_ec2::error::ProvideErrorMetadata; -use aws_sdk_ec2::types::{Filter, ResourceType, State, Tag, TagSpecification, VpcEndpointType}; -use itertools::Itertools; -use risingwave_pb::catalog::connection::private_link_service::PrivateLinkProvider; -use risingwave_pb::catalog::connection::PrivateLinkService; - -use crate::{MetaError, MetaResult}; - -#[derive(Clone)] -pub struct AwsEc2Client { - client: aws_sdk_ec2::Client, - /// `vpc_id`: The VPC of the running RisingWave instance - vpc_id: String, - security_group_id: String, -} - -impl AwsEc2Client { - pub async fn new(vpc_id: &str, security_group_id: &str) -> Self { - let sdk_config = aws_config::from_env() - .retry_config(RetryConfig::standard().with_max_attempts(4)) - .load() - .await; - let client = aws_sdk_ec2::Client::new(&sdk_config); - - Self { - client, - vpc_id: vpc_id.to_string(), - security_group_id: security_group_id.to_string(), - } - } - - pub async fn delete_vpc_endpoint(&self, vpc_endpoint_id: &str) -> MetaResult<()> { - let output = self - .client - .delete_vpc_endpoints() - .vpc_endpoint_ids(vpc_endpoint_id) - .send() - .await - .map_err(|e| { - anyhow!( - "Failed to delete VPC endpoint. 
endpoint_id {vpc_endpoint_id}, error: {:?}, aws_request_id: {:?}", - e.message(), - e.meta().extra("aws_request_id") - ) - })?; - - if !output.unsuccessful().is_empty() { - return Err(MetaError::from(anyhow!( - "Failed to delete VPC endpoint {}, error: {:?}", - vpc_endpoint_id, - output.unsuccessful() - ))); - } - Ok(()) - } - - /// `service_name`: The name of the endpoint service we want to access - /// `tags_user_str`: The tags specified in with clause of `create connection` - /// `tags_env`: The default tags specified in env var `RW_PRIVATELINK_ENDPOINT_DEFAULT_TAGS` - pub async fn create_aws_private_link( - &self, - service_name: &str, - tags_user_str: Option<&str>, - tags_env: Option>, - ) -> MetaResult { - // fetch the AZs of the endpoint service - let service_azs = self.get_endpoint_service_az_names(service_name).await?; - let subnet_and_azs = self.describe_subnets(&self.vpc_id, &service_azs).await?; - - let subnet_ids: Vec = subnet_and_azs.iter().map(|(id, _, _)| id.clone()).collect(); - let az_to_azid_map: HashMap = subnet_and_azs - .into_iter() - .map(|(_, az, az_id)| (az, az_id)) - .collect(); - - let tags_vec = match tags_user_str { - Some(tags_user_str) => { - let mut tags_user = tags_user_str - .split(',') - .map(|s| { - s.split_once('=').ok_or_else(|| { - MetaError::invalid_parameter("Failed to parse `tags` parameter") - }) - }) - .collect::>>()?; - match tags_env { - Some(tags_env) => { - tags_user.extend(tags_env); - Some(tags_user) - } - None => Some(tags_user), - } - } - None => tags_env, - }; - - let (endpoint_id, endpoint_dns_names) = self - .create_vpc_endpoint( - &self.vpc_id, - service_name, - &self.security_group_id, - &subnet_ids, - tags_vec, - ) - .await?; - - // The number of returned DNS names may not equal to the input AZs, - // because some AZs may not have a subnet in the RW VPC - let mut azid_to_dns_map = HashMap::new(); - if endpoint_dns_names.first().is_none() { - return Err(MetaError::from(anyhow!( - "No DNS name returned for the endpoint" - ))); - } - - // The first dns name doesn't has AZ info - let endpoint_dns_name = endpoint_dns_names.first().unwrap().clone(); - for dns_name in &endpoint_dns_names { - for az in az_to_azid_map.keys() { - if dns_name.contains(az) { - azid_to_dns_map - .insert(az_to_azid_map.get(az).unwrap().clone(), dns_name.clone()); - break; - } - } - } - - Ok(PrivateLinkService { - provider: PrivateLinkProvider::Aws.into(), - service_name: service_name.to_string(), - endpoint_id, - dns_entries: azid_to_dns_map, - endpoint_dns_name, - }) - } - - pub async fn is_vpc_endpoint_ready(&self, vpc_endpoint_id: &str) -> MetaResult { - let mut is_ready = false; - let filter = Filter::builder() - .name("vpc-endpoint-id") - .values(vpc_endpoint_id) - .build(); - let output = self - .client - .describe_vpc_endpoints() - .set_filters(Some(vec![filter])) - .send() - .await - .map_err(|e| { - anyhow!( - "Failed to check availability of VPC endpoint. 
endpoint_id: {vpc_endpoint_id}, error: {:?}, aws_request_id: {:?}", - e.message(), - e.meta().extra("aws_request_id") - ) - })?; - - match output.vpc_endpoints { - Some(endpoints) => { - let endpoint = endpoints - .into_iter() - .exactly_one() - .map_err(|_| anyhow!("More than one VPC endpoint found with the same ID"))?; - if let Some(state) = endpoint.state { - match state { - State::Available => { - is_ready = true; - } - // forward-compatible with protocol change - other => { - is_ready = other.as_str().eq_ignore_ascii_case("available"); - } - } - } - } - None => { - return Err(MetaError::from(anyhow!( - "No VPC endpoint found with the ID {}", - vpc_endpoint_id - ))); - } - } - Ok(is_ready) - } - - async fn get_endpoint_service_az_names(&self, service_name: &str) -> MetaResult> { - let mut service_azs = Vec::new(); - let output = self - .client - .describe_vpc_endpoint_services() - .set_service_names(Some(vec![service_name.to_string()])) - .send() - .await - .map_err(|e| { - anyhow!( - "Failed to describe VPC endpoint service, error: {:?}, aws_request_id: {:?}", - e.message(), - e.meta().extra("aws_request_id") - ) - })?; - - match output.service_details { - Some(details) => { - let detail = details.into_iter().exactly_one().map_err(|_| { - anyhow!("More than one VPC endpoint service found with the same name") - })?; - if let Some(azs) = detail.availability_zones { - service_azs.extend(azs.into_iter()); - } - } - None => { - return Err(MetaError::from(anyhow!( - "No VPC endpoint service found with the name {}", - service_name - ))); - } - } - Ok(service_azs) - } - - async fn describe_subnets( - &self, - vpc_id: &str, - az_names: &[String], - ) -> MetaResult> { - let vpc_filter = Filter::builder().name("vpc-id").values(vpc_id).build(); - let az_filter = Filter::builder() - .name("availability-zone") - .set_values(Some(Vec::from(az_names))) - .build(); - let output = self - .client - .describe_subnets() - .set_filters(Some(vec![vpc_filter, az_filter])) - .send() - .await - .map_err(|e| { - anyhow!("Failed to describe subnets for vpc_id {vpc_id}. error: {:?}, aws_request_id: {:?}", - e.message(), - e.meta().extra("aws_request_id")) - })?; - - let subnets = output - .subnets - .unwrap_or_default() - .into_iter() - .unique_by(|s| s.availability_zone().unwrap_or_default().to_string()) - .map(|s| { - ( - s.subnet_id.unwrap_or_default(), - s.availability_zone.unwrap_or_default(), - s.availability_zone_id.unwrap_or_default(), - ) - }) - .collect(); - Ok(subnets) - } - - async fn create_vpc_endpoint( - &self, - vpc_id: &str, - service_name: &str, - security_group_id: &str, - subnet_ids: &[String], - tags_vec: Option>, - ) -> MetaResult<(String, Vec)> { - let tag_spec = match tags_vec { - Some(tags_vec) => { - let tags = tags_vec - .into_iter() - .map(|(tag_key, tag_val)| { - Tag::builder() - .set_key(Some(tag_key.to_string())) - .set_value(Some(tag_val.to_string())) - .build() - }) - .collect(); - Some(vec![TagSpecification::builder() - .set_resource_type(Some(ResourceType::VpcEndpoint)) - .set_tags(Some(tags)) - .build()]) - } - None => None, - }; - - let output = self - .client - .create_vpc_endpoint() - .vpc_endpoint_type(VpcEndpointType::Interface) - .vpc_id(vpc_id) - .security_group_ids(security_group_id) - .service_name(service_name) - .set_subnet_ids(Some(subnet_ids.to_owned())) - .set_tag_specifications(tag_spec) - .send() - .await - .map_err(|e| { - anyhow!( - "Failed to create vpc endpoint: vpc_id {vpc_id}, \ - service_name {service_name}. 
error: {:?}, aws_request_id: {:?}", - e.message(), - e.meta().extra("aws_request_id") - ) - })?; - - let endpoint = output.vpc_endpoint().unwrap(); - let mut dns_names = Vec::new(); - - endpoint.dns_entries().iter().for_each(|e| { - if let Some(dns_name) = e.dns_name() { - dns_names.push(dns_name.to_string()); - } - }); - - Ok(( - endpoint.vpc_endpoint_id().unwrap_or_default().to_string(), - dns_names, - )) - } -} diff --git a/src/meta/src/rpc/ddl_controller.rs b/src/meta/src/rpc/ddl_controller.rs index 01b6577dab43e..73da6807921a2 100644 --- a/src/meta/src/rpc/ddl_controller.rs +++ b/src/meta/src/rpc/ddl_controller.rs @@ -20,7 +20,6 @@ use std::time::Duration; use anyhow::{anyhow, Context}; use itertools::Itertools; -use rand::Rng; use risingwave_common::bitmap::Bitmap; use risingwave_common::config::DefaultParallelism; use risingwave_common::hash::{ActorMapping, VnodeCountCompat}; @@ -32,10 +31,8 @@ use risingwave_common::util::stream_graph_visitor::{ }; use risingwave_common::{bail, hash, must_match}; use risingwave_connector::error::ConnectorError; -use risingwave_connector::source::cdc::CdcSourceType; use risingwave_connector::source::{ ConnectorProperties, SourceEnumeratorContext, SourceProperties, SplitEnumerator, - UPSTREAM_SOURCE_KEY, }; use risingwave_connector::{dispatch_source_prop, WithOptionsSecResolved}; use risingwave_meta_model::object::ObjectType; @@ -43,13 +40,11 @@ use risingwave_meta_model::{ ConnectionId, DatabaseId, FunctionId, IndexId, ObjectId, SchemaId, SecretId, SinkId, SourceId, SubscriptionId, TableId, UserId, ViewId, }; -use risingwave_pb::catalog::connection::private_link_service::PbPrivateLinkProvider; -use risingwave_pb::catalog::connection::PrivateLinkService; use risingwave_pb::catalog::source::OptionalAssociatedTableId; use risingwave_pb::catalog::table::OptionalAssociatedSourceId; use risingwave_pb::catalog::{ - connection, Comment, Connection, CreateType, Database, Function, PbSink, PbSource, PbTable, - Schema, Secret, Sink, Source, Subscription, Table, View, + Comment, Connection, CreateType, Database, Function, PbSink, PbSource, PbTable, Schema, Secret, + Sink, Source, Subscription, Table, View, }; use risingwave_pb::ddl_service::alter_owner_request::Object; use risingwave_pb::ddl_service::{ @@ -67,7 +62,6 @@ use risingwave_pb::stream_plan::{ use thiserror_ext::AsReport; use tokio::sync::Semaphore; use tokio::time::sleep; -use tracing::log::warn; use tracing::Instrument; use crate::barrier::BarrierManagerRef; @@ -79,7 +73,6 @@ use crate::manager::{ IGNORED_NOTIFICATION_VERSION, }; use crate::model::{FragmentId, StreamContext, TableFragments, TableParallelism}; -use crate::rpc::cloud_provider::AwsEc2Client; use crate::stream::{ create_source_worker_handle, validate_sink, ActorGraphBuildResult, ActorGraphBuilder, CompleteStreamFragmentGraph, CreateStreamingJobContext, CreateStreamingJobOption, @@ -201,7 +194,6 @@ pub struct DdlController { pub(crate) source_manager: SourceManagerRef, barrier_manager: BarrierManagerRef, - aws_client: Arc>, // The semaphore is used to limit the number of concurrent streaming job creation. 
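[Editor's sketch, not part of the patch] The `creating_streaming_job_permits` field kept above throttles concurrent streaming-job creation with a semaphore. A stand-alone version of that pattern, assuming a tokio runtime; the permit count and job body here are made up for illustration.

use std::sync::Arc;
use std::time::Duration;

use tokio::sync::Semaphore;

async fn create_streaming_job(id: u32, permits: Arc<Semaphore>) {
    // Hold a permit for the duration of the creation, so at most the initial
    // permit count of jobs build their stream graphs at the same time.
    let _permit = permits.acquire_owned().await.expect("semaphore closed");
    println!("creating streaming job {id}");
    tokio::time::sleep(Duration::from_millis(10)).await;
}

#[tokio::main]
async fn main() {
    let permits = Arc::new(Semaphore::new(2)); // allow two concurrent creations
    let handles: Vec<_> = (0..5)
        .map(|id| tokio::spawn(create_streaming_job(id, permits.clone())))
        .collect();
    for handle in handles {
        handle.await.unwrap();
    }
}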
pub(crate) creating_streaming_job_permits: Arc, } @@ -271,7 +263,6 @@ impl DdlController { stream_manager: GlobalStreamManagerRef, source_manager: SourceManagerRef, barrier_manager: BarrierManagerRef, - aws_client: Arc>, ) -> Self { let creating_streaming_job_permits = Arc::new(CreatingStreamingJobPermit::new(&env).await); Self { @@ -280,7 +271,6 @@ impl DdlController { stream_manager, source_manager, barrier_manager, - aws_client, creating_streaming_job_permits, } } @@ -559,21 +549,6 @@ impl DdlController { .await } - pub(crate) async fn delete_vpc_endpoint(&self, svc: &PrivateLinkService) -> MetaResult<()> { - // delete AWS vpc endpoint - if svc.get_provider()? == PbPrivateLinkProvider::Aws { - if let Some(aws_cli) = self.aws_client.as_ref() { - aws_cli.delete_vpc_endpoint(&svc.endpoint_id).await?; - } else { - warn!( - "AWS client is not initialized, skip deleting vpc endpoint {}", - svc.endpoint_id - ); - } - } - Ok(()) - } - async fn create_subscription( &self, mut subscription: Subscription, @@ -1183,14 +1158,11 @@ impl DdlController { .await; } ObjectType::Connection => { - let (version, conn) = self + let (version, _conn) = self .metadata_manager .catalog_controller .drop_connection(object_id) .await?; - if let Some(connection::Info::PrivateLinkService(svc)) = &conn.info { - self.delete_vpc_endpoint(svc).await?; - } return Ok(version); } _ => { @@ -1312,22 +1284,12 @@ impl DdlController { streaming_job_ids, state_table_ids, source_ids, - connections, source_fragments, removed_actors, removed_fragments, + .. } = release_ctx; - // delete vpc endpoints. - for conn in connections { - let _ = self - .delete_vpc_endpoint(&conn.to_protobuf()) - .await - .inspect_err(|err| { - tracing::warn!(err = ?err.as_report(), "failed to delete vpc endpoint"); - }); - } - // unregister sources. self.source_manager .unregister_sources(source_ids.into_iter().map(|id| id as _).collect()) @@ -2025,25 +1987,6 @@ pub fn fill_table_stream_graph_info( source_node.source_inner.as_mut().unwrap().source_id = source.id; source_count += 1; - // Generate a random server id for mysql cdc source if needed - // `server.id` (in the range from 1 to 2^32 - 1). This value MUST be unique across whole replication - // group (that is, different from any other server id being used by any master or slave) - if let Some(connector) = source.with_properties.get(UPSTREAM_SOURCE_KEY) - && matches!( - CdcSourceType::from(connector.as_str()), - CdcSourceType::Mysql - ) - { - let props = &mut source_node.source_inner.as_mut().unwrap().with_properties; - let rand_server_id = rand::thread_rng().gen_range(1..u32::MAX); - props - .entry("server.id".to_string()) - .or_insert(rand_server_id.to_string()); - - // make these two `Source` consistent - props.clone_into(&mut source.with_properties); - } - assert_eq!( source_count, 1, "require exactly 1 external stream source when creating table with a connector" diff --git a/src/meta/src/rpc/mod.rs b/src/meta/src/rpc/mod.rs index 8b256d1b2145e..9f840ded5aa47 100644 --- a/src/meta/src/rpc/mod.rs +++ b/src/meta/src/rpc/mod.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-pub mod cloud_provider; pub mod ddl_controller; pub mod election; pub mod intercept; diff --git a/src/sqlparser/src/ast/ddl.rs b/src/sqlparser/src/ast/ddl.rs index 89e8f24bf5922..15ac6a9d27623 100644 --- a/src/sqlparser/src/ast/ddl.rs +++ b/src/sqlparser/src/ast/ddl.rs @@ -20,7 +20,7 @@ use core::fmt; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use super::ConnectorSchema; +use super::FormatEncodeOptions; use crate::ast::{ display_comma_separated, display_separated, DataType, Expr, Ident, ObjectName, SetVariableValue, }; @@ -182,7 +182,7 @@ pub enum AlterSourceOperation { AddColumn { column_def: ColumnDef }, ChangeOwner { new_owner_name: Ident }, SetSchema { new_schema_name: ObjectName }, - FormatEncode { connector_schema: ConnectorSchema }, + FormatEncode { format_encode: FormatEncodeOptions }, RefreshSchema, SetSourceRateLimit { rate_limit: i32 }, } @@ -413,8 +413,8 @@ impl fmt::Display for AlterSourceOperation { AlterSourceOperation::SetSchema { new_schema_name } => { write!(f, "SET SCHEMA {}", new_schema_name) } - AlterSourceOperation::FormatEncode { connector_schema } => { - write!(f, "{connector_schema}") + AlterSourceOperation::FormatEncode { format_encode } => { + write!(f, "{format_encode}") } AlterSourceOperation::RefreshSchema => { write!(f, "REFRESH SCHEMA") @@ -731,7 +731,7 @@ impl fmt::Display for ColumnOption { fn display_constraint_name(name: &'_ Option) -> impl fmt::Display + '_ { struct ConstraintName<'a>(&'a Option); - impl<'a> fmt::Display for ConstraintName<'a> { + impl fmt::Display for ConstraintName<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if let Some(name) = self.0 { write!(f, "CONSTRAINT {} ", name)?; diff --git a/src/sqlparser/src/ast/legacy_source.rs b/src/sqlparser/src/ast/legacy_source.rs index 5fb2e233a67df..7a5abf35a2df8 100644 --- a/src/sqlparser/src/ast/legacy_source.rs +++ b/src/sqlparser/src/ast/legacy_source.rs @@ -23,7 +23,7 @@ use serde::{Deserialize, Serialize}; use winnow::PResult; use crate::ast::{ - display_separated, AstString, ConnectorSchema, Encode, Format, Ident, ObjectName, ParseTo, + display_separated, AstString, Encode, Format, FormatEncodeOptions, Ident, ObjectName, ParseTo, SqlOption, Value, }; use crate::keywords::Keyword; @@ -32,45 +32,45 @@ use crate::{impl_fmt_display, impl_parse_to, parser_err}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum CompatibleSourceSchema { - RowFormat(SourceSchema), - V2(ConnectorSchema), +pub enum CompatibleFormatEncode { + RowFormat(LegacyRowFormat), + V2(FormatEncodeOptions), } -impl fmt::Display for CompatibleSourceSchema { +impl fmt::Display for CompatibleFormatEncode { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - CompatibleSourceSchema::RowFormat(inner) => { + CompatibleFormatEncode::RowFormat(inner) => { write!(f, "{}", inner) } - CompatibleSourceSchema::V2(inner) => { + CompatibleFormatEncode::V2(inner) => { write!(f, "{}", inner) } } } } -impl CompatibleSourceSchema { - pub(crate) fn into_v2(self) -> ConnectorSchema { +impl CompatibleFormatEncode { + pub(crate) fn into_v2(self) -> FormatEncodeOptions { match self { - CompatibleSourceSchema::RowFormat(inner) => inner.into_source_schema_v2(), - CompatibleSourceSchema::V2(inner) => inner, + CompatibleFormatEncode::RowFormat(inner) => inner.into_format_encode_v2(), + CompatibleFormatEncode::V2(inner) => inner, } } } -impl From for CompatibleSourceSchema { - fn from(value: ConnectorSchema) -> Self { +impl From 
for CompatibleFormatEncode { + fn from(value: FormatEncodeOptions) -> Self { Self::V2(value) } } -pub fn parse_source_schema(p: &mut Parser<'_>) -> PResult { +pub fn parse_format_encode(p: &mut Parser<'_>) -> PResult { if let Some(schema_v2) = p.parse_schema()? { if schema_v2.key_encode.is_some() { parser_err!("key encode clause is not supported in source schema"); } - Ok(CompatibleSourceSchema::V2(schema_v2)) + Ok(CompatibleFormatEncode::V2(schema_v2)) } else if p.peek_nth_any_of_keywords(0, &[Keyword::ROW]) && p.peek_nth_any_of_keywords(1, &[Keyword::FORMAT]) { @@ -79,34 +79,34 @@ pub fn parse_source_schema(p: &mut Parser<'_>) -> PResult SourceSchema::Json, - "UPSERT_JSON" => SourceSchema::UpsertJson, + "JSON" => LegacyRowFormat::Json, + "UPSERT_JSON" => LegacyRowFormat::UpsertJson, "PROTOBUF" => { impl_parse_to!(protobuf_schema: ProtobufSchema, p); - SourceSchema::Protobuf(protobuf_schema) + LegacyRowFormat::Protobuf(protobuf_schema) } - "DEBEZIUM_JSON" => SourceSchema::DebeziumJson, - "DEBEZIUM_MONGO_JSON" => SourceSchema::DebeziumMongoJson, + "DEBEZIUM_JSON" => LegacyRowFormat::DebeziumJson, + "DEBEZIUM_MONGO_JSON" => LegacyRowFormat::DebeziumMongoJson, "AVRO" => { impl_parse_to!(avro_schema: AvroSchema, p); - SourceSchema::Avro(avro_schema) + LegacyRowFormat::Avro(avro_schema) } "UPSERT_AVRO" => { impl_parse_to!(avro_schema: AvroSchema, p); - SourceSchema::UpsertAvro(avro_schema) + LegacyRowFormat::UpsertAvro(avro_schema) } - "MAXWELL" => SourceSchema::Maxwell, - "CANAL_JSON" => SourceSchema::CanalJson, + "MAXWELL" => LegacyRowFormat::Maxwell, + "CANAL_JSON" => LegacyRowFormat::CanalJson, "CSV" => { impl_parse_to!(csv_info: CsvInfo, p); - SourceSchema::Csv(csv_info) + LegacyRowFormat::Csv(csv_info) } - "NATIVE" => SourceSchema::Native, // used internally by schema change + "NATIVE" => LegacyRowFormat::Native, // used internally by schema change "DEBEZIUM_AVRO" => { impl_parse_to!(avro_schema: DebeziumAvroSchema, p); - SourceSchema::DebeziumAvro(avro_schema) + LegacyRowFormat::DebeziumAvro(avro_schema) } - "BYTES" => SourceSchema::Bytes, + "BYTES" => LegacyRowFormat::Bytes, _ => { parser_err!( "expected JSON | UPSERT_JSON | PROTOBUF | DEBEZIUM_JSON | DEBEZIUM_AVRO \ @@ -114,7 +114,7 @@ pub fn parse_source_schema(p: &mut Parser<'_>) -> PResult) -> PResult ConnectorSchema { +impl LegacyRowFormat { + pub fn into_format_encode_v2(self) -> FormatEncodeOptions { let (format, row_encode) = match self { - SourceSchema::Protobuf(_) => (Format::Plain, Encode::Protobuf), - SourceSchema::Json => (Format::Plain, Encode::Json), - SourceSchema::DebeziumJson => (Format::Debezium, Encode::Json), - SourceSchema::DebeziumMongoJson => (Format::DebeziumMongo, Encode::Json), - SourceSchema::UpsertJson => (Format::Upsert, Encode::Json), - SourceSchema::Avro(_) => (Format::Plain, Encode::Avro), - SourceSchema::UpsertAvro(_) => (Format::Upsert, Encode::Avro), - SourceSchema::Maxwell => (Format::Maxwell, Encode::Json), - SourceSchema::CanalJson => (Format::Canal, Encode::Json), - SourceSchema::Csv(_) => (Format::Plain, Encode::Csv), - SourceSchema::DebeziumAvro(_) => (Format::Debezium, Encode::Avro), - SourceSchema::Bytes => (Format::Plain, Encode::Bytes), - SourceSchema::Native => (Format::Native, Encode::Native), + LegacyRowFormat::Protobuf(_) => (Format::Plain, Encode::Protobuf), + LegacyRowFormat::Json => (Format::Plain, Encode::Json), + LegacyRowFormat::DebeziumJson => (Format::Debezium, Encode::Json), + LegacyRowFormat::DebeziumMongoJson => (Format::DebeziumMongo, Encode::Json), + 
LegacyRowFormat::UpsertJson => (Format::Upsert, Encode::Json), + LegacyRowFormat::Avro(_) => (Format::Plain, Encode::Avro), + LegacyRowFormat::UpsertAvro(_) => (Format::Upsert, Encode::Avro), + LegacyRowFormat::Maxwell => (Format::Maxwell, Encode::Json), + LegacyRowFormat::CanalJson => (Format::Canal, Encode::Json), + LegacyRowFormat::Csv(_) => (Format::Plain, Encode::Csv), + LegacyRowFormat::DebeziumAvro(_) => (Format::Debezium, Encode::Avro), + LegacyRowFormat::Bytes => (Format::Plain, Encode::Bytes), + LegacyRowFormat::Native => (Format::Native, Encode::Native), }; let row_options = match self { - SourceSchema::Protobuf(schema) => { + LegacyRowFormat::Protobuf(schema) => { let mut options = vec![SqlOption { name: ObjectName(vec![Ident { value: "message".into(), @@ -184,7 +184,7 @@ impl SourceSchema { } options } - SourceSchema::Avro(schema) | SourceSchema::UpsertAvro(schema) => { + LegacyRowFormat::Avro(schema) | LegacyRowFormat::UpsertAvro(schema) => { if schema.use_schema_registry { vec![SqlOption { name: ObjectName(vec![Ident { @@ -203,7 +203,7 @@ impl SourceSchema { }] } } - SourceSchema::DebeziumAvro(schema) => { + LegacyRowFormat::DebeziumAvro(schema) => { vec![SqlOption { name: ObjectName(vec![Ident { value: "schema.registry".into(), @@ -212,7 +212,7 @@ impl SourceSchema { value: Value::SingleQuotedString(schema.row_schema_location.0), }] } - SourceSchema::Csv(schema) => { + LegacyRowFormat::Csv(schema) => { vec![ SqlOption { name: ObjectName(vec![Ident { @@ -239,7 +239,7 @@ impl SourceSchema { _ => vec![], }; - ConnectorSchema { + FormatEncodeOptions { format, row_encode, row_options, @@ -248,23 +248,27 @@ impl SourceSchema { } } -impl fmt::Display for SourceSchema { +impl fmt::Display for LegacyRowFormat { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "ROW FORMAT ")?; match self { - SourceSchema::Protobuf(protobuf_schema) => write!(f, "PROTOBUF {}", protobuf_schema), - SourceSchema::Json => write!(f, "JSON"), - SourceSchema::UpsertJson => write!(f, "UPSERT_JSON"), - SourceSchema::Maxwell => write!(f, "MAXWELL"), - SourceSchema::DebeziumJson => write!(f, "DEBEZIUM_JSON"), - SourceSchema::DebeziumMongoJson => write!(f, "DEBEZIUM_MONGO_JSON"), - SourceSchema::Avro(avro_schema) => write!(f, "AVRO {}", avro_schema), - SourceSchema::UpsertAvro(avro_schema) => write!(f, "UPSERT_AVRO {}", avro_schema), - SourceSchema::CanalJson => write!(f, "CANAL_JSON"), - SourceSchema::Csv(csv_info) => write!(f, "CSV {}", csv_info), - SourceSchema::Native => write!(f, "NATIVE"), - SourceSchema::DebeziumAvro(avro_schema) => write!(f, "DEBEZIUM_AVRO {}", avro_schema), - SourceSchema::Bytes => write!(f, "BYTES"), + LegacyRowFormat::Protobuf(protobuf_schema) => { + write!(f, "PROTOBUF {}", protobuf_schema) + } + LegacyRowFormat::Json => write!(f, "JSON"), + LegacyRowFormat::UpsertJson => write!(f, "UPSERT_JSON"), + LegacyRowFormat::Maxwell => write!(f, "MAXWELL"), + LegacyRowFormat::DebeziumJson => write!(f, "DEBEZIUM_JSON"), + LegacyRowFormat::DebeziumMongoJson => write!(f, "DEBEZIUM_MONGO_JSON"), + LegacyRowFormat::Avro(avro_schema) => write!(f, "AVRO {}", avro_schema), + LegacyRowFormat::UpsertAvro(avro_schema) => write!(f, "UPSERT_AVRO {}", avro_schema), + LegacyRowFormat::CanalJson => write!(f, "CANAL_JSON"), + LegacyRowFormat::Csv(csv_info) => write!(f, "CSV {}", csv_info), + LegacyRowFormat::Native => write!(f, "NATIVE"), + LegacyRowFormat::DebeziumAvro(avro_schema) => { + write!(f, "DEBEZIUM_AVRO {}", avro_schema) + } + LegacyRowFormat::Bytes => write!(f, "BYTES"), } } } 
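[Editor's sketch, not part of the patch] The rename above keeps the legacy `ROW FORMAT ...` syntax parseable while normalizing it into the `FORMAT ... ENCODE ...` representation via `into_format_encode_v2`. Minimal stand-ins for the two types, covering only a few variants, to show the mapping.

#[derive(Debug)]
enum LegacyRowFormat {
    Json,
    UpsertJson,
    DebeziumJson,
}

#[derive(Debug, PartialEq)]
struct FormatEncode {
    format: &'static str,
    encode: &'static str,
}

fn into_format_encode_v2(legacy: LegacyRowFormat) -> FormatEncode {
    // Mirrors the (Format, Encode) pairs produced by `LegacyRowFormat::into_format_encode_v2`.
    match legacy {
        LegacyRowFormat::Json => FormatEncode { format: "PLAIN", encode: "JSON" },
        LegacyRowFormat::UpsertJson => FormatEncode { format: "UPSERT", encode: "JSON" },
        LegacyRowFormat::DebeziumJson => FormatEncode { format: "DEBEZIUM", encode: "JSON" },
    }
}

fn main() {
    // `ROW FORMAT UPSERT_JSON` and `FORMAT UPSERT ENCODE JSON` end up identical after parsing.
    assert_eq!(
        into_format_encode_v2(LegacyRowFormat::UpsertJson),
        FormatEncode { format: "UPSERT", encode: "JSON" }
    );
    println!("{:?}", into_format_encode_v2(LegacyRowFormat::Json));
}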
diff --git a/src/sqlparser/src/ast/mod.rs b/src/sqlparser/src/ast/mod.rs index 5dcea9c339d87..563dc66be4780 100644 --- a/src/sqlparser/src/ast/mod.rs +++ b/src/sqlparser/src/ast/mod.rs @@ -42,7 +42,7 @@ pub use self::ddl::{ ReferentialAction, SourceWatermark, TableConstraint, }; pub use self::legacy_source::{ - get_delimiter, AvroSchema, CompatibleSourceSchema, DebeziumAvroSchema, ProtobufSchema, + get_delimiter, AvroSchema, CompatibleFormatEncode, DebeziumAvroSchema, ProtobufSchema, }; pub use self::operator::{BinaryOperator, QualifiedOperator, UnaryOperator}; pub use self::query::{ @@ -76,7 +76,7 @@ where sep: &'static str, } -impl<'a, T> fmt::Display for DisplaySeparated<'a, T> +impl fmt::Display for DisplaySeparated<'_, T> where T: fmt::Display, { @@ -1286,8 +1286,8 @@ pub enum Statement { wildcard_idx: Option, constraints: Vec, with_options: Vec, - /// Optional schema of the external source with which the table is created - source_schema: Option, + /// `FORMAT ... ENCODE ...` for table with connector + format_encode: Option, /// The watermark defined on source. source_watermarks: Vec, /// Append only table. @@ -1827,7 +1827,7 @@ impl fmt::Display for Statement { or_replace, if_not_exists, temporary, - source_schema, + format_encode, source_watermarks, append_only, on_conflict, @@ -1874,8 +1874,8 @@ impl fmt::Display for Statement { if !with_options.is_empty() { write!(f, " WITH ({})", display_comma_separated(with_options))?; } - if let Some(source_schema) = source_schema { - write!(f, " {}", source_schema)?; + if let Some(format_encode) = format_encode { + write!(f, " {}", format_encode)?; } if let Some(query) = query { write!(f, " AS {}", query)?; diff --git a/src/sqlparser/src/ast/query.rs b/src/sqlparser/src/ast/query.rs index 0dd7ab2f626f6..428fe4e4c5417 100644 --- a/src/sqlparser/src/ast/query.rs +++ b/src/sqlparser/src/ast/query.rs @@ -321,6 +321,7 @@ impl fmt::Display for With { } /// A single CTE (used after `WITH`): `alias [(col1, col2, ...)] AS ( query )` +/// /// The names in the column list before `AS`, when specified, replace the names /// of the columns returned by the query. The parser does not validate that the /// number of columns in the query matches the number of columns in the query. @@ -464,9 +465,8 @@ impl fmt::Display for TableFactor { match self { TableFactor::Table { name, alias, as_of } => { write!(f, "{}", name)?; - match as_of { - Some(as_of) => write!(f, "{}", as_of)?, - None => (), + if let Some(as_of) = as_of { + write!(f, "{}", as_of)? 
} if let Some(alias) = alias { write!(f, " AS {}", alias)?; @@ -541,7 +541,7 @@ impl fmt::Display for Join { } fn suffix(constraint: &'_ JoinConstraint) -> impl fmt::Display + '_ { struct Suffix<'a>(&'a JoinConstraint); - impl<'a> fmt::Display for Suffix<'a> { + impl fmt::Display for Suffix<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.0 { JoinConstraint::On(expr) => write!(f, " ON {}", expr), diff --git a/src/sqlparser/src/ast/statement.rs b/src/sqlparser/src/ast/statement.rs index a0f919091b56d..72680161defea 100644 --- a/src/sqlparser/src/ast/statement.rs +++ b/src/sqlparser/src/ast/statement.rs @@ -22,7 +22,7 @@ use serde::{Deserialize, Serialize}; use winnow::PResult; use super::ddl::SourceWatermark; -use super::legacy_source::{parse_source_schema, CompatibleSourceSchema}; +use super::legacy_source::{parse_format_encode, CompatibleFormatEncode}; use super::{EmitMode, Ident, ObjectType, Query, Value}; use crate::ast::{ display_comma_separated, display_separated, ColumnDef, ObjectName, SqlOption, TableConstraint, @@ -76,7 +76,7 @@ macro_rules! impl_fmt_display { // source_name: Ident, // with_properties: AstOption, // [Keyword::ROW, Keyword::FORMAT], -// source_schema: SourceSchema, +// format_encode: SourceSchema, // [Keyword::WATERMARK, Keyword::FOR] column [Keyword::AS] // }); #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -90,7 +90,7 @@ pub struct CreateSourceStatement { pub constraints: Vec, pub source_name: ObjectName, pub with_properties: WithProperties, - pub source_schema: CompatibleSourceSchema, + pub format_encode: CompatibleFormatEncode, pub source_watermarks: Vec, pub include_column_options: IncludeOption, } @@ -222,7 +222,7 @@ impl Encode { /// `FORMAT ... ENCODE ... [(a=b, ...)] [KEY ENCODE ...]` #[derive(Debug, Clone, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct ConnectorSchema { +pub struct FormatEncodeOptions { pub format: Format, pub row_encode: Encode, pub row_options: Vec, @@ -232,33 +232,33 @@ pub struct ConnectorSchema { impl Parser<'_> { /// Peek the next tokens to see if it is `FORMAT` or `ROW FORMAT` (for compatibility). - fn peek_source_schema_format(&mut self) -> bool { + fn peek_format_encode_format(&mut self) -> bool { (self.peek_nth_any_of_keywords(0, &[Keyword::ROW]) && self.peek_nth_any_of_keywords(1, &[Keyword::FORMAT])) // ROW FORMAT || self.peek_nth_any_of_keywords(0, &[Keyword::FORMAT]) // FORMAT } /// Parse the source schema. The behavior depends on the `connector` type. 
- pub fn parse_source_schema_with_connector( + pub fn parse_format_encode_with_connector( &mut self, connector: &str, cdc_source_job: bool, - ) -> PResult { + ) -> PResult { // row format for cdc source must be debezium json // row format for nexmark source must be native // default row format for datagen source is native // FIXME: parse input `connector` to enum type instead using string here if connector.contains("-cdc") { let expected = if cdc_source_job { - ConnectorSchema::plain_json() + FormatEncodeOptions::plain_json() } else if connector.contains("mongodb") { - ConnectorSchema::debezium_mongo_json() + FormatEncodeOptions::debezium_mongo_json() } else { - ConnectorSchema::debezium_json() + FormatEncodeOptions::debezium_json() }; - if self.peek_source_schema_format() { - let schema = parse_source_schema(self)?.into_v2(); + if self.peek_format_encode_format() { + let schema = parse_format_encode(self)?.into_v2(); if schema != expected { parser_err!( "Row format for CDC connectors should be \ @@ -268,9 +268,9 @@ impl Parser<'_> { } Ok(expected.into()) } else if connector.contains("nexmark") { - let expected = ConnectorSchema::native(); - if self.peek_source_schema_format() { - let schema = parse_source_schema(self)?.into_v2(); + let expected = FormatEncodeOptions::native(); + if self.peek_format_encode_format() { + let schema = parse_format_encode(self)?.into_v2(); if schema != expected { parser_err!( "Row format for nexmark connectors should be \ @@ -280,15 +280,15 @@ impl Parser<'_> { } Ok(expected.into()) } else if connector.contains("datagen") { - Ok(if self.peek_source_schema_format() { - parse_source_schema(self)? + Ok(if self.peek_format_encode_format() { + parse_format_encode(self)? } else { - ConnectorSchema::native().into() + FormatEncodeOptions::native().into() }) } else if connector.contains("iceberg") { - let expected = ConnectorSchema::none(); - if self.peek_source_schema_format() { - let schema = parse_source_schema(self)?.into_v2(); + let expected = FormatEncodeOptions::none(); + if self.peek_format_encode_format() { + let schema = parse_format_encode(self)?.into_v2(); if schema != expected { parser_err!( "Row format for iceberg connectors should be \ @@ -298,12 +298,12 @@ impl Parser<'_> { } Ok(expected.into()) } else { - Ok(parse_source_schema(self)?) + Ok(parse_format_encode(self)?) } } /// Parse `FORMAT ... ENCODE ... (...)`. - pub fn parse_schema(&mut self) -> PResult> { + pub fn parse_schema(&mut self) -> PResult> { if !self.parse_keyword(Keyword::FORMAT) { return Ok(None); } @@ -325,7 +325,7 @@ impl Parser<'_> { None }; - Ok(Some(ConnectorSchema { + Ok(Some(FormatEncodeOptions { format, row_encode, row_options, @@ -334,9 +334,9 @@ impl Parser<'_> { } } -impl ConnectorSchema { +impl FormatEncodeOptions { pub const fn plain_json() -> Self { - ConnectorSchema { + FormatEncodeOptions { format: Format::Plain, row_encode: Encode::Json, row_options: Vec::new(), @@ -346,7 +346,7 @@ impl ConnectorSchema { /// Create a new source schema with `Debezium` format and `Json` encoding. pub const fn debezium_json() -> Self { - ConnectorSchema { + FormatEncodeOptions { format: Format::Debezium, row_encode: Encode::Json, row_options: Vec::new(), @@ -355,7 +355,7 @@ impl ConnectorSchema { } pub const fn debezium_mongo_json() -> Self { - ConnectorSchema { + FormatEncodeOptions { format: Format::DebeziumMongo, row_encode: Encode::Json, row_options: Vec::new(), @@ -365,7 +365,7 @@ impl ConnectorSchema { /// Create a new source schema with `Native` format and encoding. 
pub const fn native() -> Self { - ConnectorSchema { + FormatEncodeOptions { format: Format::Native, row_encode: Encode::Native, row_options: Vec::new(), @@ -376,7 +376,7 @@ impl ConnectorSchema { /// Create a new source schema with `None` format and encoding. /// Used for self-explanatory source like iceberg. pub const fn none() -> Self { - ConnectorSchema { + FormatEncodeOptions { format: Format::None, row_encode: Encode::None, row_options: Vec::new(), @@ -389,7 +389,7 @@ impl ConnectorSchema { } } -impl fmt::Display for ConnectorSchema { +impl fmt::Display for FormatEncodeOptions { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "FORMAT {} ENCODE {}", self.format, self.row_encode)?; @@ -467,7 +467,7 @@ impl fmt::Display for CreateSourceStatement { v.push(format!("{}", item)); } impl_fmt_display!(with_properties, v, self); - impl_fmt_display!(source_schema, v, self); + impl_fmt_display!(format_encode, v, self); v.iter().join(" ").fmt(f) } } @@ -503,7 +503,7 @@ pub struct CreateSinkStatement { pub sink_from: CreateSink, pub columns: Vec, pub emit_mode: Option, - pub sink_schema: Option, + pub sink_schema: Option, pub into_table_name: Option, } diff --git a/src/sqlparser/src/ast/value.rs b/src/sqlparser/src/ast/value.rs index 9cae715e09278..2bf8a6fdf3a02 100644 --- a/src/sqlparser/src/ast/value.rs +++ b/src/sqlparser/src/ast/value.rs @@ -181,7 +181,7 @@ impl fmt::Display for DateTimeField { pub struct EscapeSingleQuoteString<'a>(&'a str); -impl<'a> fmt::Display for EscapeSingleQuoteString<'a> { +impl fmt::Display for EscapeSingleQuoteString<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for c in self.0.chars() { if c == '\'' { diff --git a/src/sqlparser/src/parser.rs b/src/sqlparser/src/parser.rs index 4874e5320056d..b383869c0d3d0 100644 --- a/src/sqlparser/src/parser.rs +++ b/src/sqlparser/src/parser.rs @@ -2143,7 +2143,7 @@ impl Parser<'_> { // row format for nexmark source must be native // default row format for datagen source is native - let source_schema = self.parse_source_schema_with_connector(&connector, cdc_source_job)?; + let format_encode = self.parse_format_encode_with_connector(&connector, cdc_source_job)?; let stmt = CreateSourceStatement { temporary, @@ -2153,7 +2153,7 @@ impl Parser<'_> { constraints, source_name, with_properties: WithProperties(with_options), - source_schema, + format_encode, source_watermarks, include_column_options: include_options, }; @@ -2585,8 +2585,8 @@ impl Parser<'_> { .find(|&opt| opt.name.real_value() == UPSTREAM_SOURCE_KEY); let connector = option.map(|opt| opt.value.to_string()); - let source_schema = if let Some(connector) = connector { - Some(self.parse_source_schema_with_connector(&connector, false)?) + let format_encode = if let Some(connector) = connector { + Some(self.parse_format_encode_with_connector(&connector, false)?) } else { None // Table is NOT created with an external connector. 
}; @@ -2621,7 +2621,7 @@ impl Parser<'_> { with_options, or_replace, if_not_exists, - source_schema, + format_encode, source_watermarks, append_only, on_conflict, @@ -3475,11 +3475,11 @@ impl Parser<'_> { return self.expected("SCHEMA after SET"); } } else if self.peek_nth_any_of_keywords(0, &[Keyword::FORMAT]) { - let connector_schema = self.parse_schema()?.unwrap(); - if connector_schema.key_encode.is_some() { + let format_encode = self.parse_schema()?.unwrap(); + if format_encode.key_encode.is_some() { parser_err!("key encode clause is not supported in source schema"); } - AlterSourceOperation::FormatEncode { connector_schema } + AlterSourceOperation::FormatEncode { format_encode } } else if self.parse_keywords(&[Keyword::REFRESH, Keyword::SCHEMA]) { AlterSourceOperation::RefreshSchema } else { diff --git a/src/sqlparser/src/parser_v2/impl_.rs b/src/sqlparser/src/parser_v2/impl_.rs index e4b6af2919745..328fccb200764 100644 --- a/src/sqlparser/src/parser_v2/impl_.rs +++ b/src/sqlparser/src/parser_v2/impl_.rs @@ -26,7 +26,7 @@ impl<'a> Offset> for CheckpointWrapper<'a> { } // Used for diagnostics with `--features winnow/debug`. -impl<'a> std::fmt::Debug for Parser<'a> { +impl std::fmt::Debug for Parser<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if let Some(token) = self.0.first() { write!(f, "{}", token.token)?; @@ -52,7 +52,7 @@ impl<'a> Offset> for Parser<'a> { } } -impl<'a> SliceLen for Parser<'a> { +impl SliceLen for Parser<'_> { #[inline(always)] fn slice_len(&self) -> usize { self.0.len() @@ -135,7 +135,7 @@ impl<'a> Stream for Parser<'a> { } } -impl<'a> UpdateSlice for Parser<'a> { +impl UpdateSlice for Parser<'_> { #[inline(always)] fn update_slice(self, inner: Self::Slice) -> Self { Parser(self.0.update_slice(inner.0)) diff --git a/src/sqlparser/src/test_utils.rs b/src/sqlparser/src/test_utils.rs index bfdf04dcc2d39..b6675928302cd 100644 --- a/src/sqlparser/src/test_utils.rs +++ b/src/sqlparser/src/test_utils.rs @@ -45,6 +45,7 @@ pub fn parse_sql_statements(sql: &str) -> Result, ParserError> { } /// Ensures that `sql` parses as a single statement and returns it. 
+/// /// If non-empty `canonical` SQL representation is provided, /// additionally asserts that parsing `sql` results in the same parse /// tree as parsing `canonical`, and that serializing it back to string diff --git a/src/sqlparser/tests/testdata/create.yaml b/src/sqlparser/tests/testdata/create.yaml index 1c1f80818478b..8390cc980cc25 100644 --- a/src/sqlparser/tests/testdata/create.yaml +++ b/src/sqlparser/tests/testdata/create.yaml @@ -43,19 +43,19 @@ ^ - input: CREATE SOURCE IF NOT EXISTS src WITH (kafka.topic = 'abc', kafka.brokers = 'localhost:1001') FORMAT PLAIN ENCODE PROTOBUF (message = 'Foo', schema.location = 'file://') formatted_sql: CREATE SOURCE IF NOT EXISTS src WITH (kafka.topic = 'abc', kafka.brokers = 'localhost:1001') FORMAT PLAIN ENCODE PROTOBUF (message = 'Foo', schema.location = 'file://') - formatted_ast: 'CreateSource { stmt: CreateSourceStatement { temporary: false, if_not_exists: true, columns: [], wildcard_idx: None, constraints: [], source_name: ObjectName([Ident { value: "src", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "topic", quote_style: None }]), value: SingleQuotedString("abc") }, SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "brokers", quote_style: None }]), value: SingleQuotedString("localhost:1001") }]), source_schema: V2(ConnectorSchema { format: Plain, row_encode: Protobuf, row_options: [SqlOption { name: ObjectName([Ident { value: "message", quote_style: None }]), value: SingleQuotedString("Foo") }, SqlOption { name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "location", quote_style: None }]), value: SingleQuotedString("file://") }], key_encode: None }), source_watermarks: [], include_column_options: [] } }' + formatted_ast: 'CreateSource { stmt: CreateSourceStatement { temporary: false, if_not_exists: true, columns: [], wildcard_idx: None, constraints: [], source_name: ObjectName([Ident { value: "src", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "topic", quote_style: None }]), value: SingleQuotedString("abc") }, SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "brokers", quote_style: None }]), value: SingleQuotedString("localhost:1001") }]), format_encode: V2(FormatEncodeOptions { format: Plain, row_encode: Protobuf, row_options: [SqlOption { name: ObjectName([Ident { value: "message", quote_style: None }]), value: SingleQuotedString("Foo") }, SqlOption { name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "location", quote_style: None }]), value: SingleQuotedString("file://") }], key_encode: None }), source_watermarks: [], include_column_options: [] } }' - input: CREATE SOURCE IF NOT EXISTS src WITH (kafka.topic = 'abc', kafka.brokers = 'localhost:1001') FORMAT PLAIN ENCODE PROTOBUF (message = 'Foo', schema.registry = 'http://') formatted_sql: CREATE SOURCE IF NOT EXISTS src WITH (kafka.topic = 'abc', kafka.brokers = 'localhost:1001') FORMAT PLAIN ENCODE PROTOBUF (message = 'Foo', schema.registry = 'http://') - formatted_ast: 'CreateSource { stmt: CreateSourceStatement { temporary: false, if_not_exists: true, columns: [], wildcard_idx: None, constraints: [], source_name: ObjectName([Ident { value: "src", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: 
"kafka", quote_style: None }, Ident { value: "topic", quote_style: None }]), value: SingleQuotedString("abc") }, SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "brokers", quote_style: None }]), value: SingleQuotedString("localhost:1001") }]), source_schema: V2(ConnectorSchema { format: Plain, row_encode: Protobuf, row_options: [SqlOption { name: ObjectName([Ident { value: "message", quote_style: None }]), value: SingleQuotedString("Foo") }, SqlOption { name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "registry", quote_style: None }]), value: SingleQuotedString("http://") }], key_encode: None }), source_watermarks: [], include_column_options: [] } }' + formatted_ast: 'CreateSource { stmt: CreateSourceStatement { temporary: false, if_not_exists: true, columns: [], wildcard_idx: None, constraints: [], source_name: ObjectName([Ident { value: "src", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "topic", quote_style: None }]), value: SingleQuotedString("abc") }, SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "brokers", quote_style: None }]), value: SingleQuotedString("localhost:1001") }]), format_encode: V2(FormatEncodeOptions { format: Plain, row_encode: Protobuf, row_options: [SqlOption { name: ObjectName([Ident { value: "message", quote_style: None }]), value: SingleQuotedString("Foo") }, SqlOption { name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "registry", quote_style: None }]), value: SingleQuotedString("http://") }], key_encode: None }), source_watermarks: [], include_column_options: [] } }' - input: CREATE SOURCE IF NOT EXISTS src (*, WATERMARK FOR event_time AS event_time - INTERVAL '60' SECOND) WITH (kafka.topic = 'abc', kafka.brokers = 'localhost:1001') FORMAT PLAIN ENCODE PROTOBUF (message = 'Foo', schema.registry = 'http://') formatted_sql: CREATE SOURCE IF NOT EXISTS src (*, WATERMARK FOR event_time AS event_time - INTERVAL '60' SECOND) WITH (kafka.topic = 'abc', kafka.brokers = 'localhost:1001') FORMAT PLAIN ENCODE PROTOBUF (message = 'Foo', schema.registry = 'http://') - formatted_ast: 'CreateSource { stmt: CreateSourceStatement { temporary: false, if_not_exists: true, columns: [], wildcard_idx: Some(0), constraints: [], source_name: ObjectName([Ident { value: "src", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "topic", quote_style: None }]), value: SingleQuotedString("abc") }, SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "brokers", quote_style: None }]), value: SingleQuotedString("localhost:1001") }]), source_schema: V2(ConnectorSchema { format: Plain, row_encode: Protobuf, row_options: [SqlOption { name: ObjectName([Ident { value: "message", quote_style: None }]), value: SingleQuotedString("Foo") }, SqlOption { name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "registry", quote_style: None }]), value: SingleQuotedString("http://") }], key_encode: None }), source_watermarks: [SourceWatermark { column: Ident { value: "event_time", quote_style: None }, expr: BinaryOp { left: Identifier(Ident { value: "event_time", quote_style: None }), op: Minus, right: Value(Interval { value: "60", leading_field: Some(Second), leading_precision: None, last_field: None, 
fractional_seconds_precision: None }) } }], include_column_options: [] } }' + formatted_ast: 'CreateSource { stmt: CreateSourceStatement { temporary: false, if_not_exists: true, columns: [], wildcard_idx: Some(0), constraints: [], source_name: ObjectName([Ident { value: "src", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "topic", quote_style: None }]), value: SingleQuotedString("abc") }, SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "brokers", quote_style: None }]), value: SingleQuotedString("localhost:1001") }]), format_encode: V2(FormatEncodeOptions { format: Plain, row_encode: Protobuf, row_options: [SqlOption { name: ObjectName([Ident { value: "message", quote_style: None }]), value: SingleQuotedString("Foo") }, SqlOption { name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "registry", quote_style: None }]), value: SingleQuotedString("http://") }], key_encode: None }), source_watermarks: [SourceWatermark { column: Ident { value: "event_time", quote_style: None }, expr: BinaryOp { left: Identifier(Ident { value: "event_time", quote_style: None }), op: Minus, right: Value(Interval { value: "60", leading_field: Some(Second), leading_precision: None, last_field: None, fractional_seconds_precision: None }) } }], include_column_options: [] } }' - input: CREATE SOURCE IF NOT EXISTS src (PRIMARY KEY (event_id), WATERMARK FOR event_time AS event_time - INTERVAL '60' SECOND) WITH (kafka.topic = 'abc', kafka.brokers = 'localhost:1001') FORMAT PLAIN ENCODE PROTOBUF (message = 'Foo', schema.registry = 'http://') formatted_sql: CREATE SOURCE IF NOT EXISTS src (PRIMARY KEY (event_id), WATERMARK FOR event_time AS event_time - INTERVAL '60' SECOND) WITH (kafka.topic = 'abc', kafka.brokers = 'localhost:1001') FORMAT PLAIN ENCODE PROTOBUF (message = 'Foo', schema.registry = 'http://') - formatted_ast: 'CreateSource { stmt: CreateSourceStatement { temporary: false, if_not_exists: true, columns: [], wildcard_idx: None, constraints: [Unique { name: None, columns: [Ident { value: "event_id", quote_style: None }], is_primary: true }], source_name: ObjectName([Ident { value: "src", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "topic", quote_style: None }]), value: SingleQuotedString("abc") }, SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "brokers", quote_style: None }]), value: SingleQuotedString("localhost:1001") }]), source_schema: V2(ConnectorSchema { format: Plain, row_encode: Protobuf, row_options: [SqlOption { name: ObjectName([Ident { value: "message", quote_style: None }]), value: SingleQuotedString("Foo") }, SqlOption { name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "registry", quote_style: None }]), value: SingleQuotedString("http://") }], key_encode: None }), source_watermarks: [SourceWatermark { column: Ident { value: "event_time", quote_style: None }, expr: BinaryOp { left: Identifier(Ident { value: "event_time", quote_style: None }), op: Minus, right: Value(Interval { value: "60", leading_field: Some(Second), leading_precision: None, last_field: None, fractional_seconds_precision: None }) } }], include_column_options: [] } }' + formatted_ast: 'CreateSource { stmt: CreateSourceStatement { temporary: false, if_not_exists: true, columns: [], wildcard_idx: 
None, constraints: [Unique { name: None, columns: [Ident { value: "event_id", quote_style: None }], is_primary: true }], source_name: ObjectName([Ident { value: "src", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "topic", quote_style: None }]), value: SingleQuotedString("abc") }, SqlOption { name: ObjectName([Ident { value: "kafka", quote_style: None }, Ident { value: "brokers", quote_style: None }]), value: SingleQuotedString("localhost:1001") }]), format_encode: V2(FormatEncodeOptions { format: Plain, row_encode: Protobuf, row_options: [SqlOption { name: ObjectName([Ident { value: "message", quote_style: None }]), value: SingleQuotedString("Foo") }, SqlOption { name: ObjectName([Ident { value: "schema", quote_style: None }, Ident { value: "registry", quote_style: None }]), value: SingleQuotedString("http://") }], key_encode: None }), source_watermarks: [SourceWatermark { column: Ident { value: "event_time", quote_style: None }, expr: BinaryOp { left: Identifier(Ident { value: "event_time", quote_style: None }), op: Minus, right: Value(Interval { value: "60", leading_field: Some(Second), leading_precision: None, last_field: None, fractional_seconds_precision: None }) } }], include_column_options: [] } }' - input: CREATE SOURCE bid (auction INTEGER, bidder INTEGER, price INTEGER, WATERMARK FOR auction AS auction - 1, "date_time" TIMESTAMP) with (connector = 'nexmark', nexmark.table.type = 'Bid', nexmark.split.num = '12', nexmark.min.event.gap.in.ns = '0') formatted_sql: CREATE SOURCE bid (auction INT, bidder INT, price INT, "date_time" TIMESTAMP, WATERMARK FOR auction AS auction - 1) WITH (connector = 'nexmark', nexmark.table.type = 'Bid', nexmark.split.num = '12', nexmark.min.event.gap.in.ns = '0') FORMAT NATIVE ENCODE NATIVE - formatted_ast: 'CreateSource { stmt: CreateSourceStatement { temporary: false, if_not_exists: false, columns: [ColumnDef { name: Ident { value: "auction", quote_style: None }, data_type: Some(Int), collation: None, options: [] }, ColumnDef { name: Ident { value: "bidder", quote_style: None }, data_type: Some(Int), collation: None, options: [] }, ColumnDef { name: Ident { value: "price", quote_style: None }, data_type: Some(Int), collation: None, options: [] }, ColumnDef { name: Ident { value: "date_time", quote_style: Some(''"'') }, data_type: Some(Timestamp(false)), collation: None, options: [] }], wildcard_idx: None, constraints: [], source_name: ObjectName([Ident { value: "bid", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "connector", quote_style: None }]), value: SingleQuotedString("nexmark") }, SqlOption { name: ObjectName([Ident { value: "nexmark", quote_style: None }, Ident { value: "table", quote_style: None }, Ident { value: "type", quote_style: None }]), value: SingleQuotedString("Bid") }, SqlOption { name: ObjectName([Ident { value: "nexmark", quote_style: None }, Ident { value: "split", quote_style: None }, Ident { value: "num", quote_style: None }]), value: SingleQuotedString("12") }, SqlOption { name: ObjectName([Ident { value: "nexmark", quote_style: None }, Ident { value: "min", quote_style: None }, Ident { value: "event", quote_style: None }, Ident { value: "gap", quote_style: None }, Ident { value: "in", quote_style: None }, Ident { value: "ns", quote_style: None }]), value: SingleQuotedString("0") }]), source_schema: V2(ConnectorSchema { format: Native, row_encode: Native, row_options: [], 
key_encode: None }), source_watermarks: [SourceWatermark { column: Ident { value: "auction", quote_style: None }, expr: BinaryOp { left: Identifier(Ident { value: "auction", quote_style: None }), op: Minus, right: Value(Number("1")) } }], include_column_options: [] } }' + formatted_ast: 'CreateSource { stmt: CreateSourceStatement { temporary: false, if_not_exists: false, columns: [ColumnDef { name: Ident { value: "auction", quote_style: None }, data_type: Some(Int), collation: None, options: [] }, ColumnDef { name: Ident { value: "bidder", quote_style: None }, data_type: Some(Int), collation: None, options: [] }, ColumnDef { name: Ident { value: "price", quote_style: None }, data_type: Some(Int), collation: None, options: [] }, ColumnDef { name: Ident { value: "date_time", quote_style: Some(''"'') }, data_type: Some(Timestamp(false)), collation: None, options: [] }], wildcard_idx: None, constraints: [], source_name: ObjectName([Ident { value: "bid", quote_style: None }]), with_properties: WithProperties([SqlOption { name: ObjectName([Ident { value: "connector", quote_style: None }]), value: SingleQuotedString("nexmark") }, SqlOption { name: ObjectName([Ident { value: "nexmark", quote_style: None }, Ident { value: "table", quote_style: None }, Ident { value: "type", quote_style: None }]), value: SingleQuotedString("Bid") }, SqlOption { name: ObjectName([Ident { value: "nexmark", quote_style: None }, Ident { value: "split", quote_style: None }, Ident { value: "num", quote_style: None }]), value: SingleQuotedString("12") }, SqlOption { name: ObjectName([Ident { value: "nexmark", quote_style: None }, Ident { value: "min", quote_style: None }, Ident { value: "event", quote_style: None }, Ident { value: "gap", quote_style: None }, Ident { value: "in", quote_style: None }, Ident { value: "ns", quote_style: None }]), value: SingleQuotedString("0") }]), format_encode: V2(FormatEncodeOptions { format: Native, row_encode: Native, row_options: [], key_encode: None }), source_watermarks: [SourceWatermark { column: Ident { value: "auction", quote_style: None }, expr: BinaryOp { left: Identifier(Ident { value: "auction", quote_style: None }), op: Minus, right: Value(Number("1")) } }], include_column_options: [] } }' - input: |- CREATE SOURCE s (raw BYTEA) diff --git a/src/storage/backup/src/meta_snapshot_v1.rs b/src/storage/backup/src/meta_snapshot_v1.rs index b873f863b6d05..c073dba1c2706 100644 --- a/src/storage/backup/src/meta_snapshot_v1.rs +++ b/src/storage/backup/src/meta_snapshot_v1.rs @@ -29,8 +29,7 @@ use risingwave_pb::user::UserInfo; use crate::error::{BackupError, BackupResult}; use crate::meta_snapshot::{MetaSnapshot, Metadata}; -/// TODO: remove `ClusterMetadata` and even the trait, after applying model v2. - +// TODO: remove `ClusterMetadata` and even the trait, after applying model v2. pub type MetaSnapshotV1 = MetaSnapshot; impl Display for ClusterMetadata { diff --git a/src/storage/benches/bench_compactor.rs b/src/storage/benches/bench_compactor.rs index 75ab989559d92..f09c5c3fa15b9 100644 --- a/src/storage/benches/bench_compactor.rs +++ b/src/storage/benches/bench_compactor.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use std::collections::HashMap; use std::ops::Range; use std::sync::Arc; @@ -32,6 +33,7 @@ use risingwave_object_store::object::object_metrics::ObjectStoreMetrics; use risingwave_object_store::object::{InMemObjectStore, ObjectStore, ObjectStoreImpl}; use risingwave_pb::hummock::compact_task::PbTaskType; use risingwave_pb::hummock::PbTableSchema; +use risingwave_storage::compaction_catalog_manager::CompactionCatalogAgent; use risingwave_storage::hummock::compactor::compactor_runner::compact_and_build_sst; use risingwave_storage::hummock::compactor::{ ConcatSstableIterator, DummyCompactionFilter, TaskConfig, TaskProgress, @@ -133,8 +135,13 @@ async fn build_table( policy: CachePolicy::Fill(CacheContext::Default), }, ); - let mut builder = - SstableBuilder::<_, Xor16FilterBuilder>::for_test(sstable_object_id, writer, opt); + let table_id_to_vnode = HashMap::from_iter(vec![(0, VirtualNode::COUNT_FOR_TEST)]); + let mut builder = SstableBuilder::<_, Xor16FilterBuilder>::for_test( + sstable_object_id, + writer, + opt, + table_id_to_vnode, + ); let value = b"1234567890123456789"; let mut full_key = test_key_of(0, epoch, TableId::new(0)); let table_key_len = full_key.user_key.table_key.len(); @@ -177,8 +184,14 @@ async fn build_table_2( policy: CachePolicy::Fill(CacheContext::Default), }, ); - let mut builder = - SstableBuilder::<_, Xor16FilterBuilder>::for_test(sstable_object_id, writer, opt); + + let table_id_to_vnode = HashMap::from_iter(vec![(table_id, VirtualNode::COUNT_FOR_TEST)]); + let mut builder = SstableBuilder::<_, Xor16FilterBuilder>::for_test( + sstable_object_id, + writer, + opt, + table_id_to_vnode, + ); let mut full_key = test_key_of(0, epoch, TableId::new(table_id)); let table_key_len = full_key.user_key.table_key.len(); @@ -273,8 +286,11 @@ async fn compact>( bloom_false_positive: 0.001, ..Default::default() }; - let mut builder = - CapacitySplitTableBuilder::for_test(LocalTableBuilderFactory::new(32, sstable_store, opt)); + let compaction_catalog_agent_ref = CompactionCatalogAgent::for_test(vec![0]); + let mut builder = CapacitySplitTableBuilder::for_test( + LocalTableBuilderFactory::new(32, sstable_store, opt), + compaction_catalog_agent_ref, + ); let task_config = task_config.unwrap_or_else(|| TaskConfig { key_range: KeyRange::inf(), diff --git a/src/storage/benches/bench_multi_builder.rs b/src/storage/benches/bench_multi_builder.rs index 2a7b379a55269..fc25674ad626b 100644 --- a/src/storage/benches/bench_multi_builder.rs +++ b/src/storage/benches/bench_multi_builder.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
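The benchmark changes above illustrate the new test-setup pattern: `SstableBuilder::for_test` now also receives a `table_id -> vnode count` map, and `CapacitySplitTableBuilder::for_test` receives a `CompactionCatalogAgent`. The sketch below only shows how those extra arguments are built; it is not part of the diff, and the import paths and signatures are taken from the hunks above or otherwise assumed.

```rust
// Sketch of the extra arguments introduced for the test builders (assumed signatures).
use std::collections::HashMap;

use risingwave_common::hash::VirtualNode;
use risingwave_storage::compaction_catalog_manager::CompactionCatalogAgent;

fn build_extra_test_args(table_id: u32) {
    // Per-table vnode count, keyed by table id; tests use the test-only constant.
    let table_id_to_vnode =
        HashMap::from_iter(vec![(table_id, VirtualNode::COUNT_FOR_TEST)]);

    // Test-only catalog agent for the given table ids (stands in for the old
    // FilterKeyExtractorManager-based setup).
    let compaction_catalog_agent_ref = CompactionCatalogAgent::for_test(vec![table_id]);

    // Both values are then threaded into `SstableBuilder::for_test(..)` and
    // `CapacitySplitTableBuilder::for_test(..)` as shown in the hunks above.
    let _ = (table_id_to_vnode, compaction_catalog_agent_ref);
}
```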
+use std::collections::HashMap; use std::env; use std::ops::Range; use std::sync::atomic::AtomicU64; @@ -24,11 +25,13 @@ use foyer::{Engine, HybridCacheBuilder}; use rand::random; use risingwave_common::catalog::TableId; use risingwave_common::config::{MetricLevel, ObjectStoreConfig}; +use risingwave_common::hash::VirtualNode; use risingwave_hummock_sdk::key::{FullKey, UserKey}; use risingwave_hummock_sdk::sstable_info::SstableInfo; use risingwave_object_store::object::{ InMemObjectStore, ObjectStore, ObjectStoreImpl, S3ObjectStore, }; +use risingwave_storage::compaction_catalog_manager::CompactionCatalogAgent; use risingwave_storage::hummock::iterator::{ConcatIterator, ConcatIteratorInner, HummockIterator}; use risingwave_storage::hummock::multi_builder::{CapacitySplitTableBuilder, TableBuilderFactory}; use risingwave_storage::hummock::value::HummockValue; @@ -83,7 +86,11 @@ impl TableBuilderFactory for LocalTableBuilderFactory Self { Self { - filter_key_extractor_manager, + compaction_catalog_manager, system_params_manager, version: 0, } } fn handle_catalog_snapshot(&mut self, tables: Vec) { - let all_filter_key_extractors: HashMap> = tables - .iter() - .map(|t| (t.id, Arc::new(FilterKeyExtractorImpl::from_table(t)))) - .collect(); - self.filter_key_extractor_manager - .sync(all_filter_key_extractors); + self.compaction_catalog_manager + .sync(tables.into_iter().map(|t| (t.id, t)).collect()); } fn handle_catalog_notification(&mut self, operation: Operation, table_catalog: Table) { match operation { Operation::Add | Operation::Update => { - self.filter_key_extractor_manager.update( - table_catalog.id, - Arc::new(FilterKeyExtractorImpl::from_table(&table_catalog)), - ); + self.compaction_catalog_manager + .update(table_catalog.id, table_catalog); } Operation::Delete => { - self.filter_key_extractor_manager.remove(table_catalog.id); + self.compaction_catalog_manager.remove(table_catalog.id); } _ => panic!("receive an unsupported notify {:?}", operation), diff --git a/src/storage/compactor/src/server.rs b/src/storage/compactor/src/server.rs index e604cd34a2a1f..72ae1542f116a 100644 --- a/src/storage/compactor/src/server.rs +++ b/src/storage/compactor/src/server.rs @@ -36,8 +36,8 @@ use risingwave_pb::common::WorkerType; use risingwave_pb::compactor::compactor_service_server::CompactorServiceServer; use risingwave_pb::monitor_service::monitor_service_server::MonitorServiceServer; use risingwave_rpc_client::{GrpcCompactorProxyClient, MetaClient}; -use risingwave_storage::filter_key_extractor::{ - FilterKeyExtractorManager, RemoteTableAccessor, RpcFilterKeyExtractorManager, +use risingwave_storage::compaction_catalog_manager::{ + CompactionCatalogManager, RemoteTableAccessor, }; use risingwave_storage::hummock::compactor::{ new_compaction_await_tree_reg_ref, CompactionAwaitTreeRegRef, CompactionExecutor, @@ -212,12 +212,13 @@ pub async fn compactor_serve( compactor_metrics, ) = prepare_start_parameters(config.clone(), system_params_reader.clone()).await; - let filter_key_extractor_manager = Arc::new(RpcFilterKeyExtractorManager::new(Box::new( + let compaction_catalog_manager_ref = Arc::new(CompactionCatalogManager::new(Box::new( RemoteTableAccessor::new(meta_client.clone()), ))); + let system_params_manager = Arc::new(LocalSystemParamsManager::new(system_params_reader)); let compactor_observer_node = CompactorObserverNode::new( - filter_key_extractor_manager.clone(), + compaction_catalog_manager_ref.clone(), system_params_manager.clone(), ); let observer_manager = @@ -234,9 +235,6 @@ pub async 
fn compactor_serve( hummock_meta_client.clone(), storage_opts.sstable_id_remote_fetch_number, )); - let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - filter_key_extractor_manager.clone(), - ); let compaction_executor = Arc::new(CompactionExecutor::new( opts.compaction_worker_threads_number, @@ -263,7 +261,7 @@ pub async fn compactor_serve( compactor_context.clone(), hummock_meta_client.clone(), sstable_object_id_manager.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_manager_ref, ), ]; diff --git a/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs b/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs index 9abd99cefb517..7a1f0851d3d4d 100644 --- a/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs +++ b/src/storage/hummock_sdk/src/compaction_group/hummock_version_ext.rs @@ -116,7 +116,7 @@ impl HummockVersion { pub fn get_sst_infos_from_groups<'a>( &'a self, select_group: &'a HashSet, - ) -> impl Iterator + 'a { + ) -> impl Iterator + 'a { self.levels .iter() .filter_map(|(cg_id, level)| { @@ -332,7 +332,7 @@ impl HummockVersion { let [parent_levels, cur_levels] = self .levels .get_many_mut([&parent_group_id, &group_id]) - .unwrap(); + .map(|res| res.unwrap()); let l0 = &mut parent_levels.l0; { for sub_level in &mut l0.sub_levels { @@ -895,7 +895,8 @@ impl HummockVersionCommon { let [parent_levels, cur_levels] = self .levels .get_many_mut([&parent_group_id, &group_id]) - .unwrap(); + .map(|res| res.unwrap()); + let l0 = &mut parent_levels.l0; { for sub_level in &mut l0.sub_levels { diff --git a/src/storage/hummock_sdk/src/compaction_group/mod.rs b/src/storage/hummock_sdk/src/compaction_group/mod.rs index 0bf3fcea1c4e7..dbe58018f15e3 100644 --- a/src/storage/hummock_sdk/src/compaction_group/mod.rs +++ b/src/storage/hummock_sdk/src/compaction_group/mod.rs @@ -44,6 +44,10 @@ impl From for CompactionGroupId { } } +/// The split follows these rules: +/// 1. SSTs with `split_key` will be split into two separate SSTs, and their `key_range`s will be changed to `sst_1`: [`sst.key_range.left`, `split_key`) and `sst_2`: [`split_key`, `sst.key_range.right`]. +/// 2. Currently only `vnode` 0 and `vnode` max are supported. +/// 3. Due to the above rule, `vnode` max will be rewritten as `table_id + 1` with `vnode` 0. pub mod group_split { use std::cmp::Ordering; use std::collections::BTreeSet; @@ -61,11 +65,6 @@ pub mod group_split { use crate::sstable_info::SstableInfo; use crate::{can_concat, HummockEpoch, KeyComparator}; - /// The split will follow the following rules: - /// 1. Ssts with `split_key` will be split into two separate ssts and their `key_range` will be changed `sst_1`: [`sst.key_range.right`, `split_key`) `sst_2`: [`split_key`, `sst.key_range.right`]. - /// 2. Currently only `vnode` 0 and `vnode` max is supported. - /// 3. 
Due to the above rule, `vnode` max will be rewritten as `table_id` + 1, vnode 0 - // By default, the split key is constructed with vnode = 0 and epoch = MAX, so that we can split table_id to the right group pub fn build_split_key(table_id: StateTableId, vnode: VirtualNode) -> Bytes { build_split_full_key(table_id, vnode).encode().into() diff --git a/src/storage/hummock_sdk/src/key.rs b/src/storage/hummock_sdk/src/key.rs index 9685b0cff3ecc..3318756d66673 100644 --- a/src/storage/hummock_sdk/src/key.rs +++ b/src/storage/hummock_sdk/src/key.rs @@ -292,7 +292,6 @@ pub fn prev_epoch(epoch: &[u8]) -> Vec { /// compute the next full key of the given full key /// /// if the `user_key` has no successor key, the result will be a empty vec - pub fn next_full_key(full_key: &[u8]) -> Vec { let (user_key, epoch) = split_key_epoch(full_key); let prev_epoch = prev_epoch(epoch); @@ -315,7 +314,6 @@ pub fn next_full_key(full_key: &[u8]) -> Vec { /// compute the prev full key of the given full key /// /// if the `user_key` has no predecessor key, the result will be a empty vec - pub fn prev_full_key(full_key: &[u8]) -> Vec { let (user_key, epoch) = split_key_epoch(full_key); let next_epoch = next_epoch(epoch); @@ -532,7 +530,7 @@ impl EstimateSize for TableKey { } } -impl<'a> TableKey<&'a [u8]> { +impl TableKey<&[u8]> { pub fn copy_into>(&self) -> TableKey { TableKey(T::copy_from_slice(self.as_ref())) } @@ -646,7 +644,7 @@ impl<'a> UserKey<&'a [u8]> { } } -impl<'a, T: AsRef<[u8]> + Clone> UserKey<&'a T> { +impl + Clone> UserKey<&T> { pub fn cloned(self) -> UserKey { UserKey { table_id: self.table_id, @@ -941,7 +939,7 @@ impl EmptySliceRef for Vec { } const EMPTY_SLICE: &[u8] = b""; -impl<'a> EmptySliceRef for &'a [u8] { +impl EmptySliceRef for &[u8] { fn empty_slice_ref<'b>() -> &'b Self { &EMPTY_SLICE } diff --git a/src/storage/hummock_sdk/src/lib.rs b/src/storage/hummock_sdk/src/lib.rs index aae0c0c0971ab..366fc02310ebb 100644 --- a/src/storage/hummock_sdk/src/lib.rs +++ b/src/storage/hummock_sdk/src/lib.rs @@ -18,7 +18,6 @@ #![feature(map_many_mut)] #![feature(type_alias_impl_trait)] #![feature(impl_trait_in_assoc_type)] -#![feature(is_sorted)] #![feature(let_chains)] #![feature(btree_cursors)] #![feature(strict_overflow_ops)] diff --git a/src/storage/hummock_sdk/src/version.rs b/src/storage/hummock_sdk/src/version.rs index b106563cdc7ac..80276b09ffdff 100644 --- a/src/storage/hummock_sdk/src/version.rs +++ b/src/storage/hummock_sdk/src/version.rs @@ -526,7 +526,7 @@ impl HummockVersionDelta { pub fn newly_added_sst_infos<'a>( &'a self, select_group: Option<&'a HashSet>, - ) -> impl Iterator + 'a { + ) -> impl Iterator + 'a { self.group_deltas .iter() .filter_map(move |(cg_id, group_deltas)| { diff --git a/src/storage/hummock_test/src/bin/replay/main.rs b/src/storage/hummock_test/src/bin/replay/main.rs index 4dce34153f8a5..98ee3ba448ee7 100644 --- a/src/storage/hummock_test/src/bin/replay/main.rs +++ b/src/storage/hummock_test/src/bin/replay/main.rs @@ -40,8 +40,8 @@ use risingwave_hummock_trace::{ use risingwave_meta::hummock::test_utils::setup_compute_env; use risingwave_meta::hummock::MockHummockMetaClient; use risingwave_object_store::object::build_remote_object_store; -use risingwave_storage::filter_key_extractor::{ - FakeRemoteTableAccessor, RpcFilterKeyExtractorManager, +use risingwave_storage::compaction_catalog_manager::{ + CompactionCatalogManager, FakeRemoteTableAccessor, }; use risingwave_storage::hummock::{HummockStorage, SstableStore, SstableStoreConfig}; use 
risingwave_storage::monitor::{CompactorMetrics, HummockStateStoreMetrics, ObjectStoreMetrics}; @@ -166,16 +166,14 @@ async fn create_replay_hummock(r: Record, args: &Args) -> Result, @@ -111,7 +116,7 @@ pub(crate) mod tests { .unwrap(); register_tables_with_id_for_test( - hummock.filter_key_extractor_manager(), + hummock.compaction_catalog_manager_ref(), hummock_manager_ref, table_ids, ) @@ -197,7 +202,7 @@ pub(crate) mod tests { } } - fn get_compactor_context(storage: &HummockStorage) -> CompactorContext { + pub fn get_compactor_context(storage: &HummockStorage) -> CompactorContext { get_compactor_context_impl(storage.storage_opts().clone(), storage.sstable_store()) } @@ -240,16 +245,6 @@ pub(crate) mod tests { ) .await; - let rpc_filter_key_extractor_manager = match storage.filter_key_extractor_manager().clone() - { - FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ) => rpc_filter_key_extractor_manager, - FilterKeyExtractorManager::StaticFilterKeyExtractorManager(_) => unreachable!(), - }; - let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ); let compact_ctx = get_compactor_context(&storage); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), @@ -276,8 +271,13 @@ pub(crate) mod tests { ) .await; - // 2. get compact task + let compaction_catalog_agent_ref = storage + .compaction_catalog_manager_ref() + .acquire(vec![table_id]) + .await + .unwrap(); + // 2. get compact task let compaction_group_id = get_compaction_group_id_by_table_id(hummock_manager_ref.clone(), table_id).await; while let Some(compact_task) = hummock_manager_ref @@ -287,12 +287,12 @@ pub(crate) mod tests { { // 3. 
compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let ((result_task, task_stats, object_timestamps), _) = compact( + let ((result_task, task_stats, object_timestamps), _) = compact_with_agent( compact_ctx.clone(), compact_task.clone(), rx, Box::new(sstable_object_id_manager.clone()), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), ) .await; @@ -404,29 +404,6 @@ pub(crate) mod tests { } } - pub fn prepare_compactor_and_filter( - storage: &HummockStorage, - existing_table_id: u32, - ) -> (CompactorContext, FilterKeyExtractorManager) { - let rpc_filter_key_extractor_manager = match storage.filter_key_extractor_manager().clone() - { - FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ) => rpc_filter_key_extractor_manager, - FilterKeyExtractorManager::StaticFilterKeyExtractorManager(_) => unreachable!(), - }; - rpc_filter_key_extractor_manager.update( - existing_table_id, - Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), - ); - - let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ); - - (get_compactor_context(storage), filter_key_extractor_manager) - } - #[tokio::test] async fn test_compaction_drop_all_key() { let (env, hummock_manager_ref, cluster_ctl_ref, worker_id) = setup_compute_env(8080).await; @@ -435,16 +412,18 @@ pub(crate) mod tests { worker_id as _, )); + let notification_client = get_notification_client_for_test( + env, + hummock_manager_ref.clone(), + cluster_ctl_ref, + worker_id, + ) + .await; + let existing_table_id: u32 = 1; let storage_existing_table_id = get_hummock_storage( hummock_meta_client.clone(), - get_notification_client_for_test( - env, - hummock_manager_ref.clone(), - cluster_ctl_ref, - worker_id, - ) - .await, + notification_client, &hummock_manager_ref, &[existing_table_id], ) @@ -483,8 +462,6 @@ pub(crate) mod tests { .get_sst_ids_by_group_id(compaction_group_id) .collect_vec() .is_empty()); - - // assert_eq!(0, current_version.num_levels(compaction_group_id)); } #[tokio::test] @@ -519,26 +496,17 @@ pub(crate) mod tests { let table_id_2 = storage_2.table_id(); let table_id_set = HashSet::from_iter([table_id_1, table_id_2]); - let rpc_filter_key_extractor_manager = - match global_storage.filter_key_extractor_manager().clone() { - FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ) => rpc_filter_key_extractor_manager, - FilterKeyExtractorManager::StaticFilterKeyExtractorManager(_) => unreachable!(), - }; - - rpc_filter_key_extractor_manager.update( - table_id_1.table_id(), - Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), + update_filter_key_extractor_for_table_ids( + global_storage.compaction_catalog_manager_ref(), + &[table_id_1.table_id(), table_id_2.table_id()], ); - rpc_filter_key_extractor_manager.update( - table_id_2.table_id(), - Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), - ); - let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ); + let compaction_catalog_agent_ref = global_storage + .compaction_catalog_manager_ref() + .acquire(vec![table_id_1.table_id(), table_id_2.table_id()]) + .await + .unwrap(); + let compact_ctx = get_compactor_context_impl( global_storage.storage_opts().clone(), global_storage.sstable_store(), @@ -639,12 +607,12 @@ pub(crate) mod tests { // 4. 
compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let ((result_task, task_stats, object_timestamps), _) = compact( + let ((result_task, task_stats, object_timestamps), _) = compact_with_agent( compact_ctx, compact_task.clone(), rx, Box::new(sstable_object_id_manager.clone()), - filter_key_extractor_manager, + compaction_catalog_agent_ref.clone(), ) .await; hummock_manager_ref @@ -738,14 +706,6 @@ pub(crate) mod tests { ) .await; - let rpc_filter_key_extractor_manager = match storage.filter_key_extractor_manager().clone() - { - FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ) => rpc_filter_key_extractor_manager, - FilterKeyExtractorManager::StaticFilterKeyExtractorManager(_) => unreachable!(), - }; - let compact_ctx = get_compactor_context(&storage); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), @@ -754,13 +714,12 @@ pub(crate) mod tests { .clone() .sstable_id_remote_fetch_number, )); - rpc_filter_key_extractor_manager.update( - existing_table_id, - Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), - ); - let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ); + + let compaction_catalog_agent_ref = storage + .compaction_catalog_manager_ref() + .acquire(vec![existing_table_id]) + .await + .unwrap(); // 1. add sstables let val = Bytes::from(b"0"[..].to_vec()); // 1 Byte value @@ -844,12 +803,12 @@ pub(crate) mod tests { // 3. compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let ((result_task, task_stats, object_timestamps), _) = compact( + let ((result_task, task_stats, object_timestamps), _) = compact_with_agent( compact_ctx, compact_task.clone(), rx, Box::new(sstable_object_id_manager.clone()), - filter_key_extractor_manager, + compaction_catalog_agent_ref.clone(), ) .await; @@ -950,23 +909,21 @@ pub(crate) mod tests { ) .await; - let rpc_filter_key_extractor_manager = match storage.filter_key_extractor_manager().clone() - { - FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ) => rpc_filter_key_extractor_manager, - FilterKeyExtractorManager::StaticFilterKeyExtractorManager(_) => unreachable!(), - }; - - rpc_filter_key_extractor_manager.update( + let mut multi_filter_key_extractor = MultiFilterKeyExtractor::default(); + multi_filter_key_extractor.register( existing_table_id, - Arc::new(FilterKeyExtractorImpl::FixedLength( - FixedLengthFilterKeyExtractor::new(TABLE_PREFIX_LEN + key_prefix.len()), + FilterKeyExtractorImpl::FixedLength(FixedLengthFilterKeyExtractor::new( + TABLE_PREFIX_LEN + key_prefix.len(), )), ); - let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ); + + let table_id_to_vnode = + HashMap::from_iter([(existing_table_id, VirtualNode::COUNT_FOR_TEST)]); + let compaction_catalog_agent_ref = Arc::new(CompactionCatalogAgent::new( + FilterKeyExtractorImpl::Multi(multi_filter_key_extractor), + table_id_to_vnode, + )); + let compact_ctx = get_compactor_context(&storage); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), @@ -1043,12 +1000,12 @@ pub(crate) mod tests { // 3. 
compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let ((result_task, task_stats, object_timestamps), _) = compact( + let ((result_task, task_stats, object_timestamps), _) = compact_with_agent( compact_ctx, compact_task.clone(), rx, Box::new(sstable_object_id_manager.clone()), - filter_key_extractor_manager, + compaction_catalog_agent_ref.clone(), ) .await; @@ -1156,8 +1113,7 @@ pub(crate) mod tests { &[existing_table_id], ) .await; - let (compact_ctx, filter_key_extractor_manager) = - prepare_compactor_and_filter(&storage, existing_table_id); + let compact_ctx = get_compactor_context(&storage); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), storage @@ -1221,14 +1177,20 @@ pub(crate) mod tests { 129 ); + let compaction_catalog_agent_ref = storage + .compaction_catalog_manager_ref() + .acquire(vec![existing_table_id]) + .await + .unwrap(); + // 3. compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let ((result_task, task_stats, object_timestamps), _) = compact( + let ((result_task, task_stats, object_timestamps), _) = compact_with_agent( compact_ctx, compact_task.clone(), rx, Box::new(sstable_object_id_manager.clone()), - filter_key_extractor_manager, + compaction_catalog_agent_ref.clone(), ) .await; @@ -1328,9 +1290,8 @@ pub(crate) mod tests { async fn run_fast_and_normal_runner( compact_ctx: CompactorContext, task: CompactTask, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, ) -> (Vec, Vec) { - let multi_filter_key_extractor = - Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)); let compaction_filter = DummyCompactionFilter {}; let slow_compact_runner = CompactorRunner::new( 0, @@ -1340,10 +1301,11 @@ pub(crate) mod tests { VecDeque::from_iter([5, 6, 7, 8, 9, 10, 11, 12, 13]), )), ); + let fast_compact_runner = FastCompactorRunner::new( compact_ctx.clone(), task.clone(), - multi_filter_key_extractor.clone(), + compaction_catalog_agent_ref.clone(), Box::new(SharedComapctorObjectIdManager::for_test( VecDeque::from_iter([22, 23, 24, 25, 26, 27, 28, 29]), )), @@ -1352,7 +1314,7 @@ pub(crate) mod tests { let (_, ret1, _) = slow_compact_runner .run( compaction_filter, - multi_filter_key_extractor, + compaction_catalog_agent_ref, Arc::new(TaskProgress::default()), ) .await @@ -1384,7 +1346,9 @@ pub(crate) mod tests { ) .await; hummock_manager_ref.get_new_sst_ids(10).await.unwrap(); - let (compact_ctx, _) = prepare_compactor_and_filter(&storage, existing_table_id); + let compact_ctx = get_compactor_context(&storage); + let compaction_catalog_agent_ref = + CompactionCatalogAgent::for_test(vec![existing_table_id]); let sstable_store = compact_ctx.sstable_store.clone(); let capacity = 256 * 1024; @@ -1433,7 +1397,9 @@ pub(crate) mod tests { gc_delete_keys: true, ..Default::default() }; - let (ret, fast_ret) = run_fast_and_normal_runner(compact_ctx.clone(), task).await; + let (ret, fast_ret) = + run_fast_and_normal_runner(compact_ctx.clone(), task, compaction_catalog_agent_ref) + .await; check_compaction_result(compact_ctx.sstable_store, ret, fast_ret, capacity).await; } @@ -1563,7 +1529,9 @@ pub(crate) mod tests { ) .await; hummock_manager_ref.get_new_sst_ids(10).await.unwrap(); - let (compact_ctx, _) = prepare_compactor_and_filter(&storage, existing_table_id); + let compact_ctx = get_compactor_context(&storage); + let compaction_catalog_agent_ref = + CompactionCatalogAgent::for_test(vec![existing_table_id]); let sstable_store = compact_ctx.sstable_store.clone(); let capacity = 256 * 1024; @@ -1585,7 
+1553,6 @@ pub(crate) mod tests { ); let mut sst_infos = vec![]; let mut max_sst_file_size = 0; - for object_id in 1..3 { let mut builder = SstableBuilder::<_, BlockedXor16FilterBuilder>::new( object_id, @@ -1594,7 +1561,7 @@ pub(crate) mod tests { .create_sst_writer(object_id, SstableWriterOptions::default()), BlockedXor16FilterBuilder::create(opts.bloom_false_positive, opts.capacity / 16), opts.clone(), - Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), + compaction_catalog_agent_ref.clone(), None, ); let mut last_k: u64 = 1; @@ -1659,7 +1626,12 @@ pub(crate) mod tests { gc_delete_keys: true, ..Default::default() }; - let (ret, fast_ret) = run_fast_and_normal_runner(compact_ctx.clone(), task).await; + let (ret, fast_ret) = run_fast_and_normal_runner( + compact_ctx.clone(), + task, + compaction_catalog_agent_ref.clone(), + ) + .await; check_compaction_result(compact_ctx.sstable_store, ret, fast_ret, target_file_size).await; } @@ -1685,7 +1657,9 @@ pub(crate) mod tests { ) .await; hummock_manager_ref.get_new_sst_ids(10).await.unwrap(); - let (compact_ctx, _) = prepare_compactor_and_filter(&storage, existing_table_id); + let compact_ctx = get_compactor_context(&storage); + let compaction_catalog_agent_ref = + CompactionCatalogAgent::for_test(vec![existing_table_id]); let sstable_store = compact_ctx.sstable_store.clone(); let capacity = 256 * 1024; @@ -1707,7 +1681,6 @@ pub(crate) mod tests { ); let mut sst_infos = vec![]; let mut max_sst_file_size = 0; - for object_id in 1..3 { let mut builder = SstableBuilder::<_, BlockedXor16FilterBuilder>::new( object_id, @@ -1716,7 +1689,7 @@ pub(crate) mod tests { .create_sst_writer(object_id, SstableWriterOptions::default()), BlockedXor16FilterBuilder::create(opts.bloom_false_positive, opts.capacity / 16), opts.clone(), - Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), + compaction_catalog_agent_ref.clone(), None, ); let key_count = KEY_COUNT / VirtualNode::COUNT_FOR_TEST * 2; @@ -1799,7 +1772,7 @@ pub(crate) mod tests { table_infos: sst_infos, }, ], - existing_table_ids: vec![1], + existing_table_ids: vec![existing_table_id], task_id: 1, splits: vec![KeyRange::inf()], target_level: 6, @@ -1810,7 +1783,12 @@ pub(crate) mod tests { table_watermarks, ..Default::default() }; - let (ret, fast_ret) = run_fast_and_normal_runner(compact_ctx.clone(), task).await; + let (ret, fast_ret) = run_fast_and_normal_runner( + compact_ctx.clone(), + task, + compaction_catalog_agent_ref.clone(), + ) + .await; let mut fast_tables = Vec::with_capacity(fast_ret.len()); let mut normal_tables = Vec::with_capacity(ret.len()); let mut stats = StoreLocalStatistic::default(); @@ -1889,35 +1867,14 @@ pub(crate) mod tests { .await; // basic cg2 -> [1, 2] - let rpc_filter_key_extractor_manager = match storage.filter_key_extractor_manager().clone() - { - FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ) => rpc_filter_key_extractor_manager, - FilterKeyExtractorManager::StaticFilterKeyExtractorManager(_) => unreachable!(), - }; - let mut key = BytesMut::default(); key.put_u16(1); key.put_slice(b"key_prefix"); let key_prefix = key.freeze(); - rpc_filter_key_extractor_manager.update( - table_id_1.table_id(), - Arc::new(FilterKeyExtractorImpl::FixedLength( - FixedLengthFilterKeyExtractor::new(TABLE_PREFIX_LEN + key_prefix.len()), - )), - ); - rpc_filter_key_extractor_manager.update( - table_id_2.table_id(), - Arc::new(FilterKeyExtractorImpl::FixedLength( - 
FixedLengthFilterKeyExtractor::new(TABLE_PREFIX_LEN + key_prefix.len()), - )), - ); + let compaction_catalog_agent_ref = + CompactionCatalogAgent::for_test(vec![table_id_1.table_id(), table_id_2.table_id()]); - let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ); let compact_ctx = get_compactor_context(&storage); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), @@ -2027,7 +1984,7 @@ pub(crate) mod tests { level: usize, hummock_manager_ref: HummockManagerRef, compact_ctx: CompactorContext, - filter_key_extractor_manager: FilterKeyExtractorManager, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, sstable_object_id_manager: Arc, ) { // compact left group @@ -2054,12 +2011,12 @@ pub(crate) mod tests { // 3. compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let ((result_task, task_stats, object_timestamps), _) = compact( + let ((result_task, task_stats, object_timestamps), _) = compact_with_agent( compact_ctx, compact_task.clone(), rx, Box::new(sstable_object_id_manager.clone()), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), ) .await; @@ -2105,7 +2062,7 @@ pub(crate) mod tests { 0, hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; @@ -2115,7 +2072,7 @@ pub(crate) mod tests { 0, hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; @@ -2143,7 +2100,7 @@ pub(crate) mod tests { 0, hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; @@ -2153,7 +2110,7 @@ pub(crate) mod tests { 0, hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; @@ -2187,7 +2144,7 @@ pub(crate) mod tests { 0, hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; @@ -2248,7 +2205,7 @@ pub(crate) mod tests { 0, hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; @@ -2258,7 +2215,7 @@ pub(crate) mod tests { 0, hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; @@ -2268,7 +2225,7 @@ pub(crate) mod tests { level: usize, hummock_manager_ref: HummockManagerRef, compact_ctx: CompactorContext, - filter_key_extractor_manager: FilterKeyExtractorManager, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, sstable_object_id_manager: Arc, ) { loop { @@ -2293,12 +2250,12 @@ pub(crate) mod tests { // 3. 
compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let ((result_task, task_stats, object_timestamps), _) = compact( + let ((result_task, task_stats, object_timestamps), _) = compact_with_agent( compact_ctx.clone(), compact_task.clone(), rx, Box::new(sstable_object_id_manager.clone()), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), ) .await; @@ -2346,7 +2303,7 @@ pub(crate) mod tests { 0, hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; @@ -2356,7 +2313,7 @@ pub(crate) mod tests { 0, hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; diff --git a/src/storage/hummock_test/src/sync_point_tests.rs b/src/storage/hummock_test/src/sync_point_tests.rs index 1d21b74e15738..065c9536006bb 100644 --- a/src/storage/hummock_test/src/sync_point_tests.rs +++ b/src/storage/hummock_test/src/sync_point_tests.rs @@ -31,16 +31,16 @@ use risingwave_meta::hummock::compaction::selector::ManualCompactionOption; use risingwave_meta::hummock::test_utils::{setup_compute_env, setup_compute_env_with_config}; use risingwave_meta::hummock::{HummockManagerRef, MockHummockMetaClient}; use risingwave_rpc_client::HummockMetaClient; -use risingwave_storage::filter_key_extractor::FilterKeyExtractorManager; -use risingwave_storage::hummock::compactor::compactor_runner::compact; +use risingwave_storage::compaction_catalog_manager::CompactionCatalogAgentRef; +use risingwave_storage::hummock::compactor::compactor_runner::compact_with_agent; use risingwave_storage::hummock::compactor::CompactorContext; use risingwave_storage::hummock::{CachePolicy, GetObjectId, SstableObjectIdManager}; use risingwave_storage::store::{LocalStateStore, NewLocalOptions, ReadOptions, StateStoreRead}; use risingwave_storage::StateStore; use serial_test::serial; -use super::compactor_tests::tests::{get_hummock_storage, prepare_compactor_and_filter}; -use crate::compactor_tests::tests::flush_and_commit; +use super::compactor_tests::tests::get_hummock_storage; +use crate::compactor_tests::tests::{flush_and_commit, get_compactor_context}; use crate::get_notification_client_for_test; use crate::local_state_store_test_utils::LocalStateStoreTestExt; use crate::test_utils::gen_key_from_bytes; @@ -178,7 +178,7 @@ async fn test_syncpoints_test_failpoints_fetch_ids() { pub async fn compact_once( hummock_manager_ref: HummockManagerRef, compact_ctx: CompactorContext, - filter_key_extractor_manager: FilterKeyExtractorManager, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, sstable_object_id_manager: Arc, ) { // 2. get compact task @@ -201,12 +201,12 @@ pub async fn compact_once( compact_task.compaction_filter_mask = compaction_filter_flag.bits(); // 3. 
compact let (_tx, rx) = tokio::sync::oneshot::channel(); - let ((result_task, task_stats, object_timestamps), _) = compact( + let ((result_task, task_stats, object_timestamps), _) = compact_with_agent( compact_ctx, compact_task.clone(), rx, Box::new(sstable_object_id_manager), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), ) .await; @@ -252,8 +252,13 @@ async fn test_syncpoints_get_in_delete_range_boundary() { &[existing_table_id], ) .await; - let (compact_ctx, filter_key_extractor_manager) = - prepare_compactor_and_filter(&storage, existing_table_id); + + let compact_ctx = get_compactor_context(&storage); + let compaction_catalog_agent_ref = storage + .compaction_catalog_manager_ref() + .acquire(vec![existing_table_id]) + .await + .unwrap(); let sstable_object_id_manager = Arc::new(SstableObjectIdManager::new( hummock_meta_client.clone(), @@ -316,10 +321,11 @@ async fn test_syncpoints_get_in_delete_range_boundary() { local.table_id(), ) .await; + compact_once( hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; @@ -360,7 +366,7 @@ async fn test_syncpoints_get_in_delete_range_boundary() { compact_once( hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; @@ -402,7 +408,7 @@ async fn test_syncpoints_get_in_delete_range_boundary() { compact_once( hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; @@ -437,7 +443,7 @@ async fn test_syncpoints_get_in_delete_range_boundary() { compact_once( hummock_manager_ref.clone(), compact_ctx.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref.clone(), sstable_object_id_manager.clone(), ) .await; diff --git a/src/storage/hummock_test/src/test_utils.rs b/src/storage/hummock_test/src/test_utils.rs index 849e77a659f7a..c403917938fb5 100644 --- a/src/storage/hummock_test/src/test_utils.rs +++ b/src/storage/hummock_test/src/test_utils.rs @@ -17,6 +17,7 @@ use std::sync::Arc; use bytes::Bytes; use itertools::Itertools; use risingwave_common::catalog::TableId; +use risingwave_common::hash::VirtualNode; use risingwave_common_service::ObserverManager; use risingwave_hummock_sdk::compaction_group::StaticCompactionGroupId; use risingwave_hummock_sdk::key::TableKey; @@ -29,11 +30,10 @@ use risingwave_meta::hummock::{HummockManagerRef, MockHummockMetaClient}; use risingwave_meta::manager::MetaSrvEnv; use risingwave_pb::catalog::{PbTable, Table}; use risingwave_rpc_client::HummockMetaClient; -use risingwave_storage::error::StorageResult; -use risingwave_storage::filter_key_extractor::{ - FilterKeyExtractorImpl, FilterKeyExtractorManager, FullKeyFilterKeyExtractor, - RpcFilterKeyExtractorManager, +use risingwave_storage::compaction_catalog_manager::{ + CompactionCatalogManager, CompactionCatalogManagerRef, }; +use risingwave_storage::error::StorageResult; use risingwave_storage::hummock::backup_reader::BackupReader; use risingwave_storage::hummock::event_handler::HummockVersionUpdate; use risingwave_storage::hummock::iterator::test_utils::mock_sstable_store; @@ -71,7 +71,7 @@ pub async fn prepare_first_valid_version( let observer_manager = ObserverManager::new( notification_client, HummockObserverNode::new( - 
Arc::new(RpcFilterKeyExtractorManager::default()), + Arc::new(CompactionCatalogManager::default()), backup_manager, tx.clone(), write_limiter, @@ -145,7 +145,7 @@ pub async fn with_hummock_storage_v2( .unwrap(); register_tables_with_id_for_test( - hummock_storage.filter_key_extractor_manager(), + hummock_storage.compaction_catalog_manager_ref(), &hummock_manager_ref, &[table_id.table_id()], ) @@ -153,31 +153,28 @@ pub async fn with_hummock_storage_v2( (hummock_storage, meta_client) } + pub fn update_filter_key_extractor_for_table_ids( - filter_key_extractor_manager_ref: &FilterKeyExtractorManager, + compaction_catalog_manager_ref: CompactionCatalogManagerRef, table_ids: &[u32], ) { - let rpc_filter_key_extractor_manager = match filter_key_extractor_manager_ref { - FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ) => rpc_filter_key_extractor_manager, - FilterKeyExtractorManager::StaticFilterKeyExtractorManager(_) => unreachable!(), - }; - for table_id in table_ids { - rpc_filter_key_extractor_manager.update( - *table_id, - Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), - ) + let mock_table = PbTable { + id: *table_id, + read_prefix_len_hint: 0, + maybe_vnode_count: Some(VirtualNode::COUNT_FOR_TEST as u32), + ..Default::default() + }; + compaction_catalog_manager_ref.update(*table_id, mock_table); } } pub async fn register_tables_with_id_for_test( - filter_key_extractor_manager: &FilterKeyExtractorManager, + compaction_catalog_manager_ref: CompactionCatalogManagerRef, hummock_manager_ref: &HummockManagerRef, table_ids: &[u32], ) { - update_filter_key_extractor_for_table_ids(filter_key_extractor_manager, table_ids); + update_filter_key_extractor_for_table_ids(compaction_catalog_manager_ref, table_ids); register_table_ids_to_compaction_group( hummock_manager_ref, table_ids, @@ -187,28 +184,19 @@ pub async fn register_tables_with_id_for_test( } pub fn update_filter_key_extractor_for_tables( - filter_key_extractor_manager: &FilterKeyExtractorManager, + compaction_catalog_manager_ref: CompactionCatalogManagerRef, tables: &[PbTable], ) { - let rpc_filter_key_extractor_manager = match filter_key_extractor_manager { - FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_extractor_manager, - ) => rpc_filter_key_extractor_manager, - FilterKeyExtractorManager::StaticFilterKeyExtractorManager(_) => unreachable!(), - }; for table in tables { - rpc_filter_key_extractor_manager.update( - table.id, - Arc::new(FilterKeyExtractorImpl::from_table(table)), - ) + compaction_catalog_manager_ref.update(table.id, table.clone()) } } pub async fn register_tables_with_catalog_for_test( - filter_key_extractor_manager: &FilterKeyExtractorManager, + compaction_catalog_manager_ref: CompactionCatalogManagerRef, hummock_manager_ref: &HummockManagerRef, tables: &[Table], ) { - update_filter_key_extractor_for_tables(filter_key_extractor_manager, tables); + update_filter_key_extractor_for_tables(compaction_catalog_manager_ref, tables); let table_ids = tables.iter().map(|t| t.id).collect_vec(); register_table_ids_to_compaction_group( hummock_manager_ref, @@ -233,7 +221,7 @@ impl HummockTestEnv { pub async fn register_table_id(&self, table_id: TableId) { register_tables_with_id_for_test( - self.storage.filter_key_extractor_manager(), + self.storage.compaction_catalog_manager_ref(), &self.manager, &[table_id.table_id()], ) @@ -243,7 +231,7 @@ impl HummockTestEnv { pub async fn register_table(&self, table: PbTable) { 
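
For readers following the test-utils changes above: a minimal sketch (not part of this diff) of how a test now seeds the compaction catalog, assuming the `risingwave_storage::compaction_catalog_manager` paths imported in these hunks; the `#[tokio::test]` wrapper and the table id `1` are illustrative.

```rust
use std::sync::Arc;

use risingwave_common::hash::VirtualNode;
use risingwave_pb::catalog::PbTable;
use risingwave_storage::compaction_catalog_manager::CompactionCatalogManager;

#[tokio::test]
async fn register_mock_catalog_for_compaction() {
    // Manager backed by the default (fake) table accessor, as in the test utils above.
    let manager = Arc::new(CompactionCatalogManager::default());

    // Register a minimal catalog entry: full-key filtering (read_prefix_len_hint = 0)
    // and the test vnode count, mirroring `update_filter_key_extractor_for_table_ids`.
    let table_id = 1u32; // illustrative id
    let mock_table = PbTable {
        id: table_id,
        read_prefix_len_hint: 0,
        maybe_vnode_count: Some(VirtualNode::COUNT_FOR_TEST as u32),
        ..Default::default()
    };
    manager.update(table_id, mock_table);

    // Acquiring an agent for a registered table succeeds without any RPC.
    let agent = manager.acquire(vec![table_id]).await.unwrap();
    assert_eq!(agent.vnode_count(table_id), VirtualNode::COUNT_FOR_TEST);
}
```
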
register_tables_with_catalog_for_test( - self.storage.filter_key_extractor_manager(), + self.storage.compaction_catalog_manager_ref(), &self.manager, &[table], ) diff --git a/src/storage/hummock_trace/src/lib.rs b/src/storage/hummock_trace/src/lib.rs index 48b0a71010a74..3a2f1dd643f88 100644 --- a/src/storage/hummock_trace/src/lib.rs +++ b/src/storage/hummock_trace/src/lib.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![feature(cursor_remaining)] +#![feature(cursor_split)] #![feature(trait_alias)] #![feature(coroutines)] diff --git a/src/storage/hummock_trace/src/read.rs b/src/storage/hummock_trace/src/read.rs index bf6563123044c..858e680f35aa3 100644 --- a/src/storage/hummock_trace/src/read.rs +++ b/src/storage/hummock_trace/src/read.rs @@ -185,7 +185,8 @@ mod test { } assert!(deserializer.deserialize(&mut buf).is_err()); - assert!(buf.is_empty()); + // https://github.com/rust-lang/rust/pull/109174 + assert!(buf.split().1.is_empty()); } #[test] diff --git a/src/storage/src/filter_key_extractor.rs b/src/storage/src/compaction_catalog_manager.rs similarity index 73% rename from src/storage/src/filter_key_extractor.rs rename to src/storage/src/compaction_catalog_manager.rs index e9326d37dcd8c..3133cae023300 100644 --- a/src/storage/src/filter_key_extractor.rs +++ b/src/storage/src/compaction_catalog_manager.rs @@ -16,13 +16,12 @@ use std::collections::{HashMap, HashSet}; use std::fmt::Debug; use std::sync::Arc; -use itertools::Itertools; use parking_lot::RwLock; use risingwave_common::catalog::ColumnDesc; -use risingwave_common::hash::VirtualNode; +use risingwave_common::hash::{VirtualNode, VnodeCountCompat}; use risingwave_common::util::row_serde::OrderedRowSerde; use risingwave_common::util::sort_util::OrderType; -use risingwave_hummock_sdk::info_in_release; +use risingwave_hummock_sdk::compaction_group::StateTableId; use risingwave_hummock_sdk::key::{get_table_id, TABLE_PREFIX_LEN}; use risingwave_pb::catalog::Table; use risingwave_rpc_client::error::{Result as RpcResult, RpcError}; @@ -51,7 +50,7 @@ impl FilterKeyExtractorImpl { if read_prefix_len == 0 || read_prefix_len > table_catalog.get_pk().len() { // for now frontend had not infer the table_id_to_filter_key_extractor, so we // use FullKeyFilterKeyExtractor - FilterKeyExtractorImpl::Dummy(DummyFilterKeyExtractor) + FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor) } else { FilterKeyExtractorImpl::Schema(SchemaFilterKeyExtractor::new(table_catalog)) } @@ -186,13 +185,11 @@ impl SchemaFilterKeyExtractor { #[derive(Default)] pub struct MultiFilterKeyExtractor { - id_to_filter_key_extractor: HashMap>, - // cached state - // last_filter_key_extractor_state: Mutex)>>, + id_to_filter_key_extractor: HashMap, } impl MultiFilterKeyExtractor { - pub fn register(&mut self, table_id: u32, filter_key_extractor: Arc) { + pub fn register(&mut self, table_id: u32, filter_key_extractor: FilterKeyExtractorImpl) { self.id_to_filter_key_extractor .insert(table_id, filter_key_extractor); } @@ -259,48 +256,71 @@ impl StateTableAccessor for FakeRemoteTableAccessor { ))) } } -struct FilterKeyExtractorManagerInner { - table_id_to_filter_key_extractor: RwLock>>, + +/// `CompactionCatalogManager` is a manager to manage all `Table` which used in compaction +pub struct CompactionCatalogManager { + // `table_id_to_catalog` is a map to store all `Table` which used in compaction + table_id_to_catalog: RwLock>, + // `table_accessor` is a accessor to fetch `Table` from meta 
when the table not found table_accessor: Box, } -impl FilterKeyExtractorManagerInner { - fn update(&self, table_id: u32, filter_key_extractor: Arc) { - self.table_id_to_filter_key_extractor - .write() - .insert(table_id, filter_key_extractor); +impl Default for CompactionCatalogManager { + fn default() -> Self { + Self::new(Box::::default()) + } +} + +impl CompactionCatalogManager { + pub fn new(table_accessor: Box) -> Self { + Self { + table_id_to_catalog: Default::default(), + table_accessor, + } + } +} + +impl CompactionCatalogManager { + /// `update` is used to update `Table` in `table_id_to_catalog` from notification + pub fn update(&self, table_id: u32, catalog: Table) { + self.table_id_to_catalog.write().insert(table_id, catalog); } - fn sync(&self, filter_key_extractor_map: HashMap>) { - let mut guard = self.table_id_to_filter_key_extractor.write(); + /// `sync` is used to sync all `Table` in `table_id_to_catalog` from notification whole snapshot + pub fn sync(&self, catalog_map: HashMap) { + let mut guard = self.table_id_to_catalog.write(); guard.clear(); - guard.extend(filter_key_extractor_map); + guard.extend(catalog_map); } - fn remove(&self, table_id: u32) { - self.table_id_to_filter_key_extractor - .write() - .remove(&table_id); + /// `remove` is used to remove `Table` in `table_id_to_catalog` by `table_id` + pub fn remove(&self, table_id: u32) { + self.table_id_to_catalog.write().remove(&table_id); } - async fn acquire( + /// `acquire` is used to acquire `CompactionCatalogAgent` by `table_ids` + /// if the table not found in `table_id_to_catalog`, it will fetch from meta + pub async fn acquire( &self, - mut table_id_set: HashSet, - ) -> HummockResult { - if table_id_set.is_empty() { + mut table_ids: Vec, + ) -> HummockResult { + if table_ids.is_empty() { // table_id_set is empty // the table in sst has been deleted // use full key as default - return Ok(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)); + return Err(HummockError::other("table_id_set is empty")); } let mut multi_filter_key_extractor = MultiFilterKeyExtractor::default(); + let mut table_id_to_vnode = HashMap::new(); { - let guard = self.table_id_to_filter_key_extractor.read(); - table_id_set.retain(|table_id| match guard.get(table_id) { - Some(filter_key_extractor) => { - multi_filter_key_extractor.register(*table_id, filter_key_extractor.clone()); + let guard = self.table_id_to_catalog.read(); + table_ids.retain(|table_id| match guard.get(table_id) { + Some(table_catalog) => { + multi_filter_key_extractor + .register(*table_id, FilterKeyExtractorImpl::from_table(table_catalog)); + table_id_to_vnode.insert(*table_id, table_catalog.vnode_count()); false } @@ -308,8 +328,7 @@ impl FilterKeyExtractorManagerInner { }); } - if !table_id_set.is_empty() { - let table_ids = table_id_set.iter().cloned().collect_vec(); + if !table_ids.is_empty() { let mut state_tables = self.table_accessor .get_tables(&table_ids) @@ -320,122 +339,117 @@ impl FilterKeyExtractorManagerInner { e.as_report() )) })?; - let mut guard = self.table_id_to_filter_key_extractor.write(); + + let mut guard = self.table_id_to_catalog.write(); for table_id in table_ids { if let Some(table) = state_tables.remove(&table_id) { - let key_extractor = Arc::new(FilterKeyExtractorImpl::from_table(&table)); - guard.insert(table_id, key_extractor.clone()); + let table_id = table.id; + let key_extractor = FilterKeyExtractorImpl::from_table(&table); + let vnode = table.vnode_count(); + guard.insert(table_id, table); 
multi_filter_key_extractor.register(table_id, key_extractor); + table_id_to_vnode.insert(table_id, vnode); } } } - Ok(FilterKeyExtractorImpl::Multi(multi_filter_key_extractor)) + Ok(Arc::new(CompactionCatalogAgent::new( + FilterKeyExtractorImpl::Multi(multi_filter_key_extractor), + table_id_to_vnode, + ))) } -} -/// `RpcFilterKeyExtractorManager` is a wrapper for inner, and provide a protected read and write -/// interface, its thread safe -pub struct RpcFilterKeyExtractorManager { - inner: FilterKeyExtractorManagerInner, -} + /// `build_compaction_catalog_agent` is used to build `CompactionCatalogAgent` by `table_catalogs` + pub fn build_compaction_catalog_agent( + table_catalogs: HashMap, + ) -> CompactionCatalogAgentRef { + let mut multi_filter_key_extractor = MultiFilterKeyExtractor::default(); + let mut table_id_to_vnode = HashMap::new(); + for (table_id, table_catalog) in table_catalogs { + multi_filter_key_extractor + .register(table_id, FilterKeyExtractorImpl::from_table(&table_catalog)); + table_id_to_vnode.insert(table_id, table_catalog.vnode_count()); + } -impl Default for RpcFilterKeyExtractorManager { - fn default() -> Self { - Self::new(Box::::default()) + Arc::new(CompactionCatalogAgent::new( + FilterKeyExtractorImpl::Multi(multi_filter_key_extractor), + table_id_to_vnode, + )) } } -impl RpcFilterKeyExtractorManager { - pub fn new(table_accessor: Box) -> Self { +/// `CompactionCatalogAgent` is a wrapper of `filter_key_extractor_manager` and `table_id_to_vnode` +/// The `CompactionCatalogAgent` belongs to a compaction task call, which we will build from the `table_ids` contained in a compact task and use it during the compaction. +/// The `CompactionCatalogAgent` can act as a agent for the `CompactionCatalogManager`, providing `extract` and `vnode_count` capabilities. +pub struct CompactionCatalogAgent { + filter_key_extractor_manager: FilterKeyExtractorImpl, + table_id_to_vnode: HashMap, +} + +impl CompactionCatalogAgent { + pub fn new( + filter_key_extractor_manager: FilterKeyExtractorImpl, + table_id_to_vnode: HashMap, + ) -> Self { Self { - inner: FilterKeyExtractorManagerInner { - table_id_to_filter_key_extractor: Default::default(), - table_accessor, - }, + filter_key_extractor_manager, + table_id_to_vnode, } } - /// Insert (`table_id`, `filter_key_extractor`) as mapping to `HashMap` for `acquire` - pub fn update(&self, table_id: u32, filter_key_extractor: Arc) { - info_in_release!("update key extractor of {}", table_id); - self.inner.update(table_id, filter_key_extractor); + pub fn dummy() -> Self { + Self { + filter_key_extractor_manager: FilterKeyExtractorImpl::Dummy(DummyFilterKeyExtractor), + table_id_to_vnode: Default::default(), + } } - /// Remove a mapping by `table_id` - pub fn remove(&self, table_id: u32) { - info_in_release!("remove key extractor of {}", table_id); - self.inner.remove(table_id); - } + pub fn for_test(table_ids: Vec) -> Arc { + let full_key_filter_key_extractor = + FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor); - /// Sync all filter key extractors by snapshot - pub fn sync(&self, filter_key_extractor_map: HashMap>) { - self.inner.sync(filter_key_extractor_map) - } + let table_id_to_vnode = table_ids + .into_iter() + .map(|table_id| (table_id, VirtualNode::COUNT_FOR_TEST)) + .collect(); - /// Acquire a `MultiFilterKeyExtractor` by `table_id_set` - /// Internally, try to get all `filter_key_extractor` from `hashmap`. 
Will block the caller if - /// `table_id` does not util version update (notify), and retry to get - async fn acquire(&self, table_id_set: HashSet) -> HummockResult { - self.inner.acquire(table_id_set).await + Arc::new(CompactionCatalogAgent::new( + full_key_filter_key_extractor, + table_id_to_vnode, + )) } } -#[derive(Clone)] -pub enum FilterKeyExtractorManager { - RpcFilterKeyExtractorManager(Arc), - StaticFilterKeyExtractorManager(Arc), -} - -impl FilterKeyExtractorManager { - pub async fn acquire( - &self, - table_id_set: HashSet, - ) -> HummockResult { - match self { - FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - rpc_filter_key_exactor_manager, - ) => rpc_filter_key_exactor_manager.acquire(table_id_set).await, - FilterKeyExtractorManager::StaticFilterKeyExtractorManager( - static_filter_key_extractor_manager, - ) => static_filter_key_extractor_manager.acquire(table_id_set), - } +impl CompactionCatalogAgent { + pub fn extract<'a>(&self, full_key: &'a [u8]) -> &'a [u8] { + self.filter_key_extractor_manager.extract(full_key) } -} -#[derive(Clone)] -pub struct StaticFilterKeyExtractorManager { - id_to_table: HashMap, -} + pub fn vnode_count(&self, table_id: StateTableId) -> usize { + *self.table_id_to_vnode.get(&table_id).unwrap_or_else(|| { + panic!( + "table_id not found {} all_table_ids {:?}", + table_id, + self.table_id_to_vnode.keys() + ) + }) + } -impl StaticFilterKeyExtractorManager { - pub fn new(id_to_table: HashMap) -> Self { - Self { id_to_table } + pub fn table_id_to_vnode_ref(&self) -> &HashMap { + &self.table_id_to_vnode } - fn acquire(&self, table_id_set: HashSet) -> HummockResult { - let mut multi_filter_key_extractor = MultiFilterKeyExtractor::default(); - for table_id in table_id_set { - if let Some(table) = self.id_to_table.get(&table_id) { - let key_extractor = Arc::new(FilterKeyExtractorImpl::from_table(table)); - multi_filter_key_extractor.register(table_id, key_extractor); - } else { - return Err(HummockError::other(format!( - "table {} is absent in id_to_table, need to request rpc list_tables to get the schema", table_id, - ))); - } - } - Ok(FilterKeyExtractorImpl::Multi(multi_filter_key_extractor)) + pub fn table_ids(&self) -> impl Iterator + '_ { + self.table_id_to_vnode.keys().cloned() } } -pub type FilterKeyExtractorManagerRef = Arc; +pub type CompactionCatalogManagerRef = Arc; +pub type CompactionCatalogAgentRef = Arc; #[cfg(test)] mod tests { - use std::collections::HashSet; use std::mem; - use std::sync::Arc; use bytes::{BufMut, BytesMut}; use itertools::Itertools; @@ -453,9 +467,8 @@ mod tests { use risingwave_pb::plan_common::PbColumnCatalog; use super::{DummyFilterKeyExtractor, FilterKeyExtractor, SchemaFilterKeyExtractor}; - use crate::filter_key_extractor::{ + use crate::compaction_catalog_manager::{ FilterKeyExtractorImpl, FullKeyFilterKeyExtractor, MultiFilterKeyExtractor, - RpcFilterKeyExtractorManager, }; const fn dummy_vnode() -> [u8; VirtualNode::SIZE] { VirtualNode::from_index(233).to_be_bytes() @@ -595,7 +608,7 @@ mod tests { let schema_filter_key_extractor = SchemaFilterKeyExtractor::new(&prost_table); multi_filter_key_extractor.register( 1, - Arc::new(FilterKeyExtractorImpl::Schema(schema_filter_key_extractor)), + FilterKeyExtractorImpl::Schema(schema_filter_key_extractor), ); let order_types: Vec = vec![OrderType::ascending(), OrderType::ascending()]; let schema = vec![DataType::Int64, DataType::Varchar]; @@ -632,7 +645,7 @@ mod tests { let schema_filter_key_extractor = SchemaFilterKeyExtractor::new(&prost_table); 
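
A small sketch of the acquired-agent API introduced above (`extract` and `vnode_count`), built with the `for_test` constructor; the key bytes and table id are illustrative, and the full-key extractor makes `extract` an identity mapping.

```rust
use risingwave_common::hash::VirtualNode;
use risingwave_storage::compaction_catalog_manager::CompactionCatalogAgent;

#[test]
fn compaction_catalog_agent_usage() {
    let table_id = 1u32; // illustrative
    // `for_test` installs the full-key extractor and COUNT_FOR_TEST vnodes per id.
    let agent = CompactionCatalogAgent::for_test(vec![table_id]);

    // With the full-key extractor, the bloom-filter key is the input key itself.
    let user_key: &[u8] = b"\x00\x00\x00\x01some-user-key"; // illustrative key bytes
    assert_eq!(agent.extract(user_key), user_key);

    // Per-table vnode counts are served from the table_id -> vnode map.
    assert_eq!(agent.vnode_count(table_id), VirtualNode::COUNT_FOR_TEST);

    // Note: `vnode_count` panics for unknown table ids, so callers must only pass
    // ids that were part of the acquire/for_test set.
}
```
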
multi_filter_key_extractor.register( 2, - Arc::new(FilterKeyExtractorImpl::Schema(schema_filter_key_extractor)), + FilterKeyExtractorImpl::Schema(schema_filter_key_extractor), ); let order_types: Vec = vec![OrderType::ascending(), OrderType::ascending()]; let schema = vec![DataType::Int64, DataType::Varchar]; @@ -666,27 +679,26 @@ mod tests { } #[tokio::test] - async fn test_filter_key_extractor_manager() { - let filter_key_extractor_manager = Arc::new(RpcFilterKeyExtractorManager::default()); - - filter_key_extractor_manager.update( - 1, - Arc::new(FilterKeyExtractorImpl::Dummy(DummyFilterKeyExtractor)), - ); - - let remaining_table_id_set = HashSet::from([1]); - let multi_filter_key_extractor = filter_key_extractor_manager - .acquire(remaining_table_id_set) - .await - .unwrap(); + async fn test_compaction_catalog_manager_exception() { + let compaction_catalog_manager = super::CompactionCatalogManager::default(); - match multi_filter_key_extractor { - FilterKeyExtractorImpl::Multi(multi_filter_key_extractor) => { - assert_eq!(1, multi_filter_key_extractor.size()); + { + let ret = compaction_catalog_manager.acquire(vec![]).await; + assert!(ret.is_err()); + if let Err(e) = ret { + assert_eq!(e.to_string(), "Other error: table_id_set is empty"); } + } - _ => { - unreachable!() + { + // network error with FakeRemoteTableAccessor + let ret = compaction_catalog_manager.acquire(vec![1]).await; + assert!(ret.is_err()); + if let Err(e) = ret { + assert_eq!( + e.to_string(), + "Other error: request rpc list_tables for meta failed: fake accessor does not support fetch remote table" + ); } } } diff --git a/src/storage/src/hummock/compactor/compaction_utils.rs b/src/storage/src/hummock/compactor/compaction_utils.rs index 3b032123f426a..5e22d3e45701b 100644 --- a/src/storage/src/hummock/compactor/compaction_utils.rs +++ b/src/storage/src/hummock/compactor/compaction_utils.rs @@ -33,7 +33,7 @@ use risingwave_pb::hummock::{BloomFilterType, PbLevelType, PbTableSchema}; use tokio::time::Instant; pub use super::context::CompactorContext; -use crate::filter_key_extractor::FilterKeyExtractorImpl; +use crate::compaction_catalog_manager::CompactionCatalogAgentRef; use crate::hummock::compactor::{ ConcatSstableIterator, MultiCompactionFilter, StateCleanUpCompactionFilter, TaskProgress, TtlCompactionFilter, @@ -55,7 +55,7 @@ pub struct RemoteBuilderFactory { pub options: SstableBuilderOptions, pub policy: CachePolicy, pub remote_rpc_cost: Arc, - pub filter_key_extractor: Arc, + pub compaction_catalog_agent_ref: CompactionCatalogAgentRef, pub sstable_writer_factory: W, pub _phantom: PhantomData, } @@ -87,7 +87,7 @@ impl TableBuilderFactory for RemoteBu self.options.capacity / DEFAULT_ENTRY_SIZE + 1, ), self.options.clone(), - self.filter_key_extractor.clone(), + self.compaction_catalog_agent_ref.clone(), Some(self.limiter.clone()), ); Ok(builder) @@ -674,6 +674,6 @@ pub fn calculate_task_parallelism_impl( compaction_size: u64, max_sub_compaction: u32, ) -> usize { - let parallelism = (compaction_size + parallel_compact_size - 1) / parallel_compact_size; + let parallelism = compaction_size.div_ceil(parallel_compact_size); worker_num.min(parallelism.min(max_sub_compaction as u64) as usize) } diff --git a/src/storage/src/hummock/compactor/compactor_runner.rs b/src/storage/src/hummock/compactor/compactor_runner.rs index 57b2b929db411..f6b1ee832375f 100644 --- a/src/storage/src/hummock/compactor/compactor_runner.rs +++ b/src/storage/src/hummock/compactor/compactor_runner.rs @@ -24,6 +24,7 @@ use 
risingwave_hummock_sdk::compact::{ compact_task_to_string, estimate_memory_for_compact_task, statistics_compact_task, }; use risingwave_hummock_sdk::compact_task::CompactTask; +use risingwave_hummock_sdk::compaction_group::StateTableId; use risingwave_hummock_sdk::key::{FullKey, FullKeyTracker}; use risingwave_hummock_sdk::key_range::{KeyRange, KeyRangeCommon}; use risingwave_hummock_sdk::sstable_info::SstableInfo; @@ -40,7 +41,7 @@ use tokio::sync::oneshot::Receiver; use super::iterator::MonitoredCompactorIterator; use super::task_progress::TaskProgress; use super::{CompactionStatistics, TaskConfig}; -use crate::filter_key_extractor::{FilterKeyExtractorImpl, FilterKeyExtractorManager}; +use crate::compaction_catalog_manager::{CompactionCatalogAgentRef, CompactionCatalogManagerRef}; use crate::hummock::compactor::compaction_utils::{ build_multi_compaction_filter, estimate_task_output_capacity, generate_splits_for_task, metrics_report_for_task, optimize_by_copy_block, @@ -134,7 +135,7 @@ impl CompactorRunner { pub async fn run( &self, compaction_filter: impl CompactionFilter, - filter_key_extractor: Arc, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, task_progress: Arc, ) -> HummockResult { let iter = self.build_sst_iter(task_progress.clone())?; @@ -143,7 +144,7 @@ impl CompactorRunner { .compact_key_range( iter, compaction_filter, - filter_key_extractor, + compaction_catalog_agent_ref, Some(task_progress), Some(self.compact_task.task_id), Some(self.split_index), @@ -302,12 +303,12 @@ pub fn partition_overlapping_sstable_infos( /// Handles a compaction task and reports its status to hummock manager. /// Always return `Ok` and let hummock manager handle errors. -pub async fn compact( +pub async fn compact_with_agent( compactor_context: CompactorContext, mut compact_task: CompactTask, mut shutdown_rx: Receiver<()>, object_id_getter: Box, - filter_key_extractor_manager: FilterKeyExtractorManager, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, ) -> ( ( CompactTask, @@ -330,35 +331,6 @@ pub async fn compact( .start_timer(); let multi_filter = build_multi_compaction_filter(&compact_task); - - let existing_table_ids: HashSet = - HashSet::from_iter(compact_task.existing_table_ids.clone()); - let compact_table_ids = HashSet::from_iter( - compact_task - .input_ssts - .iter() - .flat_map(|level| level.table_infos.iter()) - .flat_map(|sst| sst.table_ids.clone()) - .filter(|table_id| existing_table_ids.contains(table_id)), - ); - - let multi_filter_key_extractor = match build_filter_key_extractor( - &compact_task, - filter_key_extractor_manager, - &compact_table_ids, - ) - .await - { - Some(multi_filter_key_extractor) => multi_filter_key_extractor, - None => { - let task_status = TaskStatus::ExecuteFailed; - return ( - compact_done(compact_task, context.clone(), vec![], task_status), - None, - ); - } - }; - let mut task_status = TaskStatus::Success; let optimize_by_copy_block = optimize_by_copy_block(&compact_task, &context); @@ -446,7 +418,7 @@ pub async fn compact( let runner = fast_compactor_runner::CompactorRunner::new( context.clone(), compact_task.clone(), - multi_filter_key_extractor.clone(), + compaction_catalog_agent_ref.clone(), object_id_getter.clone(), task_progress_guard.progress.clone(), ); @@ -490,7 +462,7 @@ pub async fn compact( } for (split_index, _) in compact_task.splits.iter().enumerate() { let filter = multi_filter.clone(); - let multi_filter_key_extractor = multi_filter_key_extractor.clone(); + let compaction_catalog_agent_ref = 
compaction_catalog_agent_ref.clone(); let compactor_runner = CompactorRunner::new( split_index, compactor_context.clone(), @@ -500,7 +472,7 @@ pub async fn compact( let task_progress = task_progress_guard.progress.clone(); let runner = async move { compactor_runner - .run(filter, multi_filter_key_extractor, task_progress) + .run(filter, compaction_catalog_agent_ref, task_progress) .await }; let traced = match context.await_tree_reg.as_ref() { @@ -587,6 +559,93 @@ pub async fn compact( ) } +/// Handles a compaction task and reports its status to hummock manager. +/// Always return `Ok` and let hummock manager handle errors. +pub async fn compact( + compactor_context: CompactorContext, + compact_task: CompactTask, + shutdown_rx: Receiver<()>, + object_id_getter: Box, + compaction_catalog_manager_ref: CompactionCatalogManagerRef, +) -> ( + ( + CompactTask, + HashMap, + HashMap, + ), + Option, +) { + let existing_table_ids: HashSet = + HashSet::from_iter(compact_task.existing_table_ids.clone()); + let compact_table_ids = Vec::from_iter( + compact_task + .input_ssts + .iter() + .flat_map(|level| level.table_infos.iter()) + .flat_map(|sst| sst.table_ids.clone()) + .filter(|table_id| existing_table_ids.contains(table_id)) + .sorted() + .unique(), + ); + + let compaction_catalog_agent_ref = match compaction_catalog_manager_ref + .acquire(compact_table_ids.clone()) + .await + { + Ok(compaction_catalog_agent_ref) => { + let acquire_table_ids: HashSet = + compaction_catalog_agent_ref.table_ids().collect(); + if acquire_table_ids.len() != compact_table_ids.len() { + let diff = compact_table_ids + .into_iter() + .collect::>() + .symmetric_difference(&acquire_table_ids) + .cloned() + .collect::>(); + tracing::warn!( + dif= ?diff, + "Some table ids are not acquired." + ); + return ( + compact_done( + compact_task, + compactor_context.clone(), + vec![], + TaskStatus::ExecuteFailed, + ), + None, + ); + } + + compaction_catalog_agent_ref + } + Err(e) => { + tracing::warn!( + error = %e.as_report(), + "Failed to acquire compaction catalog agent" + ); + return ( + compact_done( + compact_task, + compactor_context.clone(), + vec![], + TaskStatus::ExecuteFailed, + ), + None, + ); + } + }; + + compact_with_agent( + compactor_context, + compact_task, + shutdown_rx, + object_id_getter, + compaction_catalog_agent_ref, + ) + .await +} + /// Fills in the compact task and tries to report the task result to meta node. pub(crate) fn compact_done( mut compact_task: CompactTask, @@ -810,39 +869,6 @@ where Ok(compaction_statistics) } -async fn build_filter_key_extractor( - compact_task: &CompactTask, - filter_key_extractor_manager: FilterKeyExtractorManager, - compact_table_ids: &HashSet, -) -> Option> { - let multi_filter_key_extractor = match filter_key_extractor_manager - .acquire(compact_table_ids.clone()) - .await - { - Err(e) => { - tracing::error!(error = %e.as_report(), "Failed to fetch filter key extractor tables [{:?}], it may caused by some RPC error", compact_task.existing_table_ids); - return None; - } - Ok(extractor) => extractor, - }; - - if let FilterKeyExtractorImpl::Multi(multi) = &multi_filter_key_extractor { - let found_tables = multi.get_existing_table_ids(); - let removed_tables = compact_table_ids - .iter() - .filter(|table_id| !found_tables.contains(table_id)) - .collect_vec(); - if !removed_tables.is_empty() { - tracing::error!("Failed to fetch filter key extractor tables [{:?}. [{:?}] may be removed by meta-service. 
", compact_table_ids, removed_tables); - return None; - } - } - - let multi_filter_key_extractor = Arc::new(multi_filter_key_extractor); - - Some(multi_filter_key_extractor) -} - #[cfg(test)] pub mod tests { use risingwave_hummock_sdk::can_concat; diff --git a/src/storage/src/hummock/compactor/fast_compactor_runner.rs b/src/storage/src/hummock/compactor/fast_compactor_runner.rs index 641d866e544b5..6cdf38261cdea 100644 --- a/src/storage/src/hummock/compactor/fast_compactor_runner.rs +++ b/src/storage/src/hummock/compactor/fast_compactor_runner.rs @@ -30,7 +30,7 @@ use risingwave_hummock_sdk::sstable_info::SstableInfo; use risingwave_hummock_sdk::table_stats::TableStats; use risingwave_hummock_sdk::{can_concat, compact_task_to_string, EpochWithGap, LocalSstableInfo}; -use crate::filter_key_extractor::FilterKeyExtractorImpl; +use crate::compaction_catalog_manager::CompactionCatalogAgentRef; use crate::hummock::block_stream::BlockDataStream; use crate::hummock::compactor::task_progress::TaskProgress; use crate::hummock::compactor::{ @@ -359,7 +359,7 @@ impl CompactorRunner { pub fn new( context: CompactorContext, task: CompactTask, - filter_key_extractor: Arc, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, object_id_getter: Box, task_progress: Arc, ) -> Self { @@ -391,7 +391,7 @@ impl CompactorRunner { options, policy: task_config.cache_policy, remote_rpc_cost: get_id_time, - filter_key_extractor, + compaction_catalog_agent_ref: compaction_catalog_agent_ref.clone(), sstable_writer_factory: factory, _phantom: PhantomData, }; @@ -403,6 +403,7 @@ impl CompactorRunner { context .storage_opts .compactor_concurrent_uploading_sst_count, + compaction_catalog_agent_ref, ); assert_eq!( task.input_ssts.len(), diff --git a/src/storage/src/hummock/compactor/mod.rs b/src/storage/src/hummock/compactor/mod.rs index 3a4c487b217a8..772df4050b66e 100644 --- a/src/storage/src/hummock/compactor/mod.rs +++ b/src/storage/src/hummock/compactor/mod.rs @@ -80,8 +80,8 @@ use super::multi_builder::CapacitySplitTableBuilder; use super::{ GetObjectId, HummockResult, SstableBuilderOptions, SstableObjectIdManager, Xor16FilterBuilder, }; -use crate::filter_key_extractor::{ - FilterKeyExtractorImpl, FilterKeyExtractorManager, StaticFilterKeyExtractorManager, +use crate::compaction_catalog_manager::{ + CompactionCatalogAgentRef, CompactionCatalogManager, CompactionCatalogManagerRef, }; use crate::hummock::compactor::compaction_utils::calculate_task_parallelism; use crate::hummock::compactor::compactor_runner::{compact_and_build_sst, compact_done}; @@ -130,7 +130,7 @@ impl Compactor { &self, iter: impl HummockIterator, compaction_filter: impl CompactionFilter, - filter_key_extractor: Arc, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, task_progress: Option>, task_id: Option, split_index: Option, @@ -155,7 +155,7 @@ impl Compactor { factory, iter, compaction_filter, - filter_key_extractor, + compaction_catalog_agent_ref, task_progress.clone(), self.object_id_getter.clone(), ) @@ -166,7 +166,7 @@ impl Compactor { factory, iter, compaction_filter, - filter_key_extractor, + compaction_catalog_agent_ref, task_progress.clone(), self.object_id_getter.clone(), ) @@ -230,7 +230,7 @@ impl Compactor { writer_factory: F, iter: impl HummockIterator, compaction_filter: impl CompactionFilter, - filter_key_extractor: Arc, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, task_progress: Option>, object_id_getter: Box, ) -> HummockResult<(Vec, CompactionStatistics)> { @@ -240,7 +240,7 @@ impl Compactor { options: 
self.options.clone(), policy: self.task_config.cache_policy, remote_rpc_cost: self.get_id_time.clone(), - filter_key_extractor, + compaction_catalog_agent_ref: compaction_catalog_agent_ref.clone(), sstable_writer_factory: writer_factory, _phantom: PhantomData, }; @@ -253,6 +253,7 @@ impl Compactor { self.context .storage_opts .compactor_concurrent_uploading_sst_count, + compaction_catalog_agent_ref, ); let compaction_statistics = compact_and_build_sst( &mut sst_builder, @@ -280,7 +281,7 @@ pub fn start_compactor( compactor_context: CompactorContext, hummock_meta_client: Arc, sstable_object_id_manager: Arc, - filter_key_extractor_manager: FilterKeyExtractorManager, + compaction_catalog_manager_ref: CompactionCatalogManagerRef, ) -> (JoinHandle<()>, Sender<()>) { type CompactionShutdownMap = Arc>>>; let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel(); @@ -470,7 +471,7 @@ pub fn start_compactor( let meta_client = hummock_meta_client.clone(); let sstable_object_id_manager = sstable_object_id_manager.clone(); - let filter_key_extractor_manager = filter_key_extractor_manager.clone(); + let compaction_catalog_manager_ref = compaction_catalog_manager_ref.clone(); match event { ResponseEvent::CompactTask(compact_task) => { @@ -515,14 +516,16 @@ pub fn start_compactor( let (tx, rx) = tokio::sync::oneshot::channel(); let task_id = compact_task.task_id; shutdown.lock().unwrap().insert(task_id, tx); - let ((compact_task, table_stats, object_timestamps), _memory_tracker) = - compactor_runner::compact( - context.clone(), - compact_task, - rx, - Box::new(sstable_object_id_manager.clone()), - filter_key_extractor_manager.clone(), - ).await; + + let ((compact_task, table_stats, object_timestamps), _memory_tracker)= compactor_runner::compact( + context.clone(), + compact_task, + rx, + Box::new(sstable_object_id_manager.clone()), + compaction_catalog_manager_ref.clone(), + ) + .await; + shutdown.lock().unwrap().remove(&task_id); running_task_parallelism.fetch_sub(parallelism as u32, Ordering::SeqCst); @@ -703,16 +706,10 @@ pub fn start_shared_compactor( output_object_ids, task: dispatch_task, } = request.into_inner(); - let id_to_tables = tables.into_iter().fold(HashMap::new(), |mut acc, table| { + let table_id_to_catalog = tables.into_iter().fold(HashMap::new(), |mut acc, table| { acc.insert(table.id, table); acc }); - let static_filter_key_extractor_manager: Arc = - Arc::new(StaticFilterKeyExtractorManager::new(id_to_tables)); - let filter_key_extractor_manager = - FilterKeyExtractorManager::StaticFilterKeyExtractorManager( - static_filter_key_extractor_manager, - ); let mut output_object_ids_deque: VecDeque<_> = VecDeque::new(); output_object_ids_deque.extend(output_object_ids); @@ -725,12 +722,13 @@ pub fn start_shared_compactor( let task_id = compact_task.task_id; shutdown.lock().unwrap().insert(task_id, tx); - let ((compact_task, table_stats, object_timestamps), _memory_tracker)= compactor_runner::compact( + let compaction_catalog_agent_ref = CompactionCatalogManager::build_compaction_catalog_agent(table_id_to_catalog); + let ((compact_task, table_stats, object_timestamps), _memory_tracker)= compactor_runner::compact_with_agent( context.clone(), compact_task, rx, Box::new(shared_compactor_object_id_manager), - filter_key_extractor_manager.clone(), + compaction_catalog_agent_ref, ) .await; shutdown.lock().unwrap().remove(&task_id); diff --git a/src/storage/src/hummock/compactor/shared_buffer_compact.rs b/src/storage/src/hummock/compactor/shared_buffer_compact.rs index 
6ca42e41e3d92..b7ae13b2c1067 100644 --- a/src/storage/src/hummock/compactor/shared_buffer_compact.rs +++ b/src/storage/src/hummock/compactor/shared_buffer_compact.rs @@ -32,7 +32,7 @@ use risingwave_pb::hummock::compact_task; use thiserror_ext::AsReport; use tracing::{error, warn}; -use crate::filter_key_extractor::{FilterKeyExtractorImpl, FilterKeyExtractorManager}; +use crate::compaction_catalog_manager::{CompactionCatalogAgentRef, CompactionCatalogManagerRef}; use crate::hummock::compactor::compaction_filter::DummyCompactionFilter; use crate::hummock::compactor::context::{await_tree_key, CompactorContext}; use crate::hummock::compactor::{check_flush_result, CompactOutput, Compactor}; @@ -57,14 +57,14 @@ pub async fn compact( context: CompactorContext, sstable_object_id_manager: SstableObjectIdManagerRef, payload: Vec, - filter_key_extractor_manager: FilterKeyExtractorManager, + compaction_catalog_manager_ref: CompactionCatalogManagerRef, ) -> HummockResult { let new_value_payload = payload.clone(); let new_value_future = async { compact_shared_buffer::( context.clone(), sstable_object_id_manager.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_manager_ref.clone(), new_value_payload, ) .map_ok(move |results| results.into_iter()) @@ -84,7 +84,7 @@ pub async fn compact( compact_shared_buffer::( context.clone(), sstable_object_id_manager.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_manager_ref.clone(), old_value_payload, ) .await @@ -109,29 +109,26 @@ pub async fn compact( async fn compact_shared_buffer( context: CompactorContext, sstable_object_id_manager: SstableObjectIdManagerRef, - filter_key_extractor_manager: FilterKeyExtractorManager, + compaction_catalog_manager_ref: CompactionCatalogManagerRef, mut payload: Vec, ) -> HummockResult> { if !IS_NEW_VALUE { assert!(payload.iter().all(|imm| imm.has_old_value())); } // Local memory compaction looks at all key ranges. 
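
The new `compact` entry point above verifies that the catalog agent resolved every requested table id before running the task; a condensed sketch of that check, assuming plain `u32` sets in place of the real `CompactTask` plumbing (`validate_acquired_tables` is a hypothetical helper, not part of this diff).

```rust
use std::collections::HashSet;

use itertools::Itertools;

/// Sketch of the id hygiene performed before compaction: keep only table ids that
/// still exist, dedup them, and verify that the catalog agent resolved every one.
/// `acquired` stands in for the set returned by `compaction_catalog_agent_ref.table_ids()`.
fn validate_acquired_tables(
    existing_table_ids: &HashSet<u32>,
    input_sst_table_ids: impl IntoIterator<Item = u32>,
    acquired: &HashSet<u32>,
) -> Result<(), Vec<u32>> {
    let compact_table_ids: Vec<u32> = input_sst_table_ids
        .into_iter()
        .filter(|id| existing_table_ids.contains(id))
        .sorted()
        .unique()
        .collect();

    if acquired.len() == compact_table_ids.len() {
        return Ok(());
    }

    // Report the ids that differ between the requested and the acquired sets,
    // mirroring the `symmetric_difference` warning before the task is failed.
    let requested: HashSet<u32> = compact_table_ids.into_iter().collect();
    Err(requested
        .symmetric_difference(acquired)
        .copied()
        .sorted()
        .collect())
}
```
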
- - let mut existing_table_ids: HashSet = payload + let existing_table_ids: HashSet = payload .iter() .map(|imm| imm.table_id.table_id) .dedup() .collect(); assert!(!existing_table_ids.is_empty()); - let multi_filter_key_extractor = filter_key_extractor_manager - .acquire(existing_table_ids.clone()) + let compaction_catalog_agent_ref = compaction_catalog_manager_ref + .acquire(existing_table_ids.iter().copied().collect()) .await?; - if let FilterKeyExtractorImpl::Multi(multi) = &multi_filter_key_extractor { - existing_table_ids = multi.get_existing_table_ids(); - } - let multi_filter_key_extractor = Arc::new(multi_filter_key_extractor); - + let existing_table_ids = compaction_catalog_agent_ref + .table_ids() + .collect::>(); payload.retain(|imm| { let ret = existing_table_ids.contains(&imm.table_id.table_id); if !ret { @@ -167,7 +164,7 @@ async fn compact_shared_buffer( forward_iters.push(imm.clone().into_directed_iter::()); } let compaction_executor = context.compaction_executor.clone(); - let multi_filter_key_extractor = multi_filter_key_extractor.clone(); + let compaction_catalog_agent_ref = compaction_catalog_agent_ref.clone(); let handle = compaction_executor.spawn({ static NEXT_SHARED_BUFFER_COMPACT_ID: LazyLock = LazyLock::new(|| AtomicUsize::new(0)); @@ -187,7 +184,7 @@ async fn compact_shared_buffer( }); let future = compactor.run( MergeIterator::new(forward_iters), - multi_filter_key_extractor, + compaction_catalog_agent_ref, ); if let Some(root) = tree_root { root.instrument(future).left_future() @@ -550,7 +547,7 @@ impl SharedBufferCompactRunner { pub async fn run( self, iter: impl HummockIterator, - filter_key_extractor: Arc, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, ) -> HummockResult { let dummy_compaction_filter = DummyCompactionFilter {}; let (ssts, table_stats_map) = self @@ -558,7 +555,7 @@ impl SharedBufferCompactRunner { .compact_key_range( iter, dummy_compaction_filter, - filter_key_extractor, + compaction_catalog_agent_ref, None, None, None, diff --git a/src/storage/src/hummock/event_handler/hummock_event_handler.rs b/src/storage/src/hummock/event_handler/hummock_event_handler.rs index 7a33ed81b4373..908bb45a43fc4 100644 --- a/src/storage/src/hummock/event_handler/hummock_event_handler.rs +++ b/src/storage/src/hummock/event_handler/hummock_event_handler.rs @@ -37,7 +37,7 @@ use tracing::{debug, error, info, trace, warn}; use super::refiller::{CacheRefillConfig, CacheRefiller}; use super::{LocalInstanceGuard, LocalInstanceId, ReadVersionMappingType}; -use crate::filter_key_extractor::FilterKeyExtractorManager; +use crate::compaction_catalog_manager::CompactionCatalogManagerRef; use crate::hummock::compactor::{await_tree_key, compact, CompactorContext}; use crate::hummock::event_handler::refiller::{CacheRefillerEvent, SpawnRefillTask}; use crate::hummock::event_handler::uploader::{ @@ -207,14 +207,14 @@ pub struct HummockEventHandler { async fn flush_imms( payload: Vec, compactor_context: CompactorContext, - filter_key_extractor_manager: FilterKeyExtractorManager, + compaction_catalog_manager_ref: CompactionCatalogManagerRef, sstable_object_id_manager: Arc, ) -> HummockResult { compact( compactor_context, sstable_object_id_manager, payload, - filter_key_extractor_manager, + compaction_catalog_manager_ref, ) .verbose_instrument_await("shared_buffer_compact") .await @@ -225,7 +225,7 @@ impl HummockEventHandler { version_update_rx: UnboundedReceiver, pinned_version: PinnedVersion, compactor_context: CompactorContext, - filter_key_extractor_manager: 
FilterKeyExtractorManager, + compaction_catalog_manager_ref: CompactionCatalogManagerRef, sstable_object_id_manager: Arc, state_store_metrics: Arc, ) -> Self { @@ -251,7 +251,7 @@ impl HummockEventHandler { let upload_task_latency = upload_task_latency.clone(); let wait_poll_latency = wait_poll_latency.clone(); let upload_compactor_context = upload_compactor_context.clone(); - let filter_key_extractor_manager = filter_key_extractor_manager.clone(); + let compaction_catalog_manager_ref = compaction_catalog_manager_ref.clone(); let sstable_object_id_manager = sstable_object_id_manager.clone(); spawn({ let future = async move { @@ -262,7 +262,7 @@ impl HummockEventHandler { .flat_map(|imms| imms.into_iter()) .collect(), upload_compactor_context.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_manager_ref.clone(), sstable_object_id_manager.clone(), ) .await?; diff --git a/src/storage/src/hummock/event_handler/uploader/mod.rs b/src/storage/src/hummock/event_handler/uploader/mod.rs index 6a20f96008db6..8326b3b876890 100644 --- a/src/storage/src/hummock/event_handler/uploader/mod.rs +++ b/src/storage/src/hummock/event_handler/uploader/mod.rs @@ -655,6 +655,7 @@ impl TableUnsyncData { self.unsync_epochs.insert(epoch, ()); } + #[expect(clippy::type_complexity)] fn sync( &mut self, epoch: HummockEpoch, diff --git a/src/storage/src/hummock/event_handler/uploader/test_utils.rs b/src/storage/src/hummock/event_handler/uploader/test_utils.rs index 2f711ede7cefc..6eb41bda52071 100644 --- a/src/storage/src/hummock/event_handler/uploader/test_utils.rs +++ b/src/storage/src/hummock/event_handler/uploader/test_utils.rs @@ -271,6 +271,7 @@ impl HummockUploader { } } +#[expect(clippy::type_complexity)] pub(crate) fn prepare_uploader_order_test( config: &StorageOpts, skip_schedule: bool, diff --git a/src/storage/src/hummock/iterator/merge_inner.rs b/src/storage/src/hummock/iterator/merge_inner.rs index 54221ee8b70a9..68fbc7876f1e9 100644 --- a/src/storage/src/hummock/iterator/merge_inner.rs +++ b/src/storage/src/hummock/iterator/merge_inner.rs @@ -172,7 +172,7 @@ impl<'a, T: Ord> PeekMutGuard<'a, T> { } } -impl<'a, T: Ord> Deref for PeekMutGuard<'a, T> { +impl Deref for PeekMutGuard<'_, T> { type Target = T; fn deref(&self) -> &Self::Target { @@ -180,13 +180,13 @@ impl<'a, T: Ord> Deref for PeekMutGuard<'a, T> { } } -impl<'a, T: Ord> DerefMut for PeekMutGuard<'a, T> { +impl DerefMut for PeekMutGuard<'_, T> { fn deref_mut(&mut self) -> &mut Self::Target { self.peek.as_mut().expect("should not be None") } } -impl<'a, T: Ord> Drop for PeekMutGuard<'a, T> { +impl Drop for PeekMutGuard<'_, T> { /// When the guard is dropped, if `pop` or `used` is not called before it is dropped, we will /// call `PeekMut::pop` on the `PeekMut` and recycle the node to the unused list. fn drop(&mut self) { diff --git a/src/storage/src/hummock/iterator/mod.rs b/src/storage/src/hummock/iterator/mod.rs index a205baac0aa96..e0a1d433892d1 100644 --- a/src/storage/src/hummock/iterator/mod.rs +++ b/src/storage/src/hummock/iterator/mod.rs @@ -129,7 +129,7 @@ pub trait HummockIterator: Send { fn seek<'a>( &'a mut self, key: FullKey<&'a [u8]>, - ) -> impl Future> + Send + '_; + ) -> impl Future> + Send; /// take local statistic info from iterator to report metrics. 
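
A hedged sketch of the shared-buffer flush path above: acquire an agent for the imms' table ids, then let the caller retain only payload whose tables still exist. `resolve_flush_catalog` is a hypothetical helper, and the `HummockResult` re-export path is assumed from this diff's imports.

```rust
use std::collections::HashSet;
use std::sync::Arc;

use risingwave_storage::compaction_catalog_manager::{
    CompactionCatalogAgentRef, CompactionCatalogManager,
};
use risingwave_storage::hummock::HummockResult;

/// Sketch of the catalog lookup in `compact_shared_buffer`: acquire an agent for
/// the tables present in the flush payload and keep only the ids the agent knows.
async fn resolve_flush_catalog(
    manager: Arc<CompactionCatalogManager>,
    imm_table_ids: Vec<u32>,
) -> HummockResult<(CompactionCatalogAgentRef, HashSet<u32>)> {
    let agent = manager.acquire(imm_table_ids).await?;
    // Tables dropped since the imms were created are filtered out by the caller
    // using this set (`payload.retain(...)` in the real code path).
    let existing: HashSet<u32> = agent.table_ids().collect();
    Ok((agent, existing))
}
```
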
fn collect_local_statistic(&self, _stats: &mut StoreLocalStatistic); diff --git a/src/storage/src/hummock/observer_manager.rs b/src/storage/src/hummock/observer_manager.rs index a9171005aeaa9..7ba86a6f08977 100644 --- a/src/storage/src/hummock/observer_manager.rs +++ b/src/storage/src/hummock/observer_manager.rs @@ -12,9 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::HashMap; -use std::sync::Arc; - use risingwave_common_service::ObserverState; use risingwave_hummock_sdk::version::{HummockVersion, HummockVersionDelta}; use risingwave_hummock_trace::TraceSpan; @@ -24,13 +21,13 @@ use risingwave_pb::meta::subscribe_response::{Info, Operation}; use risingwave_pb::meta::SubscribeResponse; use tokio::sync::mpsc::UnboundedSender; -use crate::filter_key_extractor::{FilterKeyExtractorImpl, FilterKeyExtractorManagerRef}; +use crate::compaction_catalog_manager::CompactionCatalogManagerRef; use crate::hummock::backup_reader::BackupReaderRef; use crate::hummock::event_handler::HummockVersionUpdate; use crate::hummock::write_limiter::WriteLimiterRef; pub struct HummockObserverNode { - filter_key_extractor_manager: FilterKeyExtractorManagerRef, + compaction_catalog_manager: CompactionCatalogManagerRef, backup_reader: BackupReaderRef, write_limiter: WriteLimiterRef, version_update_sender: UnboundedSender, @@ -140,13 +137,13 @@ impl ObserverState for HummockObserverNode { impl HummockObserverNode { pub fn new( - filter_key_extractor_manager: FilterKeyExtractorManagerRef, + compaction_catalog_manager: CompactionCatalogManagerRef, backup_reader: BackupReaderRef, version_update_sender: UnboundedSender, write_limiter: WriteLimiterRef, ) -> Self { Self { - filter_key_extractor_manager, + compaction_catalog_manager, backup_reader, version_update_sender, version: 0, @@ -155,25 +152,19 @@ impl HummockObserverNode { } fn handle_catalog_snapshot(&mut self, tables: Vec
) { - let all_filter_key_extractors: HashMap> = tables - .iter() - .map(|t| (t.id, Arc::new(FilterKeyExtractorImpl::from_table(t)))) - .collect(); - self.filter_key_extractor_manager - .sync(all_filter_key_extractors); + self.compaction_catalog_manager + .sync(tables.into_iter().map(|t| (t.id, t)).collect()); } fn handle_catalog_notification(&mut self, operation: Operation, table_catalog: Table) { match operation { Operation::Add | Operation::Update => { - self.filter_key_extractor_manager.update( - table_catalog.id, - Arc::new(FilterKeyExtractorImpl::from_table(&table_catalog)), - ); + self.compaction_catalog_manager + .update(table_catalog.id, table_catalog); } Operation::Delete => { - self.filter_key_extractor_manager.remove(table_catalog.id); + self.compaction_catalog_manager.remove(table_catalog.id); } _ => panic!("receive an unsupported notify {:?}", operation), diff --git a/src/storage/src/hummock/sstable/builder.rs b/src/storage/src/hummock/sstable/builder.rs index 24e7e14e02e0f..0ad46e5e000a2 100644 --- a/src/storage/src/hummock/sstable/builder.rs +++ b/src/storage/src/hummock/sstable/builder.rs @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::BTreeSet; +use std::collections::{BTreeSet, HashMap}; use std::sync::Arc; use std::time::SystemTime; use bytes::{Bytes, BytesMut}; +use risingwave_hummock_sdk::compaction_group::StateTableId; use risingwave_hummock_sdk::key::{user_key, FullKey, MAX_KEY_LEN}; use risingwave_hummock_sdk::key_range::KeyRange; use risingwave_hummock_sdk::sstable_info::SstableInfo; @@ -29,7 +30,10 @@ use super::{ BlockBuilder, BlockBuilderOptions, BlockMeta, SstableMeta, SstableWriter, DEFAULT_BLOCK_SIZE, DEFAULT_ENTRY_SIZE, DEFAULT_RESTART_INTERVAL, VERSION, }; -use crate::filter_key_extractor::{FilterKeyExtractorImpl, FullKeyFilterKeyExtractor}; +use crate::compaction_catalog_manager::{ + CompactionCatalogAgent, CompactionCatalogAgentRef, FilterKeyExtractorImpl, + FullKeyFilterKeyExtractor, +}; use crate::hummock::sstable::{utils, FilterBuilder}; use crate::hummock::value::HummockValue; use crate::hummock::{ @@ -98,7 +102,8 @@ pub struct SstableBuilder { writer: W, /// Current block builder. block_builder: BlockBuilder, - filter_key_extractor: Arc, + + compaction_catalog_agent_ref: CompactionCatalogAgentRef, /// Block metadata vec. 
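
A sketch of how the observer changes above keep the compaction catalog in sync; `CatalogChange` is an illustrative stand-in for the notification `Operation`, while the `update`/`remove`/`sync` calls are the ones added in this diff.

```rust
use std::sync::Arc;

use risingwave_pb::catalog::Table;
use risingwave_storage::compaction_catalog_manager::CompactionCatalogManager;

/// Illustrative stand-in for the subset of notifications the observer handles.
enum CatalogChange {
    AddOrUpdate(Table),
    Delete(u32),
    Snapshot(Vec<Table>),
}

/// Mirrors `handle_catalog_notification` / `handle_catalog_snapshot` above:
/// incremental changes update or remove a single entry, snapshots replace the map.
fn apply_catalog_change(manager: &Arc<CompactionCatalogManager>, change: CatalogChange) {
    match change {
        CatalogChange::AddOrUpdate(table) => manager.update(table.id, table),
        CatalogChange::Delete(table_id) => manager.remove(table_id),
        CatalogChange::Snapshot(tables) => {
            manager.sync(tables.into_iter().map(|t| (t.id, t)).collect())
        }
    }
}
```
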
block_metas: Vec, @@ -126,13 +131,23 @@ pub struct SstableBuilder { } impl SstableBuilder { - pub fn for_test(sstable_id: u64, writer: W, options: SstableBuilderOptions) -> Self { + pub fn for_test( + sstable_id: u64, + writer: W, + options: SstableBuilderOptions, + table_id_to_vnode: HashMap, + ) -> Self { + let compaction_catalog_agent_ref = Arc::new(CompactionCatalogAgent::new( + FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor), + table_id_to_vnode, + )); + Self::new( sstable_id, writer, Xor16FilterBuilder::new(options.capacity / DEFAULT_ENTRY_SIZE + 1), options, - Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), + compaction_catalog_agent_ref, None, ) } @@ -144,7 +159,7 @@ impl SstableBuilder { writer: W, filter_builder: F, options: SstableBuilderOptions, - filter_key_extractor: Arc, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, memory_limiter: Option>, ) -> Self { Self { @@ -163,7 +178,7 @@ impl SstableBuilder { raw_value: BytesMut::new(), last_full_key: vec![], sstable_id, - filter_key_extractor, + compaction_catalog_agent_ref, table_stats: Default::default(), last_table_stats: Default::default(), epoch_set: BTreeSet::default(), @@ -340,7 +355,7 @@ impl SstableBuilder { let table_id = full_key.user_key.table_id.table_id(); let mut extract_key = user_key(&self.raw_key); - extract_key = self.filter_key_extractor.extract(extract_key); + extract_key = self.compaction_catalog_agent_ref.extract(extract_key); // add bloom_filter check if !extract_key.is_empty() { self.filter_builder.add_key(extract_key, table_id); @@ -689,7 +704,7 @@ impl SstableBuilderOutputStats { #[cfg(test)] pub(super) mod tests { - use std::collections::Bound; + use std::collections::{Bound, HashMap}; use risingwave_common::catalog::TableId; use risingwave_common::hash::VirtualNode; @@ -698,7 +713,9 @@ pub(super) mod tests { use super::*; use crate::assert_bytes_eq; - use crate::filter_key_extractor::{DummyFilterKeyExtractor, MultiFilterKeyExtractor}; + use crate::compaction_catalog_manager::{ + CompactionCatalogAgent, DummyFilterKeyExtractor, MultiFilterKeyExtractor, + }; use crate::hummock::iterator::test_utils::mock_sstable_store; use crate::hummock::sstable::xor_filter::BlockedXor16FilterBuilder; use crate::hummock::test_utils::{ @@ -718,7 +735,8 @@ pub(super) mod tests { ..Default::default() }; - let b = SstableBuilder::for_test(0, mock_sst_writer(&opt), opt); + let table_id_to_vnode = HashMap::from_iter(vec![(0, VirtualNode::COUNT_FOR_TEST)]); + let b = SstableBuilder::for_test(0, mock_sst_writer(&opt), opt, table_id_to_vnode); b.finish().await.unwrap(); } @@ -726,7 +744,9 @@ pub(super) mod tests { #[tokio::test] async fn test_basic() { let opt = default_builder_opt_for_test(); - let mut b = SstableBuilder::for_test(0, mock_sst_writer(&opt), opt); + + let table_id_to_vnode = HashMap::from_iter(vec![(0, VirtualNode::COUNT_FOR_TEST)]); + let mut b = SstableBuilder::for_test(0, mock_sst_writer(&opt), opt, table_id_to_vnode); for i in 0..TEST_KEYS_COUNT { b.add_for_test( @@ -815,24 +835,30 @@ pub(super) mod tests { .clone() .create_sst_writer(object_id, writer_opts); let mut filter = MultiFilterKeyExtractor::default(); - filter.register( - 1, - Arc::new(FilterKeyExtractorImpl::Dummy(DummyFilterKeyExtractor)), - ); + filter.register(1, FilterKeyExtractorImpl::Dummy(DummyFilterKeyExtractor)); filter.register( 2, - Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), - ); - filter.register( - 3, - 
Arc::new(FilterKeyExtractorImpl::Dummy(DummyFilterKeyExtractor)), + FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor), ); + filter.register(3, FilterKeyExtractorImpl::Dummy(DummyFilterKeyExtractor)); + + let table_id_to_vnode = HashMap::from_iter(vec![ + (1, VirtualNode::COUNT_FOR_TEST), + (2, VirtualNode::COUNT_FOR_TEST), + (3, VirtualNode::COUNT_FOR_TEST), + ]); + + let compaction_catalog_agent_ref = Arc::new(CompactionCatalogAgent::new( + FilterKeyExtractorImpl::Multi(filter), + table_id_to_vnode, + )); + let mut builder = SstableBuilder::new( object_id, writer, BlockedXor16FilterBuilder::new(1024), opts, - Arc::new(FilterKeyExtractorImpl::Multi(filter)), + compaction_catalog_agent_ref, None, ); diff --git a/src/storage/src/hummock/sstable/multi_builder.rs b/src/storage/src/hummock/sstable/multi_builder.rs index 45870257bc04e..f03bcf057c81f 100644 --- a/src/storage/src/hummock/sstable/multi_builder.rs +++ b/src/storage/src/hummock/sstable/multi_builder.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering::SeqCst; use std::sync::Arc; @@ -21,11 +21,13 @@ use bytes::Bytes; use futures::stream::FuturesUnordered; use futures::StreamExt; use num_integer::Integer; +use risingwave_common::catalog::TableId; use risingwave_common::hash::VirtualNode; use risingwave_hummock_sdk::key::{FullKey, UserKey}; use risingwave_hummock_sdk::LocalSstableInfo; use tokio::task::JoinHandle; +use crate::compaction_catalog_manager::CompactionCatalogAgentRef; use crate::hummock::compactor::task_progress::TaskProgress; use crate::hummock::sstable::filter::FilterBuilder; use crate::hummock::sstable_store::SstableStoreRef; @@ -78,6 +80,8 @@ where concurrent_upload_join_handle: FuturesUnordered, concurrent_uploading_sst_count: Option, + + compaction_catalog_agent_ref: CompactionCatalogAgentRef, } impl CapacitySplitTableBuilder @@ -92,6 +96,7 @@ where task_progress: Option>, table_vnode_partition: BTreeMap, concurrent_uploading_sst_count: Option, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, ) -> Self { // TODO(var-vnode): should use value from caller let vnode_count = VirtualNode::COUNT_FOR_COMPAT; @@ -109,10 +114,14 @@ where largest_vnode_in_current_partition: vnode_count - 1, concurrent_upload_join_handle: FuturesUnordered::new(), concurrent_uploading_sst_count, + compaction_catalog_agent_ref, } } - pub fn for_test(builder_factory: F) -> Self { + pub fn for_test( + builder_factory: F, + compaction_catalog_agent_ref: CompactionCatalogAgentRef, + ) -> Self { Self { builder_factory, sst_outputs: Vec::new(), @@ -126,6 +135,7 @@ where largest_vnode_in_current_partition: VirtualNode::MAX_FOR_TEST.to_index(), concurrent_upload_join_handle: FuturesUnordered::new(), concurrent_uploading_sst_count: None, + compaction_catalog_agent_ref, } } @@ -222,6 +232,11 @@ where let new_vnode_partition_count = self.table_vnode_partition.get(&user_key.table_id.table_id); + self.vnode_count = self + .compaction_catalog_agent_ref + .vnode_count(user_key.table_id.table_id); + self.largest_vnode_in_current_partition = self.vnode_count - 1; + if new_vnode_partition_count.is_some() || self.table_vnode_partition.contains_key(&self.last_table_id) { @@ -366,7 +381,11 @@ impl TableBuilderFactory for LocalTableBuilderFactory { .sstable_store .clone() .create_sst_writer(id, writer_options); - let builder = 
SstableBuilder::for_test(id, writer, self.options.clone()); + let table_id_to_vnode = HashMap::from_iter(vec![( + TableId::default().table_id(), + VirtualNode::COUNT_FOR_TEST, + )]); + let builder = SstableBuilder::for_test(id, writer, self.options.clone(), table_id_to_vnode); Ok(builder) } @@ -378,6 +397,9 @@ mod tests { use risingwave_common::util::epoch::{test_epoch, EpochExt}; use super::*; + use crate::compaction_catalog_manager::{ + CompactionCatalogAgent, FilterKeyExtractorImpl, FullKeyFilterKeyExtractor, + }; use crate::hummock::iterator::test_utils::mock_sstable_store; use crate::hummock::test_utils::{default_builder_opt_for_test, test_key_of, test_user_key_of}; use crate::hummock::DEFAULT_RESTART_INTERVAL; @@ -394,7 +416,9 @@ mod tests { ..Default::default() }; let builder_factory = LocalTableBuilderFactory::new(1001, mock_sstable_store().await, opts); - let builder = CapacitySplitTableBuilder::for_test(builder_factory); + let compaction_catalog_agent_ref = Arc::new(CompactionCatalogAgent::dummy()); + let builder = + CapacitySplitTableBuilder::for_test(builder_factory, compaction_catalog_agent_ref); let results = builder.finish().await.unwrap(); assert!(results.is_empty()); } @@ -410,8 +434,11 @@ mod tests { bloom_false_positive: 0.1, ..Default::default() }; + let compaction_catalog_agent_ref = CompactionCatalogAgent::for_test(vec![0]); + let builder_factory = LocalTableBuilderFactory::new(1001, mock_sstable_store().await, opts); - let mut builder = CapacitySplitTableBuilder::for_test(builder_factory); + let mut builder = + CapacitySplitTableBuilder::for_test(builder_factory, compaction_catalog_agent_ref); for i in 0..table_capacity { builder @@ -434,11 +461,11 @@ mod tests { #[tokio::test] async fn test_table_seal() { let opts = default_builder_opt_for_test(); - let mut builder = CapacitySplitTableBuilder::for_test(LocalTableBuilderFactory::new( - 1001, - mock_sstable_store().await, - opts, - )); + let compaction_catalog_agent_ref = CompactionCatalogAgent::for_test(vec![0]); + let mut builder = CapacitySplitTableBuilder::for_test( + LocalTableBuilderFactory::new(1001, mock_sstable_store().await, opts), + compaction_catalog_agent_ref, + ); let mut epoch = test_epoch(100); macro_rules! 
add { @@ -478,11 +505,11 @@ mod tests { #[tokio::test] async fn test_initial_not_allowed_split() { let opts = default_builder_opt_for_test(); - let mut builder = CapacitySplitTableBuilder::for_test(LocalTableBuilderFactory::new( - 1001, - mock_sstable_store().await, - opts, - )); + let compaction_catalog_agent_ref = CompactionCatalogAgent::for_test(vec![0]); + let mut builder = CapacitySplitTableBuilder::for_test( + LocalTableBuilderFactory::new(1001, mock_sstable_store().await, opts), + compaction_catalog_agent_ref, + ); builder .add_full_key_for_test(test_key_of(0).to_ref(), HummockValue::put(b"v"), false) .await @@ -501,25 +528,28 @@ mod tests { ..Default::default() }; - let table_partition_vnode = - BTreeMap::from([(1_u32, 4_u32), (2_u32, 4_u32), (3_u32, 4_u32)]); - - let mut builder = CapacitySplitTableBuilder::new( - LocalTableBuilderFactory::new(1001, mock_sstable_store().await, opts), - Arc::new(CompactorMetrics::unused()), - None, - table_partition_vnode, - None, - ); - - let mut table_key = VirtualNode::from_index(0).to_be_bytes().to_vec(); - table_key.extend_from_slice("a".as_bytes()); + { + let table_partition_vnode = + BTreeMap::from([(1_u32, 4_u32), (2_u32, 4_u32), (3_u32, 4_u32)]); + + let compaction_catalog_agent_ref = + CompactionCatalogAgent::for_test(vec![0, 1, 2, 3, 4, 5]); + let mut builder = CapacitySplitTableBuilder::new( + LocalTableBuilderFactory::new(1001, mock_sstable_store().await, opts.clone()), + Arc::new(CompactorMetrics::unused()), + None, + table_partition_vnode, + None, + compaction_catalog_agent_ref, + ); + + let mut table_key = VirtualNode::from_index(0).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); - let switch_builder = - builder.check_switch_builder(&UserKey::for_test(TableId::from(1), &table_key)); - assert!(switch_builder); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(1), &table_key)); + assert!(switch_builder); - { let mut table_key = VirtualNode::from_index(62).to_be_bytes().to_vec(); table_key.extend_from_slice("a".as_bytes()); let switch_builder = @@ -537,19 +567,119 @@ mod tests { let switch_builder = builder.check_switch_builder(&UserKey::for_test(TableId::from(1), &table_key)); assert!(switch_builder); + + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(2), &table_key)); + assert!(switch_builder); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(3), &table_key)); + assert!(switch_builder); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(4), &table_key)); + assert!(switch_builder); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(5), &table_key)); + assert!(!switch_builder); } - let switch_builder = - builder.check_switch_builder(&UserKey::for_test(TableId::from(2), &table_key)); - assert!(switch_builder); - let switch_builder = - builder.check_switch_builder(&UserKey::for_test(TableId::from(3), &table_key)); - assert!(switch_builder); - let switch_builder = - builder.check_switch_builder(&UserKey::for_test(TableId::from(4), &table_key)); - assert!(switch_builder); - let switch_builder = - builder.check_switch_builder(&UserKey::for_test(TableId::from(5), &table_key)); - assert!(!switch_builder); + { + // Test different table vnode count + let table_partition_vnode = + BTreeMap::from([(1_u32, 4_u32), (2_u32, 4_u32), (3_u32, 4_u32)]); + + let table_id_to_vnode = HashMap::from_iter(vec![(1, 64), (2, 128), (3, 256)]); + let 
compaction_catalog_agent_ref = Arc::new(CompactionCatalogAgent::new( + FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor), + table_id_to_vnode, + )); + + let mut builder = CapacitySplitTableBuilder::new( + LocalTableBuilderFactory::new(1001, mock_sstable_store().await, opts), + Arc::new(CompactorMetrics::unused()), + None, + table_partition_vnode, + None, + compaction_catalog_agent_ref, + ); + + let mut table_key = VirtualNode::from_index(0).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); + + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(1), &table_key)); + assert!(switch_builder); + + let mut table_key = VirtualNode::from_index(15).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(1), &table_key)); + assert!(!switch_builder); + + let mut table_key = VirtualNode::from_index(16).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(1), &table_key)); + assert!(switch_builder); + + let mut table_key = VirtualNode::from_index(0).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(2), &table_key)); + assert!(switch_builder); + + let mut table_key = VirtualNode::from_index(16).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(2), &table_key)); + assert!(!switch_builder); + + let mut table_key = VirtualNode::from_index(31).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(2), &table_key)); + assert!(!switch_builder); + + let mut table_key = VirtualNode::from_index(32).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(2), &table_key)); + assert!(switch_builder); + + let mut table_key = VirtualNode::from_index(64).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(2), &table_key)); + assert!(switch_builder); + + let mut table_key = VirtualNode::from_index(0).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(3), &table_key)); + assert!(switch_builder); + + let mut table_key = VirtualNode::from_index(16).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(3), &table_key)); + assert!(!switch_builder); + + let mut table_key = VirtualNode::from_index(32).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(3), &table_key)); + assert!(!switch_builder); + + let mut table_key = VirtualNode::from_index(63).to_be_bytes().to_vec(); + table_key.extend_from_slice("a".as_bytes()); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(3), &table_key)); + assert!(!switch_builder); + + let mut table_key = VirtualNode::from_index(64).to_be_bytes().to_vec(); + 
table_key.extend_from_slice("a".as_bytes()); + let switch_builder = + builder.check_switch_builder(&UserKey::for_test(TableId::from(3), &table_key)); + assert!(switch_builder); + } } } diff --git a/src/storage/src/hummock/sstable/utils.rs b/src/storage/src/hummock/sstable/utils.rs index 4f4693d5e60b8..e9211ed0e32c9 100644 --- a/src/storage/src/hummock/sstable/utils.rs +++ b/src/storage/src/hummock/sstable/utils.rs @@ -60,7 +60,6 @@ pub fn xxhash64_checksum(data: &[u8]) -> u64 { } /// Verifies the checksum of the data equals the given checksum with xxhash64. - pub fn xxhash64_verify(data: &[u8], checksum: u64) -> HummockResult<()> { let data_checksum = xxhash64_checksum(data); if data_checksum != checksum { diff --git a/src/storage/src/hummock/sstable/xor_filter.rs b/src/storage/src/hummock/sstable/xor_filter.rs index 1df4333fca459..8120184b17247 100644 --- a/src/storage/src/hummock/sstable/xor_filter.rs +++ b/src/storage/src/hummock/sstable/xor_filter.rs @@ -442,13 +442,18 @@ impl Clone for XorFilterReader { #[cfg(test)] mod tests { + use std::collections::HashMap; + use foyer::CacheContext; use rand::RngCore; + use risingwave_common::hash::VirtualNode; use risingwave_common::util::epoch::test_epoch; use risingwave_hummock_sdk::EpochWithGap; use super::*; - use crate::filter_key_extractor::{FilterKeyExtractorImpl, FullKeyFilterKeyExtractor}; + use crate::compaction_catalog_manager::{ + CompactionCatalogAgent, FilterKeyExtractorImpl, FullKeyFilterKeyExtractor, + }; use crate::hummock::iterator::test_utils::mock_sstable_store; use crate::hummock::sstable::{SstableBuilder, SstableBuilderOptions}; use crate::hummock::test_utils::{test_user_key_of, test_value_of, TEST_KEYS_COUNT}; @@ -475,12 +480,19 @@ mod tests { let writer = sstable_store .clone() .create_sst_writer(object_id, writer_opts); + + let table_id_to_vnode = HashMap::from_iter(vec![(0, VirtualNode::COUNT_FOR_TEST)]); + let compaction_catalog_agent_ref = Arc::new(CompactionCatalogAgent::new( + FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor), + table_id_to_vnode, + )); + let mut builder = SstableBuilder::new( object_id, writer, BlockedXor16FilterBuilder::create(0.01, 2048), opts, - Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), + compaction_catalog_agent_ref, None, ); let mut rng = rand::thread_rng(); diff --git a/src/storage/src/hummock/store/hummock_storage.rs b/src/storage/src/hummock/store/hummock_storage.rs index 665b064181687..073dd9e1dc1c4 100644 --- a/src/storage/src/hummock/store/hummock_storage.rs +++ b/src/storage/src/hummock/store/hummock_storage.rs @@ -38,8 +38,10 @@ use tokio::sync::oneshot; use super::local_hummock_storage::LocalHummockStorage; use super::version::{read_filter_for_version, CommittedVersion, HummockVersionReader}; +use crate::compaction_catalog_manager::{ + CompactionCatalogManager, CompactionCatalogManagerRef, FakeRemoteTableAccessor, +}; use crate::error::StorageResult; -use crate::filter_key_extractor::{FilterKeyExtractorManager, RpcFilterKeyExtractorManager}; use crate::hummock::backup_reader::{BackupReader, BackupReaderRef}; use crate::hummock::compactor::{ new_compaction_await_tree_reg_ref, CompactionAwaitTreeRegRef, CompactorContext, @@ -90,7 +92,7 @@ pub struct HummockStorage { context: CompactorContext, - filter_key_extractor_manager: FilterKeyExtractorManager, + compaction_catalog_manager_ref: CompactionCatalogManagerRef, sstable_object_id_manager: SstableObjectIdManagerRef, @@ -148,7 +150,7 @@ impl HummockStorage { sstable_store: SstableStoreRef, 
hummock_meta_client: Arc, notification_client: impl NotificationClient, - filter_key_extractor_manager: Arc, + compaction_catalog_manager_ref: CompactionCatalogManagerRef, state_store_metrics: Arc, compactor_metrics: Arc, await_tree_config: Option, @@ -170,7 +172,7 @@ impl HummockStorage { let observer_manager = ObserverManager::new( notification_client, HummockObserverNode::new( - filter_key_extractor_manager.clone(), + compaction_catalog_manager_ref.clone(), backup_reader.clone(), version_update_tx.clone(), write_limiter.clone(), @@ -191,9 +193,6 @@ impl HummockStorage { hummock_meta_client.clone(), options.max_version_pinning_duration_sec, )); - let filter_key_extractor_manager = FilterKeyExtractorManager::RpcFilterKeyExtractorManager( - filter_key_extractor_manager.clone(), - ); let await_tree_reg = await_tree_config.map(new_compaction_await_tree_reg_ref); @@ -208,7 +207,7 @@ impl HummockStorage { version_update_rx, pinned_version, compactor_context.clone(), - filter_key_extractor_manager.clone(), + compaction_catalog_manager_ref.clone(), sstable_object_id_manager.clone(), state_store_metrics.clone(), ); @@ -217,7 +216,7 @@ impl HummockStorage { let instance = Self { context: compactor_context, - filter_key_extractor_manager: filter_key_extractor_manager.clone(), + compaction_catalog_manager_ref: compaction_catalog_manager_ref.clone(), sstable_object_id_manager, buffer_tracker: hummock_event_handler.buffer_tracker().clone(), version_update_notifier_tx: hummock_event_handler.version_update_notifier_tx(), @@ -536,8 +535,8 @@ impl HummockStorage { &self.sstable_object_id_manager } - pub fn filter_key_extractor_manager(&self) -> &FilterKeyExtractorManager { - &self.filter_key_extractor_manager + pub fn compaction_catalog_manager_ref(&self) -> CompactionCatalogManagerRef { + self.compaction_catalog_manager_ref.clone() } pub fn get_memory_limiter(&self) -> Arc { @@ -763,12 +762,16 @@ impl HummockStorage { hummock_meta_client: Arc, notification_client: impl NotificationClient, ) -> HummockResult { + let compaction_catalog_manager = Arc::new(CompactionCatalogManager::new(Box::new( + FakeRemoteTableAccessor {}, + ))); + Self::new( options, sstable_store, hummock_meta_client, notification_client, - Arc::new(RpcFilterKeyExtractorManager::default()), + compaction_catalog_manager, Arc::new(HummockStateStoreMetrics::unused()), Arc::new(CompactorMetrics::unused()), None, diff --git a/src/storage/src/hummock/store/local_hummock_storage.rs b/src/storage/src/hummock/store/local_hummock_storage.rs index d0082f21b31f9..a2fcc5a2c786f 100644 --- a/src/storage/src/hummock/store/local_hummock_storage.rs +++ b/src/storage/src/hummock/store/local_hummock_storage.rs @@ -58,7 +58,6 @@ use crate::store::*; /// `LocalHummockStorage` is a handle for a state table shard to access data from and write data to /// the hummock state backend. It is created via `HummockStorage::new_local`. 
- pub struct LocalHummockStorage { mem_table: MemTable, @@ -781,7 +780,7 @@ impl<'a> HummockStorageIteratorInner<'a> { } } -impl<'a> Drop for HummockStorageIteratorInner<'a> { +impl Drop for HummockStorageIteratorInner<'_> { fn drop(&mut self) { self.inner .collect_local_statistic(&mut self.stats_guard.local_stats); @@ -863,7 +862,7 @@ impl<'a> HummockStorageRevIteratorInner<'a> { } } -impl<'a> Drop for HummockStorageRevIteratorInner<'a> { +impl Drop for HummockStorageRevIteratorInner<'_> { fn drop(&mut self) { self.inner .collect_local_statistic(&mut self.stats_guard.local_stats); diff --git a/src/storage/src/hummock/store/version.rs b/src/storage/src/hummock/store/version.rs index 33bc9fc49f753..d38424820c703 100644 --- a/src/storage/src/hummock/store/version.rs +++ b/src/storage/src/hummock/store/version.rs @@ -165,8 +165,8 @@ impl StagingVersion { table_id: TableId, table_key_range: &'a TableKeyRange, ) -> ( - impl Iterator + 'a, - impl Iterator + 'a, + impl Iterator + 'a, + impl Iterator + 'a, ) { let (ref left, ref right) = table_key_range; let left = left.as_ref().map(|key| TableKey(key.0.as_ref())); diff --git a/src/storage/src/hummock/test_utils.rs b/src/storage/src/hummock/test_utils.rs index ee3da3088dbd5..45d18027c1ebe 100644 --- a/src/storage/src/hummock/test_utils.rs +++ b/src/storage/src/hummock/test_utils.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::collections::HashMap; use std::sync::Arc; use bytes::Bytes; @@ -33,7 +34,9 @@ use super::iterator::test_utils::iterator_test_table_key_of; use super::{ HummockResult, InMemWriter, SstableMeta, SstableWriterOptions, DEFAULT_RESTART_INTERVAL, }; -use crate::filter_key_extractor::{FilterKeyExtractorImpl, FullKeyFilterKeyExtractor}; +use crate::compaction_catalog_manager::{ + CompactionCatalogAgent, FilterKeyExtractorImpl, FullKeyFilterKeyExtractor, +}; use crate::hummock::shared_buffer::shared_buffer_batch::{ SharedBufferBatch, SharedBufferItem, SharedBufferValue, }; @@ -153,7 +156,11 @@ pub async fn gen_test_sstable_data( opts: SstableBuilderOptions, kv_iter: impl Iterator>, HummockValue>)>, ) -> (Bytes, SstableMeta) { - let mut b = SstableBuilder::for_test(0, mock_sst_writer(&opts), opts); + let table_id_to_vnode = HashMap::from_iter(vec![( + TableId::default().table_id(), + VirtualNode::COUNT_FOR_TEST, + )]); + let mut b = SstableBuilder::for_test(0, mock_sst_writer(&opts), opts, table_id_to_vnode); for (key, value) in kv_iter { b.add_for_test(key.to_ref(), value.as_slice()) .await @@ -231,12 +238,22 @@ pub async fn gen_test_sstable_impl + Clone + Default + Eq, F: Fil let writer = sstable_store .clone() .create_sst_writer(object_id, writer_opts); + + let table_id_to_vnode = HashMap::from_iter(vec![( + TableId::default().table_id(), + VirtualNode::COUNT_FOR_TEST, + )]); + let compaction_catalog_agent_ref = Arc::new(CompactionCatalogAgent::new( + FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor), + table_id_to_vnode, + )); + let mut b = SstableBuilder::<_, F>::new( object_id, writer, F::create(opts.bloom_false_positive, opts.capacity / 16), opts, - Arc::new(FilterKeyExtractorImpl::FullKey(FullKeyFilterKeyExtractor)), + compaction_catalog_agent_ref, None, ); diff --git a/src/storage/src/lib.rs b/src/storage/src/lib.rs index 779062767c7ae..c23d17a8de307 100644 --- a/src/storage/src/lib.rs +++ b/src/storage/src/lib.rs @@ -27,7 +27,6 @@ #![feature(type_changing_struct_update)] 
#![test_runner(risingwave_test_runner::test_runner::run_failpont_tests)] #![feature(assert_matches)] -#![feature(is_sorted)] #![feature(btree_extract_if)] #![feature(exact_size_is_empty)] #![cfg_attr(coverage, feature(coverage_attribute))] @@ -52,7 +51,7 @@ pub mod opts; pub mod store_impl; pub mod table; -pub mod filter_key_extractor; +pub mod compaction_catalog_manager; pub mod mem_table; #[cfg(test)] #[cfg(feature = "failpoints")] diff --git a/src/storage/src/store_impl.rs b/src/storage/src/store_impl.rs index ae9f630f304cb..2008047378479 100644 --- a/src/storage/src/store_impl.rs +++ b/src/storage/src/store_impl.rs @@ -26,8 +26,8 @@ use risingwave_common_service::RpcNotificationClient; use risingwave_hummock_sdk::HummockSstableObjectId; use risingwave_object_store::object::build_remote_object_store; +use crate::compaction_catalog_manager::{CompactionCatalogManager, RemoteTableAccessor}; use crate::error::StorageResult; -use crate::filter_key_extractor::{RemoteTableAccessor, RpcFilterKeyExtractorManager}; use crate::hummock::hummock_meta_client::MonitoredHummockMetaClient; use crate::hummock::{ Block, BlockCacheEventListener, HummockError, HummockStorage, RecentFilter, Sstable, @@ -764,15 +764,17 @@ impl StateStoreImpl { })); let notification_client = RpcNotificationClient::new(hummock_meta_client.get_inner().clone()); - let key_filter_manager = Arc::new(RpcFilterKeyExtractorManager::new(Box::new( - RemoteTableAccessor::new(hummock_meta_client.get_inner().clone()), - ))); + let compaction_catalog_manager_ref = + Arc::new(CompactionCatalogManager::new(Box::new( + RemoteTableAccessor::new(hummock_meta_client.get_inner().clone()), + ))); + let inner = HummockStorage::new( opts.clone(), sstable_store, hummock_meta_client.clone(), notification_client, - key_filter_manager, + compaction_catalog_manager_ref, state_store_metrics.clone(), compactor_metrics.clone(), await_tree_config, @@ -844,11 +846,13 @@ pub mod boxed_state_store { use crate::store::*; use crate::store_impl::AsHummock; + #[expect(elided_named_lifetimes)] // false positive #[async_trait::async_trait] pub trait DynamicDispatchedStateStoreIter: Send { async fn try_next(&mut self) -> StorageResult>>; } + #[expect(elided_named_lifetimes)] // false positive #[async_trait::async_trait] impl> DynamicDispatchedStateStoreIter for I { async fn try_next(&mut self) -> StorageResult>> { @@ -954,12 +958,14 @@ pub mod boxed_state_store { read_options: ReadOptions, ) -> StorageResult>; + #[expect(elided_named_lifetimes)] // false positive async fn iter( &self, key_range: TableKeyRange, read_options: ReadOptions, ) -> StorageResult>; + #[expect(elided_named_lifetimes)] // false positive async fn rev_iter( &self, key_range: TableKeyRange, @@ -1002,6 +1008,7 @@ pub mod boxed_state_store { self.get(key, read_options).await } + #[expect(elided_named_lifetimes)] // false positive async fn iter( &self, key_range: TableKeyRange, @@ -1010,6 +1017,7 @@ pub mod boxed_state_store { Ok(Box::new(self.iter(key_range, read_options).await?)) } + #[expect(elided_named_lifetimes)] // false positive async fn rev_iter( &self, key_range: TableKeyRange, diff --git a/src/storage/src/table/merge_sort.rs b/src/storage/src/table/merge_sort.rs index 5f94656b7f651..57d5e780fbd65 100644 --- a/src/storage/src/table/merge_sort.rs +++ b/src/storage/src/table/merge_sort.rs @@ -54,7 +54,7 @@ struct Node<'a, S, R> { peeked: Box + 'a + Send + Sync>, } -impl<'a, S, R> PartialEq for Node<'a, S, R> { +impl PartialEq for Node<'_, S, R> { fn eq(&self, other: &Self) -> bool { match 
self.peeked.vnode_key() == other.peeked.vnode_key() { true => unreachable!("primary key from different iters should be unique"), @@ -62,15 +62,15 @@ impl<'a, S, R> PartialEq for Node<'a, S, R> { } } } -impl<'a, S, R> Eq for Node<'a, S, R> {} +impl Eq for Node<'_, S, R> {} -impl<'a, S, R> PartialOrd for Node<'a, S, R> { +impl PartialOrd for Node<'_, S, R> { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl<'a, S, R> Ord for Node<'a, S, R> { +impl Ord for Node<'_, S, R> { fn cmp(&self, other: &Self) -> std::cmp::Ordering { // The heap is a max heap, so we need to reverse the order. self.peeked diff --git a/src/stream/src/cache/managed_lru.rs b/src/stream/src/cache/managed_lru.rs index 7e1b2aa391306..d498c20e2794a 100644 --- a/src/stream/src/cache/managed_lru.rs +++ b/src/stream/src/cache/managed_lru.rs @@ -196,7 +196,7 @@ impl<'a, V: EstimateSize> MutGuard<'a, V> { } } -impl<'a, V: EstimateSize> Drop for MutGuard<'a, V> { +impl Drop for MutGuard<'_, V> { fn drop(&mut self) { let new_value_size = self.inner.estimated_size(); if new_value_size != self.old_value_size { @@ -209,7 +209,7 @@ impl<'a, V: EstimateSize> Drop for MutGuard<'a, V> { } } -impl<'a, V: EstimateSize> Deref for MutGuard<'a, V> { +impl Deref for MutGuard<'_, V> { type Target = V; fn deref(&self) -> &Self::Target { @@ -217,7 +217,7 @@ impl<'a, V: EstimateSize> Deref for MutGuard<'a, V> { } } -impl<'a, V: EstimateSize> DerefMut for MutGuard<'a, V> { +impl DerefMut for MutGuard<'_, V> { fn deref_mut(&mut self) -> &mut Self::Target { self.inner } diff --git a/src/stream/src/common/log_store_impl/kv_log_store/serde.rs b/src/stream/src/common/log_store_impl/kv_log_store/serde.rs index 6e020536cfb99..4bfe82aa7c41d 100644 --- a/src/stream/src/common/log_store_impl/kv_log_store/serde.rs +++ b/src/stream/src/common/log_store_impl/kv_log_store/serde.rs @@ -1191,6 +1191,7 @@ mod tests { ) } + #[expect(clippy::type_complexity)] fn gen_single_test_stream( serde: LogStoreRowSerde, seq_id: &mut SeqIdType, diff --git a/src/stream/src/common/state_cache/ordered.rs b/src/stream/src/common/state_cache/ordered.rs index 8a47d1b7ecafe..02797eabad764 100644 --- a/src/stream/src/common/state_cache/ordered.rs +++ b/src/stream/src/common/state_cache/ordered.rs @@ -41,7 +41,10 @@ impl Default for OrderedStateCache } impl StateCache for OrderedStateCache { - type Filler<'a> = &'a mut Self where Self: 'a; + type Filler<'a> + = &'a mut Self + where + Self: 'a; type Key = K; type Value = V; diff --git a/src/stream/src/common/state_cache/top_n.rs b/src/stream/src/common/state_cache/top_n.rs index fef7017d75129..6ca988bf2740e 100644 --- a/src/stream/src/common/state_cache/top_n.rs +++ b/src/stream/src/common/state_cache/top_n.rs @@ -108,7 +108,10 @@ impl TopNStateCache { } impl StateCache for TopNStateCache { - type Filler<'a> = &'a mut Self where Self: 'a; + type Filler<'a> + = &'a mut Self + where + Self: 'a; type Key = K; type Value = V; diff --git a/src/stream/src/executor/aggregation/agg_state_cache.rs b/src/stream/src/executor/aggregation/agg_state_cache.rs index 8a202ed6dd1dd..dc04b1313ed0b 100644 --- a/src/stream/src/executor/aggregation/agg_state_cache.rs +++ b/src/stream/src/executor/aggregation/agg_state_cache.rs @@ -161,7 +161,7 @@ where cache_filler: C::Filler<'filler>, } -impl<'filler, C> AggStateCacheFiller for GenericAggStateCacheFiller<'filler, C> +impl AggStateCacheFiller for GenericAggStateCacheFiller<'_, C> where C: StateCache, { diff --git a/src/stream/src/executor/mview/materialize.rs 
b/src/stream/src/executor/mview/materialize.rs index a5dc24d5cd74b..c2cbef8e6114e 100644 --- a/src/stream/src/executor/mview/materialize.rs +++ b/src/stream/src/executor/mview/materialize.rs @@ -702,11 +702,9 @@ impl MaterializeCache { ConflictBehavior::Overwrite | ConflictBehavior::IgnoreConflict | ConflictBehavior::DoUpdateIfNotNull => { - match self.force_get(&key) { - Some(old_row) => { - fixed_changes().delete(key.clone(), old_row.row.clone()); - } - None => (), // delete a nonexistent value + // delete a nonexistent value + if let Some(old_row) = self.force_get(&key) { + fixed_changes().delete(key.clone(), old_row.row.clone()); }; } _ => unreachable!(), diff --git a/src/stream/src/executor/nested_loop_temporal_join.rs b/src/stream/src/executor/nested_loop_temporal_join.rs index 55d21b468a777..842fa5254f8d5 100644 --- a/src/stream/src/executor/nested_loop_temporal_join.rs +++ b/src/stream/src/executor/nested_loop_temporal_join.rs @@ -98,6 +98,7 @@ async fn phase1_handle_chunk( } impl NestedLoopTemporalJoinExecutor { + #[expect(dead_code)] #[expect(clippy::too_many_arguments)] pub fn new( ctx: ActorContextRef, diff --git a/src/stream/src/executor/over_window/frame_finder.rs b/src/stream/src/executor/over_window/frame_finder.rs index 4438611b5d370..9cffb6e26d903 100644 --- a/src/stream/src/executor/over_window/frame_finder.rs +++ b/src/stream/src/executor/over_window/frame_finder.rs @@ -17,7 +17,7 @@ use std::ops::Bound; -use delta_btree_map::DeltaBTreeMap; +use delta_btree_map::{CursorWithDelta, DeltaBTreeMap}; use itertools::Itertools; use risingwave_common::row::OwnedRow; use risingwave_common::types::{Datum, Sentinelled, ToDatumRef}; @@ -284,6 +284,14 @@ fn find_curr_for_rows_frame<'cache, const LEFT: bool>( } else { part_with_delta.upper_bound(Bound::Included(delta_key)) }; + let pointed_key = |cursor: CursorWithDelta<'cache, CacheKey, OwnedRow>| { + if LEFT { + cursor.peek_next().map(|(k, _)| k) + } else { + cursor.peek_prev().map(|(k, _)| k) + } + }; + let n_rows_to_move = if LEFT { frame_bounds.n_following_rows().unwrap() } else { @@ -291,8 +299,7 @@ fn find_curr_for_rows_frame<'cache, const LEFT: bool>( }; if n_rows_to_move == 0 { - return cursor - .key() + return pointed_key(cursor) .or_else(|| { if LEFT { part_with_delta.last_key() @@ -304,28 +311,16 @@ fn find_curr_for_rows_frame<'cache, const LEFT: bool>( } for _ in 0..n_rows_to_move { - // Note that we have to move before check, to handle situation where the - // cursor is at ghost position at first. - if LEFT { - cursor.move_prev(); - } else { - cursor.move_next(); - } - if cursor.position().is_ghost() { + let res = if LEFT { cursor.prev() } else { cursor.next() }; + if res.is_none() { + // we reach the end break; } } - cursor - .key() - .or_else(|| { - // Note the difference between this with the `n_rows_to_move == 0` case. - if LEFT { - part_with_delta.first_key() - } else { - part_with_delta.last_key() - } - }) - .unwrap() + + // We always have a valid key here, because `part_with_delta` must not be empty, + // and `n_rows_to_move` is always larger than 0 when we reach here. + pointed_key(cursor).unwrap() } fn find_boundary_for_rows_frame<'cache, const LEFT: bool>( @@ -350,8 +345,18 @@ fn find_boundary_for_rows_frame<'cache, const LEFT: bool>( // have `curr_key` which definitely exists in the `part_with_delta`. We just find // the cursor pointing to it and move the cursor to frame boundary. 
- let mut cursor = part_with_delta.find(curr_key).unwrap(); - assert!(!cursor.position().is_ghost()); + let mut cursor = if LEFT { + part_with_delta.before(curr_key).unwrap() + } else { + part_with_delta.after(curr_key).unwrap() + }; + let pointed_key = |cursor: CursorWithDelta<'cache, CacheKey, OwnedRow>| { + if LEFT { + cursor.peek_next().map(|(k, _)| k) + } else { + cursor.peek_prev().map(|(k, _)| k) + } + }; let n_rows_to_move = if LEFT { frame_bounds.n_preceding_rows().unwrap() @@ -360,25 +365,16 @@ fn find_boundary_for_rows_frame<'cache, const LEFT: bool>( }; for _ in 0..n_rows_to_move { - if LEFT { - cursor.move_prev(); - } else { - cursor.move_next(); - } - if cursor.position().is_ghost() { + let res = if LEFT { cursor.prev() } else { cursor.next() }; + if res.is_none() { + // we reach the end break; } } - cursor - .key() - .or_else(|| { - if LEFT { - part_with_delta.first_key() - } else { - part_with_delta.last_key() - } - }) - .unwrap() + + // We always have a valid key here, because `cursor` must point to a valid key + // at the beginning. + pointed_key(cursor).unwrap() } /// Given a pair of left and right state keys, calculate the leftmost (smallest) and rightmost @@ -497,11 +493,15 @@ fn find_for_range_frames<'cache, const LEFT: bool>( // the curr key. prev_key } else { - // If cursor is in ghost position, it simply means that the search key is larger + // If there's nothing on the left, it simply means that the search key is larger // than any existing key. Returning the last key in this case does no harm. Especially, // if the last key is largest sentinel, the caller should extend the cache rightward // to get possible entries with the same order value into the cache. - cursor.key().or_else(|| part_with_delta.last_key()).unwrap() + cursor + .peek_next() + .map(|(k, _)| k) + .or_else(|| part_with_delta.last_key()) + .unwrap() } } else { let cursor = part_with_delta.upper_bound(Bound::Included(&search_key)); @@ -511,7 +511,8 @@ fn find_for_range_frames<'cache, const LEFT: bool>( next_key } else { cursor - .key() + .peek_prev() + .map(|(k, _)| k) .or_else(|| part_with_delta.first_key()) .unwrap() } diff --git a/src/stream/src/executor/over_window/over_partition.rs b/src/stream/src/executor/over_window/over_partition.rs index c2f8ea895dccb..dd90105024284 100644 --- a/src/stream/src/executor/over_window/over_partition.rs +++ b/src/stream/src/executor/over_window/over_partition.rs @@ -19,7 +19,7 @@ use std::collections::BTreeMap; use std::marker::PhantomData; use std::ops::{Bound, RangeInclusive}; -use delta_btree_map::{Change, DeltaBTreeMap, PositionType}; +use delta_btree_map::{Change, DeltaBTreeMap}; use educe::Educe; use futures_async_stream::for_await; use risingwave_common::array::stream_record::Record; @@ -470,12 +470,11 @@ impl<'a, S: StateStore> OverPartition<'a, S> { // Populate window states with the affected range of rows. 
{ let mut cursor = part_with_delta - .find(first_frame_start) + .before(first_frame_start) .expect("first frame start key must exist"); - while { - let (key, row) = cursor - .key_value() - .expect("cursor must be valid until `last_frame_end`"); + + while let Some((key, row)) = cursor.next() { + accessed_entry_count += 1; for (call, state) in calls.iter().zip_eq_fast(states.iter_mut()) { // TODO(rc): batch appending @@ -488,28 +487,28 @@ impl<'a, S: StateStore> OverPartition<'a, S> { .into(), ); } - accessed_entry_count += 1; - cursor.move_next(); - key != last_frame_end - } {} + if key == last_frame_end { + break; + } + } } // Slide to the first affected key. We can safely pass in `first_curr_key` here // because it definitely exists in the states by the definition of affected range. states.just_slide_to(first_curr_key.as_normal_expect())?; - let mut curr_key_cursor = part_with_delta.find(first_curr_key).unwrap(); + let mut curr_key_cursor = part_with_delta.before(first_curr_key).unwrap(); assert_eq!( states.curr_key(), - curr_key_cursor.key().map(CacheKey::as_normal_expect) + curr_key_cursor + .peek_next() + .map(|(k, _)| k) + .map(CacheKey::as_normal_expect) ); // Slide and generate changes. - while { - let (key, row) = curr_key_cursor - .key_value() - .expect("cursor must be valid until `last_curr_key`"); - let mut should_continue = true; + while let Some((key, row)) = curr_key_cursor.next() { + let mut should_stop = false; let output = states.slide_no_evict_hint()?; compute_count += 1; @@ -524,11 +523,11 @@ impl<'a, S: StateStore> OverPartition<'a, S> { // all the following rows, so we need to check the `order_key`. if key.as_normal_expect().order_key > last_delta_key.order_key { // there won't be any more changes after this point, we can stop early - should_continue = false; + should_stop = true; } } else if key.as_normal_expect() >= last_delta_key { // there won't be any more changes after this point, we can stop early - should_continue = false; + should_stop = true; } } } @@ -542,29 +541,23 @@ impl<'a, S: StateStore> OverPartition<'a, S> { .collect(), ); - match curr_key_cursor.position() { - PositionType::Ghost => unreachable!(), - PositionType::Snapshot | PositionType::DeltaUpdate => { - // update - let old_row = snapshot.get(key).unwrap().clone(); - if old_row != new_row { - part_changes.insert( - key.as_normal_expect().clone(), - Record::Update { old_row, new_row }, - ); - } - } - PositionType::DeltaInsert => { - // insert - part_changes - .insert(key.as_normal_expect().clone(), Record::Insert { new_row }); + if let Some(old_row) = snapshot.get(key).cloned() { + // update + if old_row != new_row { + part_changes.insert( + key.as_normal_expect().clone(), + Record::Update { old_row, new_row }, + ); } + } else { + // insert + part_changes.insert(key.as_normal_expect().clone(), Record::Insert { new_row }); } - curr_key_cursor.move_next(); - - should_continue && key != last_curr_key - } {} + if should_stop || key == last_curr_key { + break; + } + } } self.stats.accessed_entry_count += accessed_entry_count; diff --git a/src/stream/src/executor/project.rs b/src/stream/src/executor/project.rs index d94daa926d97b..40cd7cb0340ef 100644 --- a/src/stream/src/executor/project.rs +++ b/src/stream/src/executor/project.rs @@ -40,6 +40,8 @@ struct Inner { nondecreasing_expr_indices: Vec, /// Last seen values of nondecreasing expressions, buffered to periodically produce watermarks. last_nondec_expr_values: Vec>, + /// Whether the stream is paused. 
+ is_paused: bool, /// Whether there are likely no-op updates in the output chunks, so that eliminating them with /// `StreamChunk::eliminate_adjacent_noop_update` could be beneficial. @@ -65,6 +67,7 @@ impl ProjectExecutor { watermark_derivations, nondecreasing_expr_indices, last_nondec_expr_values: vec![None; n_nondecreasing_exprs], + is_paused: false, noop_update_hint, }, } @@ -132,8 +135,13 @@ impl Inner { #[try_stream(ok = Message, error = StreamExecutorError)] async fn execute(mut self, input: Executor) { + let mut input = input.execute(); + let first_barrier = expect_first_barrier(&mut input).await?; + self.is_paused = first_barrier.is_pause_on_startup(); + yield Message::Barrier(first_barrier); + #[for_await] - for msg in input.execute() { + for msg in input { let msg = msg?; match msg { Message::Watermark(w) => { @@ -163,21 +171,36 @@ impl Inner { } None => continue, }, - barrier @ Message::Barrier(_) => { - for (&expr_idx, value) in self - .nondecreasing_expr_indices - .iter() - .zip_eq_fast(&mut self.last_nondec_expr_values) - { - if let Some(value) = std::mem::take(value) { - yield Message::Watermark(Watermark::new( - expr_idx, - self.exprs[expr_idx].return_type(), - value, - )) + Message::Barrier(barrier) => { + if !self.is_paused { + for (&expr_idx, value) in self + .nondecreasing_expr_indices + .iter() + .zip_eq_fast(&mut self.last_nondec_expr_values) + { + if let Some(value) = std::mem::take(value) { + yield Message::Watermark(Watermark::new( + expr_idx, + self.exprs[expr_idx].return_type(), + value, + )) + } + } + } + + if let Some(mutation) = barrier.mutation.as_deref() { + match mutation { + Mutation::Pause => { + self.is_paused = true; + } + Mutation::Resume => { + self.is_paused = false; + } + _ => (), } } - yield barrier; + + yield Message::Barrier(barrier); } } } diff --git a/src/stream/src/executor/project_set.rs b/src/stream/src/executor/project_set.rs index 8f5c0e533bfbd..dff51a39255cf 100644 --- a/src/stream/src/executor/project_set.rs +++ b/src/stream/src/executor/project_set.rs @@ -93,6 +93,11 @@ impl Execute for ProjectSetExecutor { impl Inner { #[try_stream(ok = Message, error = StreamExecutorError)] async fn execute(self, input: Executor) { + let mut input = input.execute(); + let first_barrier = expect_first_barrier(&mut input).await?; + let mut is_paused = first_barrier.is_pause_on_startup(); + yield Message::Barrier(first_barrier); + assert!(!self.select_list.is_empty()); // First column will be `projected_row_id`, which represents the index in the // output table @@ -104,8 +109,9 @@ impl Inner { let mut builder = StreamChunkBuilder::new(self.chunk_size, data_types); let mut last_nondec_expr_values = vec![None; self.nondecreasing_expr_indices.len()]; + #[for_await] - for msg in input.execute() { + for msg in input { match msg? 
{ Message::Watermark(watermark) => { let watermarks = self.handle_watermark(watermark).await?; @@ -113,21 +119,36 @@ impl Inner { yield Message::Watermark(watermark) } } - m @ Message::Barrier(_) => { - for (&expr_idx, value) in self - .nondecreasing_expr_indices - .iter() - .zip_eq_fast(&mut last_nondec_expr_values) - { - if let Some(value) = std::mem::take(value) { - yield Message::Watermark(Watermark::new( - expr_idx + PROJ_ROW_ID_OFFSET, - self.select_list[expr_idx].return_type(), - value, - )) + Message::Barrier(barrier) => { + if !is_paused { + for (&expr_idx, value) in self + .nondecreasing_expr_indices + .iter() + .zip_eq_fast(&mut last_nondec_expr_values) + { + if let Some(value) = std::mem::take(value) { + yield Message::Watermark(Watermark::new( + expr_idx + PROJ_ROW_ID_OFFSET, + self.select_list[expr_idx].return_type(), + value, + )) + } } } - yield m + + if let Some(mutation) = barrier.mutation.as_deref() { + match mutation { + Mutation::Pause => { + is_paused = true; + } + Mutation::Resume => { + is_paused = false; + } + _ => (), + } + } + + yield Message::Barrier(barrier); } Message::Chunk(chunk) => { let mut results = Vec::with_capacity(self.select_list.len()); diff --git a/src/stream/src/executor/sink.rs b/src/stream/src/executor/sink.rs index 3847e36f9a95c..206b8c4989817 100644 --- a/src/stream/src/executor/sink.rs +++ b/src/stream/src/executor/sink.rs @@ -295,7 +295,10 @@ impl SinkExecutor { match msg? { Message::Watermark(w) => yield Message::Watermark(w), Message::Chunk(chunk) => { - assert!(!is_paused, "Should not receive any data after pause"); + assert!( + !is_paused, + "Actor {actor_id} should not receive any data after pause" + ); log_writer.write_chunk(chunk.clone()).await?; yield Message::Chunk(chunk); } @@ -505,6 +508,7 @@ impl SinkExecutor { log_reader.init().await?; + #[expect(irrefutable_let_patterns)] // false positive while let Err(e) = sink .new_log_sinker(sink_writer_param.clone()) .and_then(|log_sinker| log_sinker.consume_log_and_sink(&mut log_reader)) diff --git a/src/stream/src/executor/source/fs_source_executor.rs b/src/stream/src/executor/source/fs_source_executor.rs index 70f0ce5f4f24b..0f3115c46c62f 100644 --- a/src/stream/src/executor/source/fs_source_executor.rs +++ b/src/stream/src/executor/source/fs_source_executor.rs @@ -364,8 +364,10 @@ impl FsSourceExecutor { let barrier_stream = barrier_to_message_stream(barrier_receiver).boxed(); let mut stream = StreamReaderWithPause::::new(barrier_stream, source_chunk_reader); + let mut command_paused = false; if start_with_paused { stream.pause_stream(); + command_paused = true; } yield Message::Barrier(barrier); @@ -394,7 +396,10 @@ impl FsSourceExecutor { Message::Barrier(barrier) => { last_barrier_time = Instant::now(); if self_paused { - stream.resume_stream(); + // command_paused has a higher priority. + if !command_paused { + stream.resume_stream(); + } self_paused = false; } let epoch = barrier.epoch; @@ -405,8 +410,14 @@ impl FsSourceExecutor { self.apply_split_change(&source_desc, &mut stream, actor_splits) .await? } - Mutation::Pause => stream.pause_stream(), - Mutation::Resume => stream.resume_stream(), + Mutation::Pause => { + command_paused = true; + stream.pause_stream() + } + Mutation::Resume => { + command_paused = false; + stream.resume_stream() + } Mutation::Update(UpdateMutation { actor_splits, .. 
}) => { self.apply_split_change( &source_desc, diff --git a/src/stream/src/executor/source/source_backfill_executor.rs b/src/stream/src/executor/source/source_backfill_executor.rs index 34f9eb12d692a..55806150bb87b 100644 --- a/src/stream/src/executor/source/source_backfill_executor.rs +++ b/src/stream/src/executor/source/source_backfill_executor.rs @@ -406,6 +406,7 @@ impl SourceBackfillExecutorInner { type PausedReader = Option; let mut paused_reader: PausedReader = None; + let mut command_paused = false; macro_rules! pause_reader { () => { @@ -422,6 +423,7 @@ impl SourceBackfillExecutorInner { // If the first barrier requires us to pause on startup, pause the stream. if barrier.is_pause_on_startup() { + command_paused = true; pause_reader!(); } @@ -503,11 +505,16 @@ impl SourceBackfillExecutorInner { last_barrier_time = Instant::now(); if self_paused { - backfill_stream = select_with_strategy( - input.by_ref().map(Either::Left), - paused_reader.take().expect("no paused reader to resume"), - select_strategy, - ); + // command_paused has a higher priority. + if !command_paused { + backfill_stream = select_with_strategy( + input.by_ref().map(Either::Left), + paused_reader + .take() + .expect("no paused reader to resume"), + select_strategy, + ); + } self_paused = false; } @@ -515,16 +522,28 @@ impl SourceBackfillExecutorInner { if let Some(ref mutation) = barrier.mutation.as_deref() { match mutation { Mutation::Pause => { - pause_reader!(); + // pause_reader should not be invoked consecutively more than once. + if !command_paused { + pause_reader!(); + command_paused = true; + } else { + tracing::warn!(command_paused, "unexpected pause"); + } } Mutation::Resume => { - backfill_stream = select_with_strategy( - input.by_ref().map(Either::Left), - paused_reader - .take() - .expect("no paused reader to resume"), - select_strategy, - ); + // pause_reader.take should not be invoked consecutively more than once. + if command_paused { + backfill_stream = select_with_strategy( + input.by_ref().map(Either::Left), + paused_reader + .take() + .expect("no paused reader to resume"), + select_strategy, + ); + command_paused = false; + } else { + tracing::warn!(command_paused, "unexpected resume"); + } } Mutation::SourceChangeSplit(actor_splits) => { tracing::info!( @@ -641,6 +660,13 @@ impl SourceBackfillExecutorInner { let chunk = msg?; if last_barrier_time.elapsed().as_millis() > max_wait_barrier_time_ms { + assert!(!command_paused, "command_paused should be false"); + // pause_reader should not be invoked consecutively more than once. + if !self_paused { + pause_reader!(); + } else { + tracing::warn!(self_paused, "unexpected self pause"); + } // Exceeds the max wait barrier time, the source will be paused. 
// Currently we can guarantee the // source is not paused since it received stream @@ -651,7 +677,6 @@ impl SourceBackfillExecutorInner { self.info.identity, last_barrier_time.elapsed() ); - pause_reader!(); // Only update `max_wait_barrier_time_ms` to capture // `barrier_interval_ms` diff --git a/src/stream/src/executor/source/source_executor.rs b/src/stream/src/executor/source/source_executor.rs index 6d5cf710d3bb0..6e583caf739af 100644 --- a/src/stream/src/executor/source/source_executor.rs +++ b/src/stream/src/executor/source/source_executor.rs @@ -503,6 +503,7 @@ impl SourceExecutor { let barrier_stream = barrier_to_message_stream(barrier_receiver).boxed(); let mut stream = StreamReaderWithPause::::new(barrier_stream, source_chunk_reader); + let mut command_paused = false; // - For shared source, pause until there's a MV. // - If the first barrier requires us to pause on startup, pause the stream. @@ -513,6 +514,7 @@ impl SourceExecutor { "source paused on startup" ); stream.pause_stream(); + command_paused = true; } yield Message::Barrier(barrier); @@ -548,8 +550,11 @@ impl SourceExecutor { last_barrier_time = Instant::now(); if self_paused { - stream.resume_stream(); self_paused = false; + // command_paused has a higher priority. + if !command_paused { + stream.resume_stream(); + } } let epoch = barrier.epoch; @@ -564,9 +569,14 @@ impl SourceExecutor { if let Some(mutation) = barrier.mutation.as_deref() { match mutation { - // XXX: Is it possible that the stream is self_paused, and we have pause mutation now? In this case, it will panic. - Mutation::Pause => stream.pause_stream(), - Mutation::Resume => stream.resume_stream(), + Mutation::Pause => { + command_paused = true; + stream.pause_stream() + } + Mutation::Resume => { + command_paused = false; + stream.resume_stream() + } Mutation::SourceChangeSplit(actor_splits) => { tracing::info!( actor_id = self.actor_ctx.id, diff --git a/src/stream/src/executor/watermark_filter.rs b/src/stream/src/executor/watermark_filter.rs index 01497c37fdab5..47a5448435b84 100644 --- a/src/stream/src/executor/watermark_filter.rs +++ b/src/stream/src/executor/watermark_filter.rs @@ -100,6 +100,7 @@ impl WatermarkFilterExecutor { let first_barrier = expect_first_barrier(&mut input).await?; let prev_epoch = first_barrier.epoch.prev; table.init_epoch(first_barrier.epoch); + let mut is_paused = first_barrier.is_pause_on_startup(); // The first barrier message should be propagated. 
yield Message::Barrier(first_barrier); @@ -113,7 +114,9 @@ impl WatermarkFilterExecutor { let mut last_checkpoint_watermark = None; - if let Some(watermark) = current_watermark.clone() { + if let Some(watermark) = current_watermark.clone() + && !is_paused + { yield Message::Watermark(Watermark::new( event_time_col_idx, watermark_type.clone(), @@ -240,8 +243,20 @@ impl WatermarkFilterExecutor { } } } - table.commit(barrier.epoch).await?; + + if let Some(mutation) = barrier.mutation.as_deref() { + match mutation { + Mutation::Pause => { + is_paused = true; + } + Mutation::Resume => { + is_paused = false; + } + _ => (), + } + } + yield Message::Barrier(barrier); if need_update_global_max_watermark { @@ -253,7 +268,7 @@ impl WatermarkFilterExecutor { .await?; } - if is_checkpoint { + if is_checkpoint && !is_paused { if idle_input { barrier_num_during_idle += 1; diff --git a/src/stream/src/from_proto/stream_cdc_scan.rs b/src/stream/src/from_proto/stream_cdc_scan.rs index 3c81ecb80e859..18a47ae8a461f 100644 --- a/src/stream/src/from_proto/stream_cdc_scan.rs +++ b/src/stream/src/from_proto/stream_cdc_scan.rs @@ -79,9 +79,9 @@ impl ExecutorBuilder for StreamCdcScanExecutorBuilder { .columns .iter() .filter(|col| { - !col.additional_column + col.additional_column .as_ref() - .is_some_and(|a_col| a_col.column_type.is_some()) + .is_none_or(|a_col| a_col.column_type.is_none()) }) .map(Into::into) .collect(); diff --git a/src/stream/src/lib.rs b/src/stream/src/lib.rs index 577b829945620..06893b16c2455 100644 --- a/src/stream/src/lib.rs +++ b/src/stream/src/lib.rs @@ -34,7 +34,6 @@ #![feature(exact_size_is_empty)] #![feature(impl_trait_in_assoc_type)] #![feature(test)] -#![feature(is_sorted)] #![feature(btree_cursors)] #![feature(assert_matches)] #![feature(try_blocks)] diff --git a/src/stream/tests/integration_tests/project_set.rs b/src/stream/tests/integration_tests/project_set.rs index 543f710b61b75..ce0bef7a832de 100644 --- a/src/stream/tests/integration_tests/project_set.rs +++ b/src/stream/tests/integration_tests/project_set.rs @@ -13,6 +13,7 @@ // limitations under the License. 
use multimap::MultiMap; +use risingwave_common::util::epoch::test_epoch; use risingwave_expr::table_function::repeat; use risingwave_stream::executor::ProjectSetExecutor; use risingwave_stream::task::ActorEvalErrorReport; @@ -60,6 +61,7 @@ fn create_executor() -> (MessageSender, BoxedMessageStream) { async fn test_project_set() { let (mut tx, mut project_set) = create_executor(); + tx.push_barrier(test_epoch(1), false); tx.push_chunk(StreamChunk::from_pretty( " I I + 1 4 @@ -76,6 +78,7 @@ async fn test_project_set() { check_until_pending( &mut project_set, expect_test::expect![[r#" + - !barrier 1 - !chunk |- +---+---+---+---+---+---+ | + | 0 | 5 | 2 | 1 | 2 | diff --git a/src/tests/simulation/src/client.rs b/src/tests/simulation/src/client.rs index 84717583eb422..65ec200b05c37 100644 --- a/src/tests/simulation/src/client.rs +++ b/src/tests/simulation/src/client.rs @@ -53,7 +53,7 @@ where stmts_iter: core::iter::Rev>, } -impl<'a, 'b> SetStmtsIterator<'a, 'b> { +impl<'a> SetStmtsIterator<'a, '_> { fn new(stmts: &'a SetStmts) -> Self { Self { _stmts: stmts, diff --git a/src/tests/sqlsmith/src/lib.rs b/src/tests/sqlsmith/src/lib.rs index 1c31041a7e484..cc16414f73cef 100644 --- a/src/tests/sqlsmith/src/lib.rs +++ b/src/tests/sqlsmith/src/lib.rs @@ -276,7 +276,7 @@ CREATE TABLE t3(v1 int, v2 bool, v3 smallint); wildcard_idx: None, constraints: [], with_options: [], - source_schema: None, + format_encode: None, source_watermarks: [], append_only: false, on_conflict: None, @@ -324,7 +324,7 @@ CREATE TABLE t3(v1 int, v2 bool, v3 smallint); wildcard_idx: None, constraints: [], with_options: [], - source_schema: None, + format_encode: None, source_watermarks: [], append_only: false, on_conflict: None, @@ -383,7 +383,7 @@ CREATE TABLE t3(v1 int, v2 bool, v3 smallint); wildcard_idx: None, constraints: [], with_options: [], - source_schema: None, + format_encode: None, source_watermarks: [], append_only: false, on_conflict: None, @@ -518,7 +518,7 @@ CREATE TABLE t4(v1 int PRIMARY KEY, v2 smallint PRIMARY KEY, v3 bool PRIMARY KEY wildcard_idx: None, constraints: [], with_options: [], - source_schema: None, + format_encode: None, source_watermarks: [], append_only: false, on_conflict: None, @@ -573,7 +573,7 @@ CREATE TABLE t4(v1 int PRIMARY KEY, v2 smallint PRIMARY KEY, v3 bool PRIMARY KEY wildcard_idx: None, constraints: [], with_options: [], - source_schema: None, + format_encode: None, source_watermarks: [], append_only: false, on_conflict: None, @@ -635,7 +635,7 @@ CREATE TABLE t4(v1 int PRIMARY KEY, v2 smallint PRIMARY KEY, v3 bool PRIMARY KEY wildcard_idx: None, constraints: [], with_options: [], - source_schema: None, + format_encode: None, source_watermarks: [], append_only: false, on_conflict: None, @@ -715,7 +715,7 @@ CREATE TABLE t4(v1 int PRIMARY KEY, v2 smallint PRIMARY KEY, v3 bool PRIMARY KEY wildcard_idx: None, constraints: [], with_options: [], - source_schema: None, + format_encode: None, source_watermarks: [], append_only: false, on_conflict: None, diff --git a/src/tests/sqlsmith/src/sql_gen/agg.rs b/src/tests/sqlsmith/src/sql_gen/agg.rs index 177603ddb333a..90f9dad3aaedf 100644 --- a/src/tests/sqlsmith/src/sql_gen/agg.rs +++ b/src/tests/sqlsmith/src/sql_gen/agg.rs @@ -24,7 +24,7 @@ use risingwave_sqlparser::ast::{ use crate::sql_gen::types::AGG_FUNC_TABLE; use crate::sql_gen::{SqlGenerator, SqlGeneratorContext}; -impl<'a, R: Rng> SqlGenerator<'a, R> { +impl SqlGenerator<'_, R> { pub fn gen_agg(&mut self, ret: &DataType) -> Expr { let funcs = match AGG_FUNC_TABLE.get(ret) { None => 
return self.gen_simple_scalar(ret), diff --git a/src/tests/sqlsmith/src/sql_gen/cast.rs b/src/tests/sqlsmith/src/sql_gen/cast.rs index 464d6e39d86f3..983333c6ca4f2 100644 --- a/src/tests/sqlsmith/src/sql_gen/cast.rs +++ b/src/tests/sqlsmith/src/sql_gen/cast.rs @@ -21,7 +21,7 @@ use risingwave_sqlparser::ast::Expr; use crate::sql_gen::types::{data_type_to_ast_data_type, EXPLICIT_CAST_TABLE}; use crate::sql_gen::{SqlGenerator, SqlGeneratorContext}; -impl<'a, R: Rng> SqlGenerator<'a, R> { +impl SqlGenerator<'_, R> { pub(crate) fn gen_explicit_cast( &mut self, ret: &DataType, diff --git a/src/tests/sqlsmith/src/sql_gen/expr.rs b/src/tests/sqlsmith/src/sql_gen/expr.rs index ee4c7ac2ee9ee..4625727f67dca 100644 --- a/src/tests/sqlsmith/src/sql_gen/expr.rs +++ b/src/tests/sqlsmith/src/sql_gen/expr.rs @@ -28,7 +28,7 @@ static STRUCT_FIELD_NAMES: [&str; 26] = [ "t", "u", "v", "w", "x", "y", "z", ]; -impl<'a, R: Rng> SqlGenerator<'a, R> { +impl SqlGenerator<'_, R> { /// In generating expression, there are two execution modes: /// 1) Can have Aggregate expressions (`can_agg` = true) /// We can have aggregate of all bound columns (those present in GROUP BY and otherwise). diff --git a/src/tests/sqlsmith/src/sql_gen/functions.rs b/src/tests/sqlsmith/src/sql_gen/functions.rs index 8cd1645ec1f5b..7e78334de9faa 100644 --- a/src/tests/sqlsmith/src/sql_gen/functions.rs +++ b/src/tests/sqlsmith/src/sql_gen/functions.rs @@ -25,7 +25,7 @@ use risingwave_sqlparser::ast::{ use crate::sql_gen::types::{FUNC_TABLE, IMPLICIT_CAST_TABLE, INVARIANT_FUNC_SET}; use crate::sql_gen::{SqlGenerator, SqlGeneratorContext}; -impl<'a, R: Rng> SqlGenerator<'a, R> { +impl SqlGenerator<'_, R> { pub fn gen_func(&mut self, ret: &DataType, context: SqlGeneratorContext) -> Expr { match self.rng.gen_bool(0.1) { true => self.gen_special_func(ret, context), diff --git a/src/tests/sqlsmith/src/sql_gen/query.rs b/src/tests/sqlsmith/src/sql_gen/query.rs index dcead4275c7ad..cfbfdb70cbf8e 100644 --- a/src/tests/sqlsmith/src/sql_gen/query.rs +++ b/src/tests/sqlsmith/src/sql_gen/query.rs @@ -30,7 +30,7 @@ use crate::sql_gen::utils::create_table_with_joins_from_table; use crate::sql_gen::{Column, SqlGenerator, SqlGeneratorContext, Table}; /// Generators -impl<'a, R: Rng> SqlGenerator<'a, R> { +impl SqlGenerator<'_, R> { /// Generates query expression and returns its /// query schema as well. pub(crate) fn gen_query(&mut self) -> (Query, Vec) { diff --git a/src/tests/sqlsmith/src/sql_gen/relation.rs b/src/tests/sqlsmith/src/sql_gen/relation.rs index 6e6db4e40493d..71242febe780c 100644 --- a/src/tests/sqlsmith/src/sql_gen/relation.rs +++ b/src/tests/sqlsmith/src/sql_gen/relation.rs @@ -33,7 +33,7 @@ fn create_equi_expr(left: String, right: String) -> Expr { create_binary_expr(BinaryOperator::Eq, left, right) } -impl<'a, R: Rng> SqlGenerator<'a, R> { +impl SqlGenerator<'_, R> { /// A relation specified in the FROM clause. pub(crate) fn gen_from_relation(&mut self) -> (TableWithJoins, Vec
) { match self.rng.gen_range(1..=4) { diff --git a/src/tests/sqlsmith/src/sql_gen/scalar.rs b/src/tests/sqlsmith/src/sql_gen/scalar.rs index 93cdf6518e73e..62cd7218dcc90 100644 --- a/src/tests/sqlsmith/src/sql_gen/scalar.rs +++ b/src/tests/sqlsmith/src/sql_gen/scalar.rs @@ -24,7 +24,7 @@ use risingwave_sqlparser::ast::{Array, DataType as AstDataType, Expr, Value}; use crate::sql_gen::expr::typed_null; use crate::sql_gen::SqlGenerator; -impl<'a, R: Rng> SqlGenerator<'a, R> { +impl SqlGenerator<'_, R> { /// Generates integer scalar expression. /// Bound: [start, end). /// Type: `DataType`. diff --git a/src/tests/sqlsmith/src/sql_gen/time_window.rs b/src/tests/sqlsmith/src/sql_gen/time_window.rs index a4d619b574526..d8e783df5bd11 100644 --- a/src/tests/sqlsmith/src/sql_gen/time_window.rs +++ b/src/tests/sqlsmith/src/sql_gen/time_window.rs @@ -22,7 +22,7 @@ use risingwave_sqlparser::ast::{ use crate::sql_gen::utils::{create_args, create_table_alias}; use crate::sql_gen::{Column, Expr, SqlGenerator, Table}; -impl<'a, R: Rng> SqlGenerator<'a, R> { +impl SqlGenerator<'_, R> { /// Generates time window functions. pub(crate) fn gen_time_window_func(&mut self) -> (TableFactor, Table) { match self.flip_coin() { diff --git a/src/tests/sqlsmith/src/sql_gen/utils.rs b/src/tests/sqlsmith/src/sql_gen/utils.rs index 0e36507e86169..a00d61456bd54 100644 --- a/src/tests/sqlsmith/src/sql_gen/utils.rs +++ b/src/tests/sqlsmith/src/sql_gen/utils.rs @@ -25,7 +25,7 @@ use crate::sql_gen::{Column, Expr, Ident, ObjectName, SqlGenerator, Table}; type Context = (Vec, Vec
); /// Context utils -impl<'a, R: Rng> SqlGenerator<'a, R> { +impl SqlGenerator<'_, R> { pub(crate) fn add_relations_to_context(&mut self, mut tables: Vec
) { for rel in &tables { let mut bound_columns = rel.get_qualified_columns(); @@ -53,7 +53,7 @@ impl<'a, R: Rng> SqlGenerator<'a, R> { } /// Gen utils -impl<'a, R: Rng> SqlGenerator<'a, R> { +impl SqlGenerator<'_, R> { pub(crate) fn gen_table_name_with_prefix(&mut self, prefix: &str) -> String { format!("{}_{}", prefix, &self.gen_relation_id()) } diff --git a/src/utils/delta_btree_map/src/lib.rs b/src/utils/delta_btree_map/src/lib.rs index 1f7e1a77190a0..afb49fcda469f 100644 --- a/src/utils/delta_btree_map/src/lib.rs +++ b/src/utils/delta_btree_map/src/lib.rs @@ -15,7 +15,7 @@ #![feature(btree_cursors)] use std::cmp::Ordering; -use std::collections::BTreeMap; +use std::collections::{btree_map, BTreeMap}; use std::ops::Bound; use educe::Educe; @@ -28,6 +28,9 @@ use enum_as_inner::EnumAsInner; pub struct DeltaBTreeMap<'a, K: Ord, V> { snapshot: &'a BTreeMap, delta: &'a BTreeMap>, + + first_key: Option<&'a K>, + last_key: Option<&'a K>, } #[derive(Debug, Clone, Copy, PartialEq, Eq, EnumAsInner)] @@ -37,8 +40,29 @@ pub enum Change { } impl<'a, K: Ord, V> DeltaBTreeMap<'a, K, V> { + /// Create a new [`DeltaBTreeMap`] from the given snapshot and delta. + /// Best case time complexity: O(1), worst case time complexity: O(m), where m is `delta.len()`. pub fn new(snapshot: &'a BTreeMap, delta: &'a BTreeMap>) -> Self { - Self { snapshot, delta } + let first_key = { + let cursor = CursorWithDelta { + ss_cursor: snapshot.lower_bound(Bound::Unbounded), + dt_cursor: delta.lower_bound(Bound::Unbounded), + }; + cursor.peek_next().map(|(key, _)| key) + }; + let last_key = { + let cursor = CursorWithDelta { + ss_cursor: snapshot.upper_bound(Bound::Unbounded), + dt_cursor: delta.upper_bound(Bound::Unbounded), + }; + cursor.peek_prev().map(|(key, _)| key) + }; + Self { + snapshot, + delta, + first_key, + last_key, + } } /// Get a reference to the snapshot. @@ -51,238 +75,170 @@ impl<'a, K: Ord, V> DeltaBTreeMap<'a, K, V> { self.delta } - /// Get the first key in the updated version of the snapshot. + /// Get the first key in the updated version of the snapshot. Complexity: O(1). pub fn first_key(&self) -> Option<&'a K> { - let cursor = CursorWithDelta { - snapshot: self.snapshot, - delta: self.delta, - curr_key_value: None, - }; - cursor.peek_next().map(|(key, _)| key) + self.first_key } - /// Get the last key in the updated version of the snapshot. + /// Get the last key in the updated version of the snapshot. Complexity: O(1). pub fn last_key(&self) -> Option<&'a K> { - let cursor = CursorWithDelta { - snapshot: self.snapshot, - delta: self.delta, - curr_key_value: None, - }; - cursor.peek_prev().map(|(key, _)| key) + self.last_key } - /// Get a [`CursorWithDelta`] pointing to the element corresponding to the given key. + /// Get a [`CursorWithDelta`] pointing at the gap before the given given key. /// If the given key is not found in either the snapshot or the delta, `None` is returned. 
- pub fn find(&self, key: &K) -> Option<CursorWithDelta<'a, K, V>> { - let ss_cursor = self.snapshot.lower_bound(Bound::Included(key)); - let dt_cursor = self.delta.lower_bound(Bound::Included(key)); - let ss_cursor_kv = ss_cursor.peek_next(); - let dt_cursor_kv = dt_cursor.peek_next(); - let curr_key_value = if dt_cursor_kv.map(|(k, _)| k) == Some(key) { - match dt_cursor_kv.unwrap() { - (key, Change::Insert(value)) => (key, value), - (_key, Change::Delete) => { - // the key is deleted - return None; - } - } - } else if ss_cursor_kv.map(|(k, _)| k) == Some(key) { - ss_cursor_kv.unwrap() - } else { - // the key doesn't exist + pub fn before(&self, key: &K) -> Option<CursorWithDelta<'a, K, V>> { + let cursor = self.lower_bound(Bound::Included(key)); + if cursor.peek_next().map(|(k, _)| k) != Some(key) { return None; - }; - Some(CursorWithDelta { - snapshot: self.snapshot, - delta: self.delta, - curr_key_value: Some(curr_key_value), - }) + } + Some(cursor) + } + + /// Get a [`CursorWithDelta`] pointing at the gap after the given key. + /// If the given key is not found in either the snapshot or the delta, `None` is returned. + pub fn after(&self, key: &K) -> Option<CursorWithDelta<'a, K, V>> { + let cursor = self.upper_bound(Bound::Included(key)); + if cursor.peek_prev().map(|(k, _)| k) != Some(key) { + return None; + } + Some(cursor) + } - /// Get a [`CursorWithDelta`] pointing to the first element that is above the given bound. + /// Get a [`CursorWithDelta`] pointing at the gap before the smallest key greater than the given bound. pub fn lower_bound(&self, bound: Bound<&K>) -> CursorWithDelta<'a, K, V> { - // the implementation is very similar to `CursorWithDelta::peek_next` - let mut ss_cursor = self.snapshot.lower_bound(bound); - let mut dt_cursor = self.delta.lower_bound(bound); - let next_ss_entry = || ss_cursor.next(); - let next_dt_entry = || dt_cursor.next(); - let curr_key_value = - CursorWithDelta::peek_impl(PeekDirection::Next, next_ss_entry, next_dt_entry); + let ss_cursor = self.snapshot.lower_bound(bound); + let dt_cursor = self.delta.lower_bound(bound); CursorWithDelta { - snapshot: self.snapshot, - delta: self.delta, - curr_key_value, + ss_cursor, + dt_cursor, } } - /// Get a [`CursorWithDelta`] pointing to the first element that is below the given bound. + /// Get a [`CursorWithDelta`] pointing at the gap after the greatest key smaller than the given bound. pub fn upper_bound(&self, bound: Bound<&K>) -> CursorWithDelta<'a, K, V> { - // the implementation is very similar to `CursorWithDelta::peek_prev` - let mut ss_cursor = self.snapshot.upper_bound(bound); - let mut dt_cursor = self.delta.upper_bound(bound); - let prev_ss_entry = || ss_cursor.prev(); - let prev_dt_entry = || dt_cursor.prev(); - let curr_key_value = - CursorWithDelta::peek_impl(PeekDirection::Prev, prev_ss_entry, prev_dt_entry); + let ss_cursor = self.snapshot.upper_bound(bound); + let dt_cursor = self.delta.upper_bound(bound); CursorWithDelta { - snapshot: self.snapshot, - delta: self.delta, - curr_key_value, + ss_cursor, + dt_cursor, } } } /// Cursor that can iterate back and forth over the updated version of the snapshot. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +/// +/// A cursor always points at the gap of items in the map. For example: +/// +/// ```text +/// | Foo | Bar | +/// ^ ^ ^ +/// 1 2 3 +/// ``` +/// +/// The cursor can be at position 1, 2, or 3. +/// If it's at position 1, `peek_prev` will return `None`, and `peek_next` will return `Foo`. +/// If it's at position 3, `peek_prev` will return `Bar`, and `peek_next` will return `None`.
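For readers skimming the patch, here is a minimal usage sketch of the gap-based cursor API introduced above (`DeltaBTreeMap::before`, `peek_prev`/`peek_next`, `next`/`prev`). It is not part of the patch itself; it assumes the crate is imported under its workspace name `delta_btree_map` and built with the workspace's nightly toolchain, and it simply mirrors the semantics described in the doc comment above (deleted keys are skipped, inserted keys appear merged into snapshot order):

```rust
use std::collections::BTreeMap;

use delta_btree_map::{Change, DeltaBTreeMap};

fn main() {
    // Snapshot contains keys 1 and 2; the delta deletes 2 and inserts 3,
    // so the merged view is {1: "1", 3: "3"}.
    let mut snapshot = BTreeMap::new();
    snapshot.insert(1, "1");
    snapshot.insert(2, "2");
    let mut delta = BTreeMap::new();
    delta.insert(2, Change::Delete);
    delta.insert(3, Change::Insert("3"));

    let merged = DeltaBTreeMap::new(&snapshot, &delta);
    assert_eq!(merged.first_key(), Some(&1));
    assert_eq!(merged.last_key(), Some(&3));

    // `before(&1)` puts the cursor at the gap just before key 1
    // (position 1 in the diagram above).
    let mut cursor = merged.before(&1).unwrap();
    assert_eq!(cursor.peek_prev(), None);
    assert_eq!(cursor.peek_next(), Some((&1, &"1")));

    // `next()` steps over one entry and returns it; the deleted key 2
    // is transparently skipped.
    assert_eq!(cursor.next(), Some((&1, &"1")));
    assert_eq!(cursor.next(), Some((&3, &"3")));
    assert_eq!(cursor.next(), None);
    assert_eq!(cursor.peek_prev(), Some((&3, &"3")));
}
```

Returning the stepped-over entry from `next()`/`prev()` is what lets the rewritten tests further down iterate with a plain `while let` loop instead of the old `key_value()` + `move_next()` pair.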
+#[derive(Debug, Clone)] pub struct CursorWithDelta<'a, K: Ord, V> { - snapshot: &'a BTreeMap<K, V>, - delta: &'a BTreeMap<K, Change<V>>, - curr_key_value: Option<(&'a K, &'a V)>, + ss_cursor: btree_map::Cursor<'a, K, V>, + dt_cursor: btree_map::Cursor<'a, K, Change<V>>, } -/// Type of cursor position. [`PositionType::Ghost`] is a special position between the first and -/// the last item, where the key and value are `None`. -#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumAsInner)] -pub enum PositionType { - Ghost, - Snapshot, - DeltaUpdate, - DeltaInsert, -} +impl<'a, K: Ord, V> CursorWithDelta<'a, K, V> { + pub fn peek_prev(&self) -> Option<(&'a K, &'a V)> { + self.peek::<false>() + } -#[derive(PartialEq, Eq)] -enum PeekDirection { - Next, - Prev, -} + pub fn peek_next(&self) -> Option<(&'a K, &'a V)> { + self.peek::<true>() + } -impl<'a, K: Ord, V> CursorWithDelta<'a, K, V> { - /// Get the cursor position type. - pub fn position(&self) -> PositionType { - let Some((key, _)) = self.curr_key_value else { - return PositionType::Ghost; - }; - if self.delta.contains_key(key) { - assert!(matches!(self.delta.get(key).unwrap(), Change::Insert(_))); - if self.snapshot.contains_key(key) { - PositionType::DeltaUpdate - } else { - PositionType::DeltaInsert - } - } else { - assert!(self.snapshot.contains_key(key)); - PositionType::Snapshot - } + pub fn prev(&mut self) -> Option<(&'a K, &'a V)> { + self.r#move::<false>() } - /// Get the key pointed by the cursor. - pub fn key(&self) -> Option<&'a K> { - self.curr_key_value.map(|(k, _)| k) + #[allow(clippy::should_implement_trait)] + pub fn next(&mut self) -> Option<(&'a K, &'a V)> { + self.r#move::<true>() } - /// Get the value pointed by the cursor. - pub fn value(&self) -> Option<&'a V> { - self.curr_key_value.map(|(_, v)| v) + fn peek<const NEXT: bool>(&self) -> Option<(&'a K, &'a V)> { + let mut ss_cursor = self.ss_cursor.clone(); + let mut dt_cursor = self.dt_cursor.clone(); + let res = Self::move_impl::<NEXT>(&mut ss_cursor, &mut dt_cursor); + res } - /// Get the key-value pair pointed by the cursor.
- pub fn key_value(&self) -> Option<(&'a K, &'a V)> { - self.curr_key_value + fn r#move<const NEXT: bool>(&mut self) -> Option<(&'a K, &'a V)> { + let mut ss_cursor = self.ss_cursor.clone(); + let mut dt_cursor = self.dt_cursor.clone(); + let res = Self::move_impl::<NEXT>(&mut ss_cursor, &mut dt_cursor); + self.ss_cursor = ss_cursor; + self.dt_cursor = dt_cursor; + res } - fn peek_impl( - direction: PeekDirection, - mut next_ss_entry: impl FnMut() -> Option<(&'a K, &'a V)>, - mut next_dt_entry: impl FnMut() -> Option<(&'a K, &'a Change<V>)>, + fn move_impl<const NEXT: bool>( + ss_cursor: &mut btree_map::Cursor<'a, K, V>, + dt_cursor: &mut btree_map::Cursor<'a, K, Change<V>>, ) -> Option<(&'a K, &'a V)> { loop { - match (next_ss_entry(), next_dt_entry()) { + let ss_peek = if NEXT { + ss_cursor.peek_next() + } else { + ss_cursor.peek_prev() + }; + let dt_peek = if NEXT { + dt_cursor.peek_next() + } else { + dt_cursor.peek_prev() + }; + + let in_delta = match (ss_peek, dt_peek) { (None, None) => return None, - (None, Some((key, change))) => return Some((key, change.as_insert().unwrap())), - (Some((key, value)), None) => return Some((key, value)), - (Some((ss_key, ss_value)), Some((dt_key, dt_change))) => match ss_key.cmp(dt_key) { - Ordering::Less => { - if direction == PeekDirection::Next { - return Some((ss_key, ss_value)); + (None, Some(_)) => true, + (Some(_), None) => false, + (Some((ss_key, _)), Some((dt_key, dt_change))) => match ss_key.cmp(dt_key) { + Ordering::Less => !NEXT, // if NEXT { in snapshot } else { in delta } + Ordering::Greater => NEXT, // if NEXT { in delta } else { in snapshot } + Ordering::Equal => { + if NEXT { + ss_cursor.next().unwrap(); } else { - return Some((dt_key, dt_change.as_insert().unwrap())); + ss_cursor.prev().unwrap(); } - } - Ordering::Greater => { - if direction == PeekDirection::Next { - return Some((dt_key, dt_change.as_insert().unwrap())); - } else { - return Some((ss_key, ss_value)); + match dt_change { + Change::Insert(_) => true, // in delta + Change::Delete => { + if NEXT { + dt_cursor.next().unwrap(); + } else { + dt_cursor.prev().unwrap(); + } + continue; + } } } - Ordering::Equal => match dt_change { - Change::Insert(v) => return Some((ss_key, v)), - Change::Delete => continue, - }, }, + }; + + if in_delta { + let (key, change) = if NEXT { + dt_cursor.next().unwrap() + } else { + dt_cursor.prev().unwrap() + }; + return Some((key, change.as_insert().unwrap())); + } else { + return if NEXT { + ss_cursor.next() + } else { + ss_cursor.prev() + }; } } } - - /// Peek the next key-value pair. - pub fn peek_next(&self) -> Option<(&'a K, &'a V)> { - if let Some(key) = self.key() { - let mut ss_cursor = self.snapshot.lower_bound(Bound::Included(key)); - let mut dt_cursor = self.delta.lower_bound(Bound::Included(key)); - // either one of `ss_cursor.key()` and `dt_cursor.key()` == `Some(key)`, or both are - if ss_cursor.peek_next().map(|(k, _)| k) == Some(key) { - ss_cursor.next(); - } - if dt_cursor.peek_next().map(|(k, _)| k) == Some(key) { - dt_cursor.next(); - } - let next_ss_entry = || ss_cursor.next(); - let next_dt_entry = || dt_cursor.next(); - Self::peek_impl(PeekDirection::Next, next_ss_entry, next_dt_entry) - } else { - // we are at the ghost position, now let's go back to the beginning - let mut ss_iter = self.snapshot.iter(); - let mut dt_iter = self.delta.iter(); - Self::peek_impl(PeekDirection::Next, || ss_iter.next(), || dt_iter.next()) - } - } - - /// Peek the previous key-value pair.
- pub fn peek_prev(&self) -> Option<(&'a K, &'a V)> { - if let Some(key) = self.key() { - let mut ss_cursor = self.snapshot.upper_bound(Bound::Included(key)); - let mut dt_cursor = self.delta.upper_bound(Bound::Included(key)); - // either one of `ss_cursor.key()` and `dt_cursor.key()` == `Some(key)`, or both are - if ss_cursor.peek_prev().map(|(k, _)| k) == Some(key) { - ss_cursor.prev(); - } - if dt_cursor.peek_prev().map(|(k, _)| k) == Some(key) { - dt_cursor.prev(); - } - let next_ss_entry = || ss_cursor.prev(); - let next_dt_entry = || dt_cursor.prev(); - Self::peek_impl(PeekDirection::Prev, next_ss_entry, next_dt_entry) - } else { - // we are at the ghost position, now let's go back to the end - let mut ss_iter = self.snapshot.iter(); - let mut dt_iter = self.delta.iter(); - Self::peek_impl( - PeekDirection::Prev, - || ss_iter.next_back(), - || dt_iter.next_back(), - ) - } - } - - /// Move the cursor to the next position. - pub fn move_next(&mut self) { - self.curr_key_value = self.peek_next(); - } - - /// Move the cursor to the previous position. - pub fn move_prev(&mut self) { - self.curr_key_value = self.peek_prev(); - } } #[cfg(test)] @@ -297,9 +253,10 @@ mod tests { assert_eq!(delta_map.first_key(), None); assert_eq!(delta_map.last_key(), None); - assert_eq!(delta_map.find(&1), None); - assert_eq!(delta_map.lower_bound(Bound::Included(&1)).key(), None); - assert_eq!(delta_map.upper_bound(Bound::Included(&1)).key(), None); + assert!(delta_map.before(&1).is_none()); + assert!(delta_map.after(&1).is_none()); + assert_eq!(delta_map.lower_bound(Bound::Included(&1)).peek_next(), None); + assert_eq!(delta_map.upper_bound(Bound::Included(&1)).peek_prev(), None); let mut map = BTreeMap::new(); map.insert(1, "1"); @@ -310,9 +267,9 @@ mod tests { let delta_map = DeltaBTreeMap::new(&map, &delta); assert_eq!(delta_map.first_key(), None); assert_eq!(delta_map.last_key(), None); - assert_eq!(delta_map.find(&1), None); - assert_eq!(delta_map.find(&2), None); - assert_eq!(delta_map.find(&3), None); + assert!(delta_map.before(&1).is_none()); + assert!(delta_map.before(&2).is_none()); + assert!(delta_map.before(&3).is_none()); } #[test] @@ -326,40 +283,46 @@ mod tests { assert_eq!(delta_map.first_key(), Some(&1)); assert_eq!(delta_map.last_key(), Some(&5)); - assert_eq!(delta_map.find(&100), None); - assert_eq!(delta_map.lower_bound(Bound::Included(&1)).key(), Some(&1)); - assert_eq!(delta_map.lower_bound(Bound::Excluded(&3)).key(), Some(&5)); - assert_eq!(delta_map.upper_bound(Bound::Included(&1)).key(), Some(&1)); - assert_eq!(delta_map.upper_bound(Bound::Excluded(&3)).key(), Some(&2)); - - let mut cursor = delta_map.find(&2).unwrap(); - assert_eq!(cursor.position(), PositionType::Snapshot); - assert_eq!(cursor.key(), Some(&2)); - assert_eq!(cursor.value(), Some(&"2")); - assert_eq!(cursor.key_value(), Some((&2, &"2"))); - assert_eq!(cursor.peek_next(), Some((&5, &"5"))); - assert_eq!(cursor.peek_prev(), Some((&1, &"1"))); - cursor.move_next(); - assert_eq!(cursor.key(), Some(&5)); - assert_eq!(cursor.value(), Some(&"5")); - cursor.move_next(); - assert_eq!(cursor.position(), PositionType::Ghost); - assert_eq!(cursor.key(), None); - assert_eq!(cursor.value(), None); - cursor.move_prev(); - assert_eq!(cursor.key(), Some(&5)); - assert_eq!(cursor.value(), Some(&"5")); - cursor.move_prev(); - cursor.move_prev(); - assert_eq!(cursor.key(), Some(&1)); - assert_eq!(cursor.value(), Some(&"1")); - assert_eq!(cursor.peek_prev(), None); + assert!(delta_map.before(&100).is_none()); + assert_eq!( + 
delta_map.lower_bound(Bound::Included(&1)).peek_next(), + Some((&1, &"1")) + ); + assert_eq!( + delta_map.lower_bound(Bound::Excluded(&3)).peek_next(), + Some((&5, &"5")) + ); + assert_eq!( + delta_map.upper_bound(Bound::Included(&1)).peek_prev(), + Some((&1, &"1")) + ); + assert_eq!( + delta_map.upper_bound(Bound::Excluded(&3)).peek_prev(), + Some((&2, &"2")) + ); + + let mut cursor = delta_map.before(&2).unwrap(); assert_eq!(cursor.peek_next(), Some((&2, &"2"))); - cursor.move_prev(); - assert_eq!(cursor.key(), None); - assert_eq!(cursor.value(), None); + assert_eq!(cursor.peek_prev(), Some((&1, &"1"))); + let (key, value) = cursor.next().unwrap(); + assert_eq!(key, &2); + assert_eq!(value, &"2"); + assert_eq!(cursor.peek_next(), Some((&5, &"5"))); + assert_eq!(cursor.peek_prev(), Some((&2, &"2"))); + cursor.next(); + assert_eq!(cursor.peek_next(), None); + cursor.next(); + assert_eq!(cursor.peek_next(), None); assert_eq!(cursor.peek_prev(), Some((&5, &"5"))); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&5, &"5"))); + cursor.prev(); + cursor.prev(); assert_eq!(cursor.peek_next(), Some((&1, &"1"))); + assert_eq!(cursor.peek_prev(), None); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&1, &"1"))); + assert_eq!(cursor.peek_prev(), None); } #[test] @@ -372,35 +335,42 @@ mod tests { assert_eq!(delta_map.first_key(), Some(&1)); assert_eq!(delta_map.last_key(), Some(&2)); - assert_eq!(delta_map.find(&100), None); - assert_eq!(delta_map.lower_bound(Bound::Included(&1)).key(), Some(&1)); - assert_eq!(delta_map.lower_bound(Bound::Excluded(&1)).key(), Some(&2)); - assert_eq!(delta_map.upper_bound(Bound::Included(&1)).key(), Some(&1)); - assert_eq!(delta_map.upper_bound(Bound::Excluded(&10)).key(), Some(&2)); - - let mut cursor = delta_map.find(&2).unwrap(); - assert_eq!(cursor.position(), PositionType::DeltaInsert); - assert_eq!(cursor.key(), Some(&2)); - assert_eq!(cursor.value(), Some(&"2")); - assert_eq!(cursor.key_value(), Some((&2, &"2"))); + assert!(delta_map.before(&100).is_none()); + assert_eq!( + delta_map.lower_bound(Bound::Included(&1)).peek_next(), + Some((&1, &"1")) + ); + assert_eq!( + delta_map.lower_bound(Bound::Excluded(&1)).peek_next(), + Some((&2, &"2")) + ); + assert_eq!( + delta_map.upper_bound(Bound::Included(&1)).peek_prev(), + Some((&1, &"1")) + ); + assert_eq!( + delta_map.upper_bound(Bound::Excluded(&10)).peek_prev(), + Some((&2, &"2")) + ); + + let mut cursor = delta_map.before(&2).unwrap(); + assert_eq!(cursor.peek_next(), Some((&2, &"2"))); + assert_eq!(cursor.peek_prev(), Some((&1, &"1"))); + cursor.next(); + assert_eq!(cursor.peek_next(), None); + assert_eq!(cursor.peek_prev(), Some((&2, &"2"))); + cursor.next(); assert_eq!(cursor.peek_next(), None); + assert_eq!(cursor.peek_prev(), Some((&2, &"2"))); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&2, &"2"))); assert_eq!(cursor.peek_prev(), Some((&1, &"1"))); - cursor.move_next(); - assert_eq!(cursor.key(), None); - assert_eq!(cursor.value(), None); - cursor.move_prev(); - assert_eq!(cursor.key(), Some(&2)); - assert_eq!(cursor.value(), Some(&"2")); - cursor.move_prev(); - assert_eq!(cursor.key(), Some(&1)); - assert_eq!(cursor.value(), Some(&"1")); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&1, &"1"))); assert_eq!(cursor.peek_prev(), None); - assert_eq!(cursor.peek_next(), Some((&2, &"2"))); - cursor.move_prev(); - assert_eq!(cursor.key(), None); - assert_eq!(cursor.value(), None); - assert_eq!(cursor.peek_prev(), Some((&2, &"2"))); + cursor.prev(); 
assert_eq!(cursor.peek_next(), Some((&1, &"1"))); + assert_eq!(cursor.peek_prev(), None); } #[test] @@ -414,26 +384,37 @@ mod tests { assert_eq!(delta_map.first_key(), Some(&3)); assert_eq!(delta_map.last_key(), Some(&3)); - assert_eq!(delta_map.find(&1), None); - assert_eq!(delta_map.find(&2), None); - assert_eq!(delta_map.lower_bound(Bound::Included(&1)).key(), Some(&3)); - assert_eq!(delta_map.lower_bound(Bound::Excluded(&0)).key(), Some(&3)); - assert_eq!(delta_map.upper_bound(Bound::Included(&1)).key(), None); - assert_eq!(delta_map.upper_bound(Bound::Excluded(&10)).key(), Some(&3)); - - let mut cursor = delta_map.find(&3).unwrap(); - assert_eq!(cursor.position(), PositionType::Snapshot); - assert_eq!(cursor.key(), Some(&3)); - assert_eq!(cursor.value(), Some(&"3")); - assert_eq!(cursor.key_value(), Some((&3, &"3"))); + assert!(delta_map.before(&1).is_none()); + assert!(delta_map.before(&2).is_none()); + assert_eq!( + delta_map.lower_bound(Bound::Included(&1)).peek_next(), + Some((&3, &"3")) + ); + assert_eq!( + delta_map.lower_bound(Bound::Excluded(&0)).peek_next(), + Some((&3, &"3")) + ); + assert_eq!(delta_map.upper_bound(Bound::Included(&1)).peek_prev(), None); + assert_eq!( + delta_map.upper_bound(Bound::Excluded(&10)).peek_prev(), + Some((&3, &"3")) + ); + + let mut cursor = delta_map.before(&3).unwrap(); + assert_eq!(cursor.peek_next(), Some((&3, &"3"))); + assert_eq!(cursor.peek_prev(), None); + cursor.next(); assert_eq!(cursor.peek_next(), None); + assert_eq!(cursor.peek_prev(), Some((&3, &"3"))); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&3, &"3"))); + assert_eq!(cursor.peek_prev(), None); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&3, &"3"))); assert_eq!(cursor.peek_prev(), None); - cursor.move_next(); - assert_eq!(cursor.key(), None); - assert_eq!(cursor.value(), None); - cursor.move_prev(); - assert_eq!(cursor.key(), Some(&3)); - assert_eq!(cursor.value(), Some(&"3")); + cursor.next(); + assert_eq!(cursor.peek_next(), None); + assert_eq!(cursor.peek_prev(), Some((&3, &"3"))); } #[test] @@ -447,15 +428,30 @@ mod tests { assert_eq!(delta_map.first_key(), Some(&1)); assert_eq!(delta_map.last_key(), Some(&1)); - assert_eq!(delta_map.find(&2), None); - assert_eq!(delta_map.find(&3), None); - assert_eq!(delta_map.lower_bound(Bound::Included(&1)).key(), Some(&1)); - assert_eq!(delta_map.lower_bound(Bound::Excluded(&1)).key(), None); - assert_eq!(delta_map.upper_bound(Bound::Included(&3)).key(), Some(&1)); - assert_eq!(delta_map.upper_bound(Bound::Excluded(&3)).key(), Some(&1)); - - let cursor = delta_map.find(&1).unwrap(); - assert_eq!(cursor.position(), PositionType::Snapshot); + assert!(delta_map.before(&2).is_none()); + assert!(delta_map.before(&3).is_none()); + assert_eq!( + delta_map.lower_bound(Bound::Included(&1)).peek_next(), + Some((&1, &"1")) + ); + assert_eq!(delta_map.lower_bound(Bound::Excluded(&1)).peek_next(), None); + assert_eq!( + delta_map.upper_bound(Bound::Included(&3)).peek_prev(), + Some((&1, &"1")) + ); + assert_eq!( + delta_map.upper_bound(Bound::Excluded(&3)).peek_prev(), + Some((&1, &"1")) + ); + + let mut cursor = delta_map.before(&1).unwrap(); + assert_eq!(cursor.peek_next(), Some((&1, &"1"))); + cursor.next(); + assert_eq!(cursor.peek_next(), None); + assert_eq!(cursor.peek_prev(), Some((&1, &"1"))); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&1, &"1"))); + assert_eq!(cursor.peek_prev(), None); } #[test] @@ -470,11 +466,11 @@ mod tests { assert_eq!(delta_map.first_key(), None); assert_eq!(delta_map.last_key(), 
None); - assert_eq!(delta_map.find(&1), None); - assert_eq!(delta_map.find(&2), None); - assert_eq!(delta_map.find(&3), None); - assert_eq!(delta_map.lower_bound(Bound::Included(&1)).key(), None); - assert_eq!(delta_map.upper_bound(Bound::Excluded(&3)).key(), None); + assert!(delta_map.before(&1).is_none()); + assert!(delta_map.before(&2).is_none()); + assert!(delta_map.before(&3).is_none()); + assert_eq!(delta_map.lower_bound(Bound::Included(&1)).peek_next(), None); + assert_eq!(delta_map.upper_bound(Bound::Excluded(&3)).peek_prev(), None); } #[test] @@ -488,22 +484,30 @@ mod tests { assert_eq!(delta_map.first_key(), Some(&1)); assert_eq!(delta_map.last_key(), Some(&3)); - assert_eq!(delta_map.find(&10), None); - assert_eq!(delta_map.lower_bound(Bound::Included(&1)).key(), Some(&1)); - assert_eq!(delta_map.lower_bound(Bound::Excluded(&1)).key(), Some(&2)); - assert_eq!(delta_map.upper_bound(Bound::Included(&2)).key(), Some(&2)); - assert_eq!(delta_map.upper_bound(Bound::Excluded(&2)).key(), Some(&1)); - - let mut cursor = delta_map.find(&2).unwrap(); - assert_eq!(cursor.position(), PositionType::DeltaInsert); - assert_eq!(cursor.key(), Some(&2)); - assert_eq!(cursor.value(), Some(&"2")); - assert_eq!(cursor.key_value(), Some((&2, &"2"))); - assert_eq!(cursor.peek_next(), Some((&3, &"3"))); + assert!(delta_map.before(&10).is_none()); + assert_eq!( + delta_map.lower_bound(Bound::Included(&1)).peek_next(), + Some((&1, &"1")) + ); + assert_eq!( + delta_map.lower_bound(Bound::Excluded(&1)).peek_next(), + Some((&2, &"2")) + ); + assert_eq!( + delta_map.upper_bound(Bound::Included(&2)).peek_prev(), + Some((&2, &"2")) + ); + assert_eq!( + delta_map.upper_bound(Bound::Excluded(&2)).peek_prev(), + Some((&1, &"1")) + ); + + let mut cursor = delta_map.before(&2).unwrap(); + assert_eq!(cursor.peek_next(), Some((&2, &"2"))); assert_eq!(cursor.peek_prev(), Some((&1, &"1"))); - cursor.move_next(); - assert_eq!(cursor.key(), Some(&3)); - assert_eq!(cursor.value(), Some(&"3")); + cursor.next(); + assert_eq!(cursor.peek_next(), Some((&3, &"3"))); + assert_eq!(cursor.peek_prev(), Some((&2, &"2"))); } #[test] @@ -518,19 +522,15 @@ mod tests { assert_eq!(delta_map.first_key(), Some(&1)); assert_eq!(delta_map.last_key(), Some(&3)); - let mut cursor = delta_map.find(&1).unwrap(); - assert_eq!(cursor.position(), PositionType::DeltaUpdate); - assert_eq!(cursor.key(), Some(&1)); - assert_eq!(cursor.value(), Some(&"1 new")); - assert_eq!(cursor.key_value(), Some((&1, &"1 new"))); + let mut cursor = delta_map.before(&1).unwrap(); + assert_eq!(cursor.peek_next(), Some((&1, &"1 new"))); + assert_eq!(cursor.peek_prev(), None); + cursor.next(); assert_eq!(cursor.peek_next(), Some((&3, &"3"))); + assert_eq!(cursor.peek_prev(), Some((&1, &"1 new"))); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&1, &"1 new"))); assert_eq!(cursor.peek_prev(), None); - cursor.move_next(); - assert_eq!(cursor.key(), Some(&3)); - assert_eq!(cursor.value(), Some(&"3")); - cursor.move_prev(); - assert_eq!(cursor.key(), Some(&1)); - assert_eq!(cursor.value(), Some(&"1 new")); } #[test] @@ -545,19 +545,15 @@ mod tests { assert_eq!(delta_map.first_key(), Some(&1)); assert_eq!(delta_map.last_key(), Some(&3)); - let mut cursor = delta_map.find(&3).unwrap(); - assert_eq!(cursor.position(), PositionType::DeltaUpdate); - assert_eq!(cursor.key(), Some(&3)); - assert_eq!(cursor.value(), Some(&"3 new")); - assert_eq!(cursor.key_value(), Some((&3, &"3 new"))); + let mut cursor = delta_map.before(&3).unwrap(); + assert_eq!(cursor.peek_next(), 
Some((&3, &"3 new"))); + assert_eq!(cursor.peek_prev(), Some((&1, &"1"))); + cursor.next(); assert_eq!(cursor.peek_next(), None); + assert_eq!(cursor.peek_prev(), Some((&3, &"3 new"))); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&3, &"3 new"))); assert_eq!(cursor.peek_prev(), Some((&1, &"1"))); - cursor.move_next(); - assert_eq!(cursor.key(), None); - assert_eq!(cursor.value(), None); - cursor.move_prev(); - assert_eq!(cursor.key(), Some(&3)); - assert_eq!(cursor.value(), Some(&"3 new")); } #[test] @@ -575,50 +571,58 @@ mod tests { assert_eq!(delta_map.first_key(), Some(&0)); assert_eq!(delta_map.last_key(), Some(&4)); - assert_eq!(delta_map.find(&-1), None); - assert_eq!(delta_map.find(&3), None); - assert_eq!(delta_map.find(&10), None); - assert_eq!(delta_map.lower_bound(Bound::Included(&0)).key(), Some(&0)); - assert_eq!(delta_map.lower_bound(Bound::Excluded(&0)).key(), Some(&1)); - assert_eq!(delta_map.lower_bound(Bound::Included(&3)).key(), Some(&4)); - assert_eq!(delta_map.upper_bound(Bound::Included(&5)).key(), Some(&4)); - assert_eq!(delta_map.upper_bound(Bound::Excluded(&4)).key(), Some(&2)); - assert_eq!(delta_map.upper_bound(Bound::Excluded(&2)).key(), Some(&1)); - - let mut cursor = delta_map.find(&0).unwrap(); - assert_eq!(cursor.position(), PositionType::DeltaInsert); - assert_eq!(cursor.key_value(), Some((&0, &"0"))); - cursor.move_next(); - assert_eq!(cursor.position(), PositionType::DeltaUpdate); - assert_eq!(cursor.key_value(), Some((&1, &"1 new"))); - cursor.move_next(); - assert_eq!(cursor.position(), PositionType::Snapshot); - assert_eq!(cursor.key_value(), Some((&2, &"2"))); - cursor.move_next(); - assert_eq!(cursor.position(), PositionType::DeltaInsert); - assert_eq!(cursor.key_value(), Some((&4, &"4"))); - cursor.move_next(); - assert_eq!(cursor.position(), PositionType::Ghost); - assert_eq!(cursor.key_value(), None); - cursor.move_next(); - assert_eq!(cursor.position(), PositionType::DeltaInsert); - assert_eq!(cursor.key_value(), Some((&0, &"0"))); - cursor.move_prev(); - assert_eq!(cursor.position(), PositionType::Ghost); - cursor.move_prev(); - assert_eq!(cursor.position(), PositionType::DeltaInsert); - assert_eq!(cursor.key_value(), Some((&4, &"4"))); - cursor.move_prev(); - assert_eq!(cursor.position(), PositionType::Snapshot); - assert_eq!(cursor.key_value(), Some((&2, &"2"))); - cursor.move_prev(); - assert_eq!(cursor.position(), PositionType::DeltaUpdate); - assert_eq!(cursor.key_value(), Some((&1, &"1 new"))); - cursor.move_prev(); - assert_eq!(cursor.position(), PositionType::DeltaInsert); - assert_eq!(cursor.key_value(), Some((&0, &"0"))); - cursor.move_prev(); - assert_eq!(cursor.position(), PositionType::Ghost); + assert!(delta_map.before(&-1).is_none()); + assert!(delta_map.before(&3).is_none()); + assert!(delta_map.before(&10).is_none()); + assert_eq!( + delta_map.lower_bound(Bound::Included(&0)).peek_next(), + Some((&0, &"0")) + ); + assert_eq!( + delta_map.lower_bound(Bound::Excluded(&0)).peek_next(), + Some((&1, &"1 new")) + ); + assert_eq!( + delta_map.lower_bound(Bound::Included(&3)).peek_next(), + Some((&4, &"4")) + ); + assert_eq!( + delta_map.upper_bound(Bound::Included(&5)).peek_prev(), + Some((&4, &"4")) + ); + assert_eq!( + delta_map.upper_bound(Bound::Excluded(&4)).peek_prev(), + Some((&2, &"2")) + ); + assert_eq!( + delta_map.upper_bound(Bound::Excluded(&2)).peek_prev(), + Some((&1, &"1 new")) + ); + + let mut cursor = delta_map.before(&0).unwrap(); + assert_eq!(cursor.peek_next(), Some((&0, &"0"))); + cursor.next(); + 
assert_eq!(cursor.peek_next(), Some((&1, &"1 new"))); + cursor.next(); + assert_eq!(cursor.peek_next(), Some((&2, &"2"))); + cursor.next(); + assert_eq!(cursor.peek_next(), Some((&4, &"4"))); + cursor.next(); + assert_eq!(cursor.peek_next(), None); + cursor.next(); + assert_eq!(cursor.peek_next(), None); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&4, &"4"))); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&2, &"2"))); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&1, &"1 new"))); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&0, &"0"))); + assert_eq!(cursor.peek_prev(), None); + cursor.prev(); + assert_eq!(cursor.peek_next(), Some((&0, &"0"))); + assert_eq!(cursor.peek_prev(), None); } #[test] @@ -640,19 +644,17 @@ mod tests { assert_eq!(delta_map.first_key(), Some(&0)); assert_eq!(delta_map.last_key(), Some(&3)); - let mut cursor = delta_map.find(&0).unwrap(); + let mut cursor = delta_map.before(&0).unwrap(); let mut res = vec![]; - while let Some((k, v)) = cursor.key_value() { + while let Some((k, v)) = cursor.next() { res.push((*k, *v)); - cursor.move_next(); } assert_eq!(res, vec![(0, "0"), (1, "1 new"), (3, "3")]); - let mut cursor = delta_map.find(&3).unwrap(); + let mut cursor = delta_map.after(&3).unwrap(); let mut res = vec![]; - while let Some((k, v)) = cursor.key_value() { + while let Some((k, v)) = cursor.prev() { res.push((*k, *v)); - cursor.move_prev(); } assert_eq!(res, vec![(3, "3"), (1, "1 new"), (0, "0")]); } diff --git a/src/utils/pgwire/src/pg_message.rs b/src/utils/pgwire/src/pg_message.rs index 00c9a09b2eda6..58a1cccaf562c 100644 --- a/src/utils/pgwire/src/pg_message.rs +++ b/src/utils/pgwire/src/pg_message.rs @@ -426,7 +426,7 @@ pub enum TransactionStatus { InFailedTransaction, } -impl<'a> BeMessage<'a> { +impl BeMessage<'_> { /// Write message to the given buf. pub fn write(buf: &mut BytesMut, message: &BeMessage<'_>) -> Result<()> { match message { diff --git a/src/utils/pgwire/src/pg_protocol.rs b/src/utils/pgwire/src/pg_protocol.rs index 630feb1ebd28c..5a2120b91f3d1 100644 --- a/src/utils/pgwire/src/pg_protocol.rs +++ b/src/utils/pgwire/src/pg_protocol.rs @@ -1190,7 +1190,7 @@ pub mod truncated_fmt { finished: bool, f: &'a mut Formatter<'b>, } - impl<'a, 'b> Write for TruncatedFormatter<'a, 'b> { + impl Write for TruncatedFormatter<'_, '_> { fn write_str(&mut self, s: &str) -> Result { if self.finished { return Ok(()); @@ -1212,7 +1212,7 @@ pub mod truncated_fmt { pub struct TruncatedFmt<'a, T>(pub &'a T, pub usize); - impl<'a, T> Debug for TruncatedFmt<'a, T> + impl Debug for TruncatedFmt<'_, T> where T: Debug, { @@ -1226,7 +1226,7 @@ pub mod truncated_fmt { } } - impl<'a, T> Display for TruncatedFmt<'a, T> + impl Display for TruncatedFmt<'_, T> where T: Display, { diff --git a/src/utils/pgwire/src/types.rs b/src/utils/pgwire/src/types.rs index c76aa20aac4cd..e8d7dc52101aa 100644 --- a/src/utils/pgwire/src/types.rs +++ b/src/utils/pgwire/src/types.rs @@ -94,7 +94,7 @@ where default_format: Format, } -impl<'a, 'b> FormatIterator<'a, 'b> { +impl<'a> FormatIterator<'a, '_> { pub fn new(provided_formats: &'a [Format], actual_len: usize) -> Result { if !provided_formats.is_empty() && provided_formats.len() != 1
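The sqlsmith and pgwire hunks in this patch all apply the same mechanical impl-header cleanup: a lifetime parameter that is only mentioned in the self type is replaced by `'_`, while type parameters such as `R: Rng` or `T` must remain in the impl generics. A standalone sketch of that pattern (the `Wrapper` type below is hypothetical, not taken from the patch):

```rust
use std::fmt::Debug;

// Hypothetical example type, not part of this patch.
struct Wrapper<'a, T>(&'a T);

// Before the cleanup this would read `impl<'a, T: Debug> Wrapper<'a, T>`;
// the lifetime only appears in the self type, so it can be elided to `'_`,
// but the type parameter `T` has to stay in the impl generics.
impl<T: Debug> Wrapper<'_, T> {
    fn show(&self) -> String {
        format!("{:?}", self.0)
    }
}

fn main() {
    let x = 42;
    assert_eq!(Wrapper(&x).show(), "42");
}
```

The same rule explains why `FormatIterator` keeps `impl<'a>` in the hunk above: that lifetime is still used in the `new` signature (`&'a [Format]`), so only the second, unused one is elided.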