diff --git a/.github/workflows/ci-doc-checker.yml b/.github/workflows/ci-doc-checker.yml index 61b12bf4a9563..bbec1b692fad7 100644 --- a/.github/workflows/ci-doc-checker.yml +++ b/.github/workflows/ci-doc-checker.yml @@ -87,7 +87,7 @@ jobs: ignore: node_modules version: 0.28.1 - behavior-unchange: + behavior-change-unset: runs-on: ubuntu-latest needs: add-doc-label steps: @@ -97,10 +97,14 @@ jobs: githubToken: ${{ secrets.PAT }} find: '[x] Yes, this PR will result in a change in behavior.' replace: '[ ] Yes, this PR will result in a change in behavior.' - + + behavior-unchange-set: + runs-on: ubuntu-latest + needs: add-doc-label + steps: - name: Replace Pull Request Body uses: ivangabriele/find-and-replace-pull-request-body@v1.1.5 with: githubToken: ${{ secrets.PAT }} find: '[ ] No, this PR will not result in a change in behavior.' - replace: '[x] No, this PR will not result in a change in behavior.' \ No newline at end of file + replace: '[x] No, this PR will not result in a change in behavior.' diff --git a/.github/workflows/release-docker-image.yml b/.github/workflows/release-docker-image.yml index c4863dab79d56..632d5c5ee5c9a 100644 --- a/.github/workflows/release-docker-image.yml +++ b/.github/workflows/release-docker-image.yml @@ -1,8 +1,9 @@ name: 'release docker images' -on: - release: - types: [published] +# Don't trigger actions automatically +#on: +# release: +# types: [published] # This allows a subsequently queued workflow run to interrupt previous runs concurrency: @@ -32,7 +33,7 @@ jobs: - name: build artifact docker image run: | - DOCKER_BUILDKIT=1 docker build --rm=true --build-arg RELEASE_VERSION=${RELEASE_VERSION} -f docker/dockerfiles/artifacts/artifact-ubuntu.Dockerfile -t artifact-ubuntu:${RELEASE_VERSION} . + DOCKER_BUILDKIT=1 docker build --rm=true --build-arg RELEASE_VERSION=${RELEASE_VERSION} -f docker/dockerfiles/artifacts/artifact.Dockerfile -t artifact-ubuntu:${RELEASE_VERSION} . 
- name: build and publish fe docker image run: | diff --git a/be/src/agent/task_worker_pool.cpp b/be/src/agent/task_worker_pool.cpp index 3fc35700cd55b..87ab9310a416e 100644 --- a/be/src/agent/task_worker_pool.cpp +++ b/be/src/agent/task_worker_pool.cpp @@ -71,6 +71,7 @@ #include "storage/task/engine_storage_migration_task.h" #include "storage/update_manager.h" #include "storage/utils.h" +#include "util/misc.h" #include "util/starrocks_metrics.h" #include "util/stopwatch.hpp" #include "util/thread.h" @@ -600,7 +601,8 @@ void* ReportTaskWorkerPool::_worker_thread_callback(void* arg_this) { << ", err=" << status; } - sleep(config::report_task_interval_seconds); + nap_sleep(config::report_task_interval_seconds, + [worker_pool_this] { return worker_pool_this->_stopped.load(); }); } return nullptr; @@ -740,7 +742,8 @@ void* ReportWorkgroupTaskWorkerPool::_worker_thread_callback(void* arg_this) { if (result.__isset.workgroup_ops) { workgroup::WorkGroupManager::instance()->apply(result.workgroup_ops); } - sleep(config::report_workgroup_interval_seconds); + nap_sleep(config::report_workgroup_interval_seconds, + [worker_pool_this] { return worker_pool_this->_stopped.load(); }); } return nullptr; diff --git a/be/src/common/daemon.cpp b/be/src/common/daemon.cpp index 40a49c93f56f5..599d534a1aa98 100644 --- a/be/src/common/daemon.cpp +++ b/be/src/common/daemon.cpp @@ -54,6 +54,7 @@ #include "util/gc_helper.h" #include "util/logging.h" #include "util/mem_info.h" +#include "util/misc.h" #include "util/monotime.h" #include "util/network_util.h" #include "util/starrocks_metrics.h" @@ -100,7 +101,7 @@ void gc_memory(void* arg_this) { auto* daemon = static_cast(arg_this); while (!daemon->stopped()) { - sleep(static_cast(config::memory_maintenance_sleep_time_s)); + nap_sleep(config::memory_maintenance_sleep_time_s, [daemon] { return daemon->stopped(); }); ReleaseColumnPool releaser(kFreeRatio); ForEach(releaser); @@ -184,7 +185,7 @@ void calculate_metrics(void* arg_this) { 
mem_metrics->update_mem_bytes.value(), mem_metrics->chunk_allocator_mem_bytes.value(), mem_metrics->clone_mem_bytes.value(), mem_metrics->consistency_mem_bytes.value()); - sleep(15); // 15 seconds + nap_sleep(15, [daemon] { return daemon->stopped(); }); } } diff --git a/be/src/runtime/broker_mgr.cpp b/be/src/runtime/broker_mgr.cpp index 1fbe9db2f4049..f7568b65132d0 100644 --- a/be/src/runtime/broker_mgr.cpp +++ b/be/src/runtime/broker_mgr.cpp @@ -42,7 +42,7 @@ #include "runtime/client_cache.h" #include "runtime/exec_env.h" #include "service/backend_options.h" -#include "util/await.h" +#include "util/misc.h" #include "util/starrocks_metrics.h" #include "util/thread.h" @@ -106,9 +106,6 @@ void BrokerMgr::ping(const TNetworkAddress& addr) { } void BrokerMgr::ping_worker() { - Awaitility await; - // timeout in 5 seconds with 200ms check interval - await.timeout(5 * 1000L * 1000).interval(200 * 1000L); while (!_thread_stop) { std::vector addresses; { @@ -120,8 +117,7 @@ void BrokerMgr::ping_worker() { for (auto& addr : addresses) { ping(addr); } - // block until _thread_stop is true or timeout - await.until([this] { return _thread_stop; }); + nap_sleep(5, [this] { return _thread_stop; }); } } diff --git a/be/src/runtime/fragment_mgr.cpp b/be/src/runtime/fragment_mgr.cpp index 5a0750cf0fee3..5a5108d9a407f 100644 --- a/be/src/runtime/fragment_mgr.cpp +++ b/be/src/runtime/fragment_mgr.cpp @@ -59,7 +59,7 @@ #include "runtime/runtime_filter_cache.h" #include "runtime/runtime_filter_worker.h" #include "service/backend_options.h" -#include "util/await.h" +#include "util/misc.h" #include "util/starrocks_metrics.h" #include "util/stopwatch.hpp" #include "util/thread.h" @@ -542,9 +542,6 @@ void FragmentMgr::close() { void FragmentMgr::cancel_worker() { LOG(INFO) << "FragmentMgr cancel worker start working."; - Awaitility await; - // timeout in 1 second and check every 200ms - await.timeout(1000 * 1000).interval(200 * 1000); while (!_stop) { std::vector to_delete; DateTimeValue 
now = DateTimeValue::local_time(); @@ -560,9 +557,7 @@ void FragmentMgr::cancel_worker() { cancel(id, PPlanFragmentCancelReason::TIMEOUT); LOG(INFO) << "FragmentMgr cancel worker going to cancel timeout fragment " << print_id(id); } - - // block until timeout or _stop is true - await.until([this] { return _stop; }); + nap_sleep(1, [this] { return _stop; }); } LOG(INFO) << "FragmentMgr cancel worker is going to exit."; } diff --git a/be/src/runtime/profile_report_worker.cpp b/be/src/runtime/profile_report_worker.cpp index cfe5ad00f79b0..f787f23610b06 100644 --- a/be/src/runtime/profile_report_worker.cpp +++ b/be/src/runtime/profile_report_worker.cpp @@ -16,7 +16,7 @@ #include "exec/pipeline/query_context.h" #include "runtime/fragment_mgr.h" -#include "util/await.h" +#include "util/misc.h" namespace starrocks { @@ -113,8 +113,6 @@ void ProfileReportWorker::execute() { int32_t interval = config::profile_report_interval; - Awaitility await; - await.timeout(interval * 1000LL * 1000).interval(200 * 1000L); while (!_stop.load(std::memory_order_consume)) { _start_report_profile(); @@ -122,8 +120,7 @@ void ProfileReportWorker::execute() { LOG(WARNING) << "profile_report_interval config is illegal: " << interval << ", force set to 1"; interval = 1; } - // block until timeout or _stop is true - await.until([this] { return _stop.load(std::memory_order_consume); }); + nap_sleep(interval, [this] { return _stop.load(std::memory_order_consume); }); } LOG(INFO) << "ProfileReportWorker going to exit."; } diff --git a/be/src/runtime/result_buffer_mgr.cpp b/be/src/runtime/result_buffer_mgr.cpp index bba3b4a447c4e..44ada75837678 100644 --- a/be/src/runtime/result_buffer_mgr.cpp +++ b/be/src/runtime/result_buffer_mgr.cpp @@ -38,7 +38,7 @@ #include "gen_cpp/InternalService_types.h" #include "runtime/buffer_control_block.h" -#include "util/await.h" +#include "util/misc.h" #include "util/starrocks_metrics.h" #include "util/thread.h" @@ -146,9 +146,6 @@ Status 
ResultBufferMgr::cancel_at_time(time_t cancel_time, const TUniqueId& quer void ResultBufferMgr::cancel_thread() { LOG(INFO) << "result buffer manager cancel thread begin."; - Awaitility await; - // check every 200ms until 1 second and timeout - await.timeout(1000 * 1000).interval(200 * 1000); while (!_is_stop) { // get query std::vector query_to_cancel; @@ -170,8 +167,7 @@ for (auto& i : query_to_cancel) { cancel(i); } - // wait until timeout or _is_stop returns true - await.until([this] { return _is_stop; }); + nap_sleep(1, [this] { return _is_stop; }); } LOG(INFO) << "result buffer manager cancel thread finish."; diff --git a/be/src/runtime/routine_load/data_consumer_pool.cpp b/be/src/runtime/routine_load/data_consumer_pool.cpp index 8f0f8bdc281db..94b70ac143568 100644 --- a/be/src/runtime/routine_load/data_consumer_pool.cpp +++ b/be/src/runtime/routine_load/data_consumer_pool.cpp @@ -37,7 +37,7 @@ #include "common/config.h" #include "data_consumer.h" #include "runtime/routine_load/data_consumer_group.h" -#include "util/await.h" +#include "util/misc.h" #include "util/thread.h" namespace starrocks { @@ -173,17 +173,12 @@ void DataConsumerPool::start_bg_worker() { #endif uint32_t interval = 60; - Awaitility await; - // timeout in `interval` seconds and check every 200ms - await.timeout(interval * 1000L * 1000).interval(200 * 1000); while (true) { if (*is_closed) { return; } - _clean_idle_consumer_bg(); - // block until timeout or *is_closed is true - await.until([&is_closed] { return *is_closed; }); + nap_sleep(interval, [&is_closed] { return *is_closed; }); } }); Thread::set_thread_name(_clean_idle_consumer_thread, "clean_idle_cm"); diff --git a/be/src/runtime/stream_load/transaction_mgr.cpp b/be/src/runtime/stream_load/transaction_mgr.cpp index 030bf802d9fec..b67c2f80542a7 100644 --- a/be/src/runtime/stream_load/transaction_mgr.cpp +++ b/be/src/runtime/stream_load/transaction_mgr.cpp @@ -48,12 +48,12 @@ #include 
"runtime/stream_load/stream_load_executor.h" #include "runtime/stream_load/stream_load_pipe.h" #include "runtime/stream_load/transaction_mgr.h" -#include "util/await.h" #include "util/byte_buffer.h" #include "util/debug_util.h" #include "util/defer_op.h" #include "util/json_util.h" #include "util/metrics.h" +#include "util/misc.h" #include "util/starrocks_metrics.h" #include "util/string_parser.hpp" #include "util/thrift_rpc_helper.h" @@ -87,12 +87,9 @@ TransactionMgr::TransactionMgr(ExecEnv* exec_env) : _exec_env(exec_env) { ProfilerRegisterThread(); #endif - Awaitility await; - // check every 200ms until timeout with `interval` seconds - await.timeout(interval * 1000L * 1000L).interval(200 * 1000); while (!_is_stopped.load()) { _clean_stream_context(); - await.until([this] { return _is_stopped.load(); }); + nap_sleep(interval, [this] { return _is_stopped.load(); }); } }); Thread::set_thread_name(_transaction_clean_thread, "transaction_clean"); diff --git a/be/src/service/service_be/starrocks_be.cpp b/be/src/service/service_be/starrocks_be.cpp index 273f27e211da4..51807a6c3b0ce 100644 --- a/be/src/service/service_be/starrocks_be.cpp +++ b/be/src/service/service_be/starrocks_be.cpp @@ -211,7 +211,7 @@ void start_be(const std::vector& paths, bool as_cn) { LOG(INFO) << "BE started successfully"; while (!(k_starrocks_exit.load()) && !(k_starrocks_exit_quick.load())) { - sleep(10); + sleep(1); } int exit_step = 1; diff --git a/be/src/util/CMakeLists.txt b/be/src/util/CMakeLists.txt index 784a83f964176..83563bac128ad 100644 --- a/be/src/util/CMakeLists.txt +++ b/be/src/util/CMakeLists.txt @@ -44,6 +44,7 @@ set(UTIL_FILES starrocks_metrics.cpp mem_info.cpp metrics.cpp + misc.cpp murmur_hash3.cpp network_util.cpp parse_util.cpp diff --git a/be/src/util/misc.cpp b/be/src/util/misc.cpp new file mode 100644 index 0000000000000..b79b48c4a49df --- /dev/null +++ b/be/src/util/misc.cpp @@ -0,0 +1,33 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "util/misc.h" + +#include "util/await.h" + +namespace starrocks { + +#ifdef BE_TEST +static constexpr int sleep_interval = 100 * 1000; // 100ms +#else +static constexpr int sleep_interval = 1 * 1000 * 1000; // 1 seconds +#endif + +void nap_sleep(int32_t sleep_secs, std::function stop_condition) { + Awaitility await; + await.timeout(sleep_secs * 1000LL * 1000LL).interval(sleep_interval); + await.until(stop_condition); +} + +} // namespace starrocks diff --git a/be/src/util/misc.h b/be/src/util/misc.h new file mode 100644 index 0000000000000..05cfd41ca605a --- /dev/null +++ b/be/src/util/misc.h @@ -0,0 +1,24 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include + +namespace starrocks { + +// take a sleep with small intervals until time out by `sleep_secs` or the `stop_condition()` is true +void nap_sleep(int32_t sleep_secs, std::function stop_condition); + +} // namespace starrocks diff --git a/bin/common.sh b/bin/common.sh index 7fda7e065e3f2..e3a356be11854 100755 --- a/bin/common.sh +++ b/bin/common.sh @@ -82,7 +82,7 @@ export_shared_envvars() { # https://github.com/aws/aws-cli/issues/5623 # https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html - export AWS_EC2_METADATA_DISABLED=false + export AWS_EC2_METADATA_DISABLED=${AWS_EC2_METADATA_DISABLED:-false} # =================================================================================== } diff --git a/docker/dockerfiles/toolchains/toolchains-centos7.Dockerfile b/docker/dockerfiles/toolchains/toolchains-centos7.Dockerfile index 968a42d367e90..0b3b82a746e77 100644 --- a/docker/dockerfiles/toolchains/toolchains-centos7.Dockerfile +++ b/docker/dockerfiles/toolchains/toolchains-centos7.Dockerfile @@ -1,17 +1,21 @@ # Build toolchains on centos7, dev-env image can be built based on this image for centos7 -# DOCKER_BUILDKIT=1 docker build --rm=true -f docker/dockerfiles/toolchains/centos7-toolchains.Dockerfile -t toolchains-centos7:latest docker/dockerfiles/toolchains/ +# DOCKER_BUILDKIT=1 docker build --rm=true -f docker/dockerfiles/toolchains/toolchains-centos7.Dockerfile -t toolchains-centos7:latest docker/dockerfiles/toolchains/ -ARG GCC_INSTALL_HOME=/opt/gcc/usr +ARG GCC_INSTALL_HOME=/opt/rh/gcc-toolset-10/root/usr ARG GCC_DOWNLOAD_URL=https://ftp.gnu.org/gnu/gcc/gcc-10.3.0/gcc-10.3.0.tar.gz ARG CMAKE_INSTALL_HOME=/opt/cmake ARG MAVEN_VERSION=3.6.3 ARG MAVEN_INSTALL_HOME=/opt/maven +# Can't upgrade to a later version, due to incompatible changes between 2.31 and 2.32 +ARG BINUTILS_DOWNLOAD_URL=https://ftp.gnu.org/gnu/binutils/binutils-2.30.tar.bz2 -FROM centos:centos7 as gcc-builder +FROM centos:centos7 as base-builder 
+RUN yum install -y gcc gcc-c++ make automake curl wget gzip gunzip zip bzip2 file texinfo && yum clean metadata + + +FROM base-builder as gcc-builder ARG GCC_INSTALL_HOME ARG GCC_DOWNLOAD_URL - -RUN yum install -y gcc gcc-c++ make automake curl wget gzip gunzip zip bzip2 file texinfo && yum clean metadata RUN mkdir -p /workspace/gcc && \ cd /workspace/gcc && \ wget --no-check-certificate $GCC_DOWNLOAD_URL -O ../gcc.tar.gz && \ @@ -23,6 +27,17 @@ RUN cd /workspace/gcc && mkdir -p /workspace/installed && make DESTDIR=/workspac strip /workspace/installed/${GCC_INSTALL_HOME}/bin/* /workspace/installed/${GCC_INSTALL_HOME}/libexec/gcc/*/*/{cc1,cc1plus,collect2,lto1} +FROM base-builder as binutils-builder +ARG BINUTILS_DOWNLOAD_URL +# build binutils and only install gnu as +RUN mkdir -p /workspace/binutils && \ + cd /workspace/binutils && \ + wget --no-check-certificate $BINUTILS_DOWNLOAD_URL -O ../binutils.tar.bz2 && \ + tar -xjf ../binutils.tar.bz2 --strip-components=1 && \ + ./configure --prefix=/usr && \ + make -j `nproc` && \ + mkdir -p /workspace/installed && cd gas && make DESTDIR=/workspace/installed install + FROM centos:centos7 ARG GCC_INSTALL_HOME @@ -38,6 +53,8 @@ RUN yum install -y epel-release && yum install -y wget unzip bzip2 patch bison b # install gcc COPY --from=gcc-builder /workspace/installed/ / +# install binutils +COPY --from=binutils-builder /workspace/installed/ / # install cmake RUN ARCH=`uname -m` && mkdir -p $CMAKE_INSTALL_HOME && cd $CMAKE_INSTALL_HOME && \ curl -s -k https://cmake.org/files/v3.22/cmake-3.22.4-linux-${ARCH}.tar.gz | tar -xzf - --strip-components=1 && \ diff --git a/docker/dockerfiles/toolchains/toolchains-ubuntu.Dockerfile b/docker/dockerfiles/toolchains/toolchains-ubuntu.Dockerfile index 2378916567e1c..e9d45fbb09205 100644 --- a/docker/dockerfiles/toolchains/toolchains-ubuntu.Dockerfile +++ b/docker/dockerfiles/toolchains/toolchains-ubuntu.Dockerfile @@ -7,7 +7,7 @@ FROM ubuntu:22.04 RUN apt-get update -y && \ apt-get 
install --no-install-recommends -y \ automake binutils-dev bison byacc ccache flex libiberty-dev libtool maven zip python3 python-is-python3 make cmake gcc g++ default-jdk git patch lld bzip2 \ - wget unzip curl vim tree net-tools openssh-client && \ + wget unzip curl vim tree net-tools openssh-client xz-utils && \ DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata && \ rm -rf /var/lib/apt/lists/* diff --git a/docs/assets/3.11-1.png b/docs/assets/3.11-1.png new file mode 100644 index 0000000000000..5eccad1a9195a Binary files /dev/null and b/docs/assets/3.11-1.png differ diff --git a/docs/assets/3.11-2.png b/docs/assets/3.11-2.png new file mode 100644 index 0000000000000..b56318d5729eb Binary files /dev/null and b/docs/assets/3.11-2.png differ diff --git a/docs/assets/3.11-3.png b/docs/assets/3.11-3.png new file mode 100644 index 0000000000000..80b03bc11eb4d Binary files /dev/null and b/docs/assets/3.11-3.png differ diff --git a/docs/assets/3.11-4.png b/docs/assets/3.11-4.png new file mode 100644 index 0000000000000..df0d941711db0 Binary files /dev/null and b/docs/assets/3.11-4.png differ diff --git a/docs/assets/3.11-5.png b/docs/assets/3.11-5.png new file mode 100644 index 0000000000000..3774df4f9adf4 Binary files /dev/null and b/docs/assets/3.11-5.png differ diff --git a/docs/assets/3.11-6.png b/docs/assets/3.11-6.png new file mode 100644 index 0000000000000..c8852fcaa3bca Binary files /dev/null and b/docs/assets/3.11-6.png differ diff --git a/docs/assets/3.11-7.png b/docs/assets/3.11-7.png new file mode 100644 index 0000000000000..708e4b00b58b6 Binary files /dev/null and b/docs/assets/3.11-7.png differ diff --git a/docs/assets/3.11-8.png b/docs/assets/3.11-8.png new file mode 100644 index 0000000000000..dd9d317274b37 Binary files /dev/null and b/docs/assets/3.11-8.png differ diff --git a/docs/loading/CloudCanal.md b/docs/loading/CloudCanal.md new file mode 100644 index 0000000000000..19a6ec347f86c --- /dev/null +++ b/docs/loading/CloudCanal.md 
@@ -0,0 +1,90 @@ +# Load data using CloudCanal + +## Introduction + +CloudCanal Community Edition is a free data migration and synchronization platform published by [ClouGence Co., Ltd](https://www.cloudcanalx.com) that integrates Schema Migration, Full Data Migration, verification, Correction, and real-time Incremental Synchronization. +CloudCanal helps users build a modern data stack in a simple way. +![image.png](../assets/3.11-1.png) + +## Download + +[CloudCanal Download Link](https://www.cloudcanalx.com) + +[CloudCanal Quick Start](https://www.cloudcanalx.com/us/cc-doc/quick/quick_start) + +## Function Description + +- It is highly recommended to utilize CloudCanal version 2.2.5.0 or higher for efficient data import into StarRocks. +- It is advisable to exercise control over the ingestion frequency when using CloudCanal to import **incremental data** into StarRocks. The default import frequency for writing data from CloudCanal to StarRocks can be adjusted using the `realFlushPauseSec` parameter, which is set to 10 seconds by default. +- In the current community edition with a maximum memory configuration of 2GB, if DataJobs encounter OOM exceptions or significant GC pauses, it is recommended to reduce the batch size to minimize memory usage. + - For Full DataTask, you can adjust the `fullBatchSize` and `fullRingBufferSize` parameters. + - For Incremental DataTask, the `increBatchSize` and `increRingBufferSize` parameters can be adjusted accordingly. 
+- Supported Source endpoints and features: + + | Source Endpoints \ Feature | Schema Migration | Full Data | Incremental | Verification | + | --- | --- | --- | --- | --- | + | Oracle | Yes | Yes | Yes | Yes | + | PostgreSQL | Yes | Yes | Yes | Yes | + | Greenplum | Yes | Yes | No | Yes | + | MySQL | Yes | Yes | Yes | Yes | + | Kafka | No | No | Yes | No | + | OceanBase | Yes | Yes | Yes | Yes | + | PolarDb for MySQL | Yes | Yes | Yes | Yes | + | Db2 | Yes | Yes | Yes | Yes | + +## Typical example + +CloudCanal allows users to seamlessly add DataSources and create DataJobs through a visual interface. This enables automated schema migration, full data migration, and real-time incremental synchronization. The following example demonstrates how to migrate and synchronize data from MySQL to StarRocks. The procedures are similar for data synchronization between other data sources and StarRocks. + +### Prerequisites + +First, refer to the [CloudCanal Quick Start](https://www.cloudcanalx.com/us/cc-doc/quick/quick_start) to complete the installation and deployment of the CloudCanal Community Edition. + +### Add DataSource + +- Log in to the CloudCanal platform +- Go to **DataSource Management** -> **Add DataSource** +- Select **StarRocks** from the options for self-built databases + +![image.png](../assets/3.11-2.png) + +> Tips: +> +> - Client Address: The address of the StarRocks server's MySQL client service port. CloudCanal primarily uses this address to query metadata information of the database tables. +> +> - HTTP Address: The HTTP address is mainly used to receive data import requests from CloudCanal. + +### Create DataJob + +Once the DataSource has been added successfully, you can follow these steps to create data migration and synchronization DataJobs. 
+ +- Go to **DataJob Management** -> **Create DataJob** in CloudCanal +- Select the source and target databases for the DataJob +- Click Next Step + +![image.png](../assets/3.11-3.png) + +- Choose **Incremental** and enable **Full Data** +- Select DDL Sync +- Click Next Step + +![image.png](../assets/3.11-4.png) + +- Select the source tables you want to subscribe to. **Please note that the target StarRocks tables created automatically after Schema Migration are primary key tables, so source tables without a primary key are not currently supported.** + +- Click Next Step + +![image.png](../assets/3.11-5.png) + +- Configure the column mapping +- Click Next Step + +![image.png](../assets/3.11-6.png) + +- Create DataJob + +![image.png](../assets/3.11-7.png) + +- Check the status of DataJob. The DataJob will automatically go through the stages of Schema Migration, Full Data, and Incremental after it has been created + +![image.png](../assets/3.11-8.png) diff --git a/fe/fe-core/pom.xml b/fe/fe-core/pom.xml index dbcff132368de..d26253ae8c198 100644 --- a/fe/fe-core/pom.xml +++ b/fe/fe-core/pom.xml @@ -808,6 +808,13 @@ under the License. 
test + + + org.apache.ranger + ranger-plugins-common + 2.4.0 + + diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/DeltaLakePartitionKey.java b/fe/fe-core/src/main/java/com/starrocks/catalog/DeltaLakePartitionKey.java index 505aa9d8bca0f..41361099010a3 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/DeltaLakePartitionKey.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/DeltaLakePartitionKey.java @@ -14,13 +14,17 @@ package com.starrocks.catalog; +import com.google.common.collect.ImmutableList; + +import java.util.List; + public class DeltaLakePartitionKey extends PartitionKey implements NullablePartitionKey { public DeltaLakePartitionKey() { super(); } @Override - public String nullPartitionValue() { - return DeltaLakeTable.PARTITION_NULL_VALUE; + public List nullPartitionValueList() { + return ImmutableList.of(DeltaLakeTable.PARTITION_NULL_VALUE); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/HivePartitionKey.java b/fe/fe-core/src/main/java/com/starrocks/catalog/HivePartitionKey.java index 85aba0095a367..02f86f60552c5 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/HivePartitionKey.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/HivePartitionKey.java @@ -15,15 +15,18 @@ package com.starrocks.catalog; +import com.google.common.collect.ImmutableList; import com.starrocks.connector.hive.HiveMetaClient; +import java.util.List; + public class HivePartitionKey extends PartitionKey implements NullablePartitionKey { public HivePartitionKey() { super(); } @Override - public String nullPartitionValue() { - return HiveMetaClient.PARTITION_NULL_VALUE; + public List nullPartitionValueList() { + return ImmutableList.of(HiveMetaClient.PARTITION_NULL_VALUE); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/HudiPartitionKey.java b/fe/fe-core/src/main/java/com/starrocks/catalog/HudiPartitionKey.java index e3d269feb9f1f..231ce870e8e73 100644 --- 
a/fe/fe-core/src/main/java/com/starrocks/catalog/HudiPartitionKey.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/HudiPartitionKey.java @@ -15,15 +15,18 @@ package com.starrocks.catalog; +import com.google.common.collect.ImmutableList; import com.starrocks.connector.hive.HiveMetaClient; +import java.util.List; + public class HudiPartitionKey extends PartitionKey implements NullablePartitionKey { public HudiPartitionKey() { super(); } @Override - public String nullPartitionValue() { - return HiveMetaClient.HUDI_PARTITION_NULL_VALUE; + public List nullPartitionValueList() { + return ImmutableList.of(HiveMetaClient.PARTITION_NULL_VALUE, HiveMetaClient.HUDI_PARTITION_NULL_VALUE); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/IcebergPartitionKey.java b/fe/fe-core/src/main/java/com/starrocks/catalog/IcebergPartitionKey.java index c08f33bff59c7..4bbfb6236551c 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/IcebergPartitionKey.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/IcebergPartitionKey.java @@ -15,15 +15,18 @@ package com.starrocks.catalog; +import com.google.common.collect.ImmutableList; import com.starrocks.connector.iceberg.IcebergApiConverter; +import java.util.List; + public class IcebergPartitionKey extends PartitionKey implements NullablePartitionKey { public IcebergPartitionKey() { super(); } @Override - public String nullPartitionValue() { - return IcebergApiConverter.PARTITION_NULL_VALUE; + public List nullPartitionValueList() { + return ImmutableList.of(IcebergApiConverter.PARTITION_NULL_VALUE); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/JDBCPartitionKey.java b/fe/fe-core/src/main/java/com/starrocks/catalog/JDBCPartitionKey.java index 4809c68058c62..0b636dee03759 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/JDBCPartitionKey.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/JDBCPartitionKey.java @@ -15,6 +15,9 @@ package com.starrocks.catalog; +import 
com.google.common.collect.ImmutableList; + +import java.util.List; public class JDBCPartitionKey extends PartitionKey implements NullablePartitionKey { public JDBCPartitionKey() { @@ -22,7 +25,7 @@ public JDBCPartitionKey() { } @Override - public String nullPartitionValue() { - return JDBCTable.PARTITION_NULL_VALUE; + public List nullPartitionValueList() { + return ImmutableList.of(JDBCTable.PARTITION_NULL_VALUE); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/NullablePartitionKey.java b/fe/fe-core/src/main/java/com/starrocks/catalog/NullablePartitionKey.java index 8a439a6fd87c4..a55518afbcf9c 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/NullablePartitionKey.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/NullablePartitionKey.java @@ -15,8 +15,12 @@ package com.starrocks.catalog; +import com.google.common.collect.ImmutableList; + +import java.util.List; + public interface NullablePartitionKey { - default String nullPartitionValue() { - return ""; + default List nullPartitionValueList() { + return ImmutableList.of(""); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/catalog/PartitionKey.java b/fe/fe-core/src/main/java/com/starrocks/catalog/PartitionKey.java index 1f36477f338e9..1b4b3f2b1b7ba 100644 --- a/fe/fe-core/src/main/java/com/starrocks/catalog/PartitionKey.java +++ b/fe/fe-core/src/main/java/com/starrocks/catalog/PartitionKey.java @@ -64,6 +64,10 @@ public class PartitionKey implements Comparable, Writable { private static final Logger LOG = LogManager.getLogger(PartitionKey.class); private List keys; private List types; + // Records the string corresponding to partition value when the partition value is null + // for hive, it's __HIVE_DEFAULT_PARTITION__ + // for hudi, it's __HIVE_DEFAULT_PARTITION__ or default + private String nullPartitionValue = ""; private static final DateLiteral SHADOW_DATE_LITERAL = new DateLiteral(0, 0, 0); private static final DateLiteral SHADOW_DATETIME_LITERAL = new DateLiteral(0, 
0, 0, 0, 0, 0, 0); @@ -80,6 +84,14 @@ public PartitionKey(List keyValue, List keyType) { types = keyType; } + public void setNullPartitionValue(String nullPartitionValue) { + this.nullPartitionValue = nullPartitionValue; + } + + public String getNullPartitionValue() { + return nullPartitionValue; + } + // Factory methods public static PartitionKey createInfinityPartitionKey(List columns, boolean isMax) throws AnalysisException { diff --git a/fe/fe-core/src/main/java/com/starrocks/common/Config.java b/fe/fe-core/src/main/java/com/starrocks/common/Config.java index 32d18fb59ed73..95b91ae39c511 100644 --- a/fe/fe-core/src/main/java/com/starrocks/common/Config.java +++ b/fe/fe-core/src/main/java/com/starrocks/common/Config.java @@ -1562,6 +1562,13 @@ public class Config extends ConfigBase { @ConfField(mutable = true) public static boolean authorization_enable_admin_user_protection = false; + /** + * When set to true, guava cache is used to cache the privilege collection + * for a specified user. + */ + @ConfField(mutable = true) + public static boolean authorization_enable_priv_collection_cache = true; + /** * In some cases, some tablets may have all replicas damaged or lost. 
* At this time, the data has been lost, and the damaged tablets @@ -2461,4 +2468,7 @@ public class Config extends ConfigBase { */ @ConfField(mutable = true) public static int primary_key_disk_schedule_time = 3600; // 1h + + @ConfField(mutable = true) + public static String access_control = "native"; } diff --git a/fe/fe-core/src/main/java/com/starrocks/connector/PartitionUtil.java b/fe/fe-core/src/main/java/com/starrocks/connector/PartitionUtil.java index 55faab3617ede..0ac48524ec46c 100644 --- a/fe/fe-core/src/main/java/com/starrocks/connector/PartitionUtil.java +++ b/fe/fe-core/src/main/java/com/starrocks/connector/PartitionUtil.java @@ -122,7 +122,8 @@ public static PartitionKey createPartitionKey(List values, List if (rawValue == null) { rawValue = "null"; } - if (((NullablePartitionKey) partitionKey).nullPartitionValue().equals(rawValue)) { + if (((NullablePartitionKey) partitionKey).nullPartitionValueList().contains(rawValue)) { + partitionKey.setNullPartitionValue(rawValue); exprValue = NullLiteral.create(type); } else { exprValue = LiteralExpr.create(rawValue, type); @@ -191,7 +192,7 @@ public static List fromPartitionKey(PartitionKey key) { List values = new ArrayList<>(literalValues.size()); for (LiteralExpr value : literalValues) { if (value instanceof NullLiteral) { - values.add(((NullablePartitionKey) key).nullPartitionValue()); + values.add(key.getNullPartitionValue()); } else if (value instanceof BoolLiteral) { BoolLiteral boolValue = ((BoolLiteral) value); values.add(String.valueOf(boolValue.getValue())); diff --git a/fe/fe-core/src/main/java/com/starrocks/connector/parser/trino/ComplexFunctionCallTransformer.java b/fe/fe-core/src/main/java/com/starrocks/connector/parser/trino/ComplexFunctionCallTransformer.java index 2461d86c0e36a..d68911b2ad462 100644 --- a/fe/fe-core/src/main/java/com/starrocks/connector/parser/trino/ComplexFunctionCallTransformer.java +++ 
b/fe/fe-core/src/main/java/com/starrocks/connector/parser/trino/ComplexFunctionCallTransformer.java @@ -16,6 +16,7 @@ import com.google.common.collect.ImmutableList; import com.starrocks.analysis.CastExpr; +import com.starrocks.analysis.CollectionElementExpr; import com.starrocks.analysis.Expr; import com.starrocks.analysis.FunctionCallExpr; import com.starrocks.analysis.IntLiteral; @@ -71,6 +72,11 @@ public static Expr transform(String functionName, Expr... args) { throw new SemanticException("date_diff function must have 3 arguments"); } return new FunctionCallExpr("date_diff", ImmutableList.of(args[0], args[2], args[1])); + } else if (functionName.equalsIgnoreCase("element_at")) { + if (args.length != 2) { + throw new SemanticException("element_at function must have 2 arguments"); + } + return new CollectionElementExpr(args[0], args[1]); } return null; } diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/AccessControl.java b/fe/fe-core/src/main/java/com/starrocks/privilege/AccessControl.java index 098b0eb8de199..cd37dfc680408 100644 --- a/fe/fe-core/src/main/java/com/starrocks/privilege/AccessControl.java +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/AccessControl.java @@ -14,9 +14,12 @@ package com.starrocks.privilege; +import com.starrocks.analysis.Expr; import com.starrocks.analysis.TableName; import com.starrocks.catalog.Database; import com.starrocks.catalog.Function; +import com.starrocks.catalog.Type; +import com.starrocks.qe.ConnectContext; import com.starrocks.sql.ast.UserIdentity; import java.util.Set; @@ -130,4 +133,12 @@ default void checkStorageVolumeAction(UserIdentity currentUser, Set roleId default void checkAnyActionOnStorageVolume(UserIdentity currentUser, Set roleIds, String storageVolume) { AccessDeniedException.reportAccessDenied("ANY", ObjectType.STORAGE_VOLUME, storageVolume); } + + default Expr getColumnMaskingPolicy(ConnectContext currentUser, TableName tableName, String columnName, Type type) { + return null; + } + + 
default Expr getRowAccessPolicy(ConnectContext currentUser, TableName tableName) { + return null; + } } \ No newline at end of file diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/AccessControlProvider.java b/fe/fe-core/src/main/java/com/starrocks/privilege/AccessControlProvider.java index db82d644b258e..6a2b6e1cba318 100644 --- a/fe/fe-core/src/main/java/com/starrocks/privilege/AccessControlProvider.java +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/AccessControlProvider.java @@ -14,15 +14,21 @@ package com.starrocks.privilege; +import com.starrocks.catalog.InternalCatalog; import com.starrocks.sql.analyzer.AuthorizerStmtVisitor; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + public class AccessControlProvider { protected final AuthorizerStmtVisitor privilegeCheckerVisitor; - protected final AccessControl accessControl; + public final Map catalogToAccessControl; public AccessControlProvider(AuthorizerStmtVisitor privilegeCheckerVisitor, AccessControl accessControl) { this.privilegeCheckerVisitor = privilegeCheckerVisitor; - this.accessControl = accessControl; + + this.catalogToAccessControl = new ConcurrentHashMap<>(); + this.catalogToAccessControl.put(InternalCatalog.DEFAULT_INTERNAL_CATALOG_NAME, accessControl); } public AuthorizerStmtVisitor getPrivilegeCheckerVisitor() { @@ -30,6 +36,23 @@ public AuthorizerStmtVisitor getPrivilegeCheckerVisitor() { } public AccessControl getAccessControlOrDefault(String catalogName) { - return this.accessControl; + if (catalogName == null) { + return catalogToAccessControl.get(InternalCatalog.DEFAULT_INTERNAL_CATALOG_NAME); + } + + AccessControl catalogAccessController = catalogToAccessControl.get(catalogName); + if (catalogAccessController != null) { + return catalogAccessController; + } else { + return catalogToAccessControl.get(InternalCatalog.DEFAULT_INTERNAL_CATALOG_NAME); + } + } + + public void setAccessControl(String catalog, AccessControl accessControl) { + 
catalogToAccessControl.put(catalog, accessControl); + } + + public void removeAccessControl(String catalog) { + catalogToAccessControl.remove(catalog); } } diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/AuthorizationMgr.java b/fe/fe-core/src/main/java/com/starrocks/privilege/AuthorizationMgr.java index 2c82594561152..1d240b793ed55 100644 --- a/fe/fe-core/src/main/java/com/starrocks/privilege/AuthorizationMgr.java +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/AuthorizationMgr.java @@ -790,7 +790,11 @@ public short getProviderPluginVersion() { protected PrivilegeCollectionV2 mergePrivilegeCollection(UserIdentity userIdentity, Set roleIds) throws PrivilegeException { try { - return ctxToMergedPrivilegeCollections.get(new Pair<>(userIdentity, roleIds)); + if (Config.authorization_enable_priv_collection_cache) { + return ctxToMergedPrivilegeCollections.get(new Pair<>(userIdentity, roleIds)); + } else { + return loadPrivilegeCollection(userIdentity, roleIds); + } } catch (ExecutionException e) { String errMsg = String.format("failed merge privilege collection on %s with roles %s", userIdentity, roleIds); PrivilegeException exception = new PrivilegeException(errMsg); diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/PrivilegeType.java b/fe/fe-core/src/main/java/com/starrocks/privilege/PrivilegeType.java index 3aa39593794d7..b7fc3360de86d 100644 --- a/fe/fe-core/src/main/java/com/starrocks/privilege/PrivilegeType.java +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/PrivilegeType.java @@ -43,6 +43,9 @@ public String name() { } } + // ANY is not in VALID_PRIVILEGE_TYPE, ANY is not a Privilege Type that users can use directly + public static final PrivilegeType ANY = new PrivilegeType(0, "ANY"); + public static final PrivilegeType GRANT = new PrivilegeType(1, "GRANT"); public static final PrivilegeType NODE = new PrivilegeType(2, "NODE"); public static final PrivilegeType OPERATE = new PrivilegeType(3, "OPERATE"); diff --git 
a/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/RangerStarRocksAccessRequest.java b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/RangerStarRocksAccessRequest.java new file mode 100644 index 0000000000000..22db329695ea4 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/RangerStarRocksAccessRequest.java @@ -0,0 +1,58 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.starrocks.privilege.ranger; + +import com.starrocks.catalog.MaterializedView; +import com.starrocks.sql.ast.UserIdentity; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.ranger.plugin.policyengine.RangerAccessRequestImpl; +import org.apache.ranger.plugin.policyengine.RangerAccessResourceImpl; + +import java.util.Arrays; +import java.util.Date; +import java.util.HashSet; +import java.util.Set; + +public class RangerStarRocksAccessRequest extends RangerAccessRequestImpl { + private static final Logger LOG = LogManager.getLogger(MaterializedView.class); + + private RangerStarRocksAccessRequest() { + } + + public static RangerStarRocksAccessRequest createAccessRequest(RangerAccessResourceImpl resource, UserIdentity user, + String accessType) { + UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user.getQualifiedUser()); + String[] groups = 
ugi.getGroupNames(); + Set userGroups = null; + if (groups != null && groups.length > 0) { + userGroups = new HashSet<>(Arrays.asList(groups)); + } + RangerStarRocksAccessRequest request = new RangerStarRocksAccessRequest(); + request.setUser(user.getQualifiedUser()); + request.setUserGroups(userGroups); + request.setAccessType(accessType); + request.setResource(resource); + request.setClientIPAddress(user.getHost()); + request.setClientType("starrocks"); + request.setClusterName("starrocks"); + request.setAccessTime(new Date()); + + LOG.debug("RangerStarRocksAccessRequest | " + request.toString()); + + return request; + } +} \ No newline at end of file diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/SecurityPolicyRewriteRule.java b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/SecurityPolicyRewriteRule.java new file mode 100644 index 0000000000000..d8e5244d3d7e5 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/SecurityPolicyRewriteRule.java @@ -0,0 +1,94 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.starrocks.privilege.ranger; + +import com.starrocks.analysis.Expr; +import com.starrocks.analysis.SlotRef; +import com.starrocks.analysis.TableName; +import com.starrocks.catalog.Column; +import com.starrocks.catalog.InternalCatalog; +import com.starrocks.common.Config; +import com.starrocks.qe.ConnectContext; +import com.starrocks.sql.analyzer.Authorizer; +import com.starrocks.sql.ast.QueryStatement; +import com.starrocks.sql.ast.Relation; +import com.starrocks.sql.ast.SelectList; +import com.starrocks.sql.ast.SelectListItem; +import com.starrocks.sql.ast.SelectRelation; +import com.starrocks.sql.ast.TableRelation; +import com.starrocks.sql.ast.ViewRelation; +import com.starrocks.sql.parser.NodePosition; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class SecurityPolicyRewriteRule { + public static QueryStatement buildView(ConnectContext context, Relation relation, TableName tableName) { + if (!Config.access_control.equals("ranger") && + (tableName.getCatalog() == null + || tableName.getCatalog().equals(InternalCatalog.DEFAULT_INTERNAL_CATALOG_NAME))) { + return null; + } + + if (relation instanceof TableRelation && ((TableRelation) relation).isSyncMVQuery()) { + return null; + } + + List columns; + if (relation instanceof ViewRelation) { + ViewRelation viewRelation = (ViewRelation) relation; + columns = viewRelation.getView().getBaseSchema(); + } else if (relation instanceof TableRelation) { + TableRelation tableRelation = (TableRelation) relation; + columns = tableRelation.getTable().getBaseSchema(); + } else { + return null; + } + + boolean hasPolicy = false; + List selectListItemList = new ArrayList<>(); + for (Column column : columns) { + Expr maskingExpr = Authorizer.getColumnMaskingPolicy( + context, tableName, column.getName(), column.getType()); + + if (maskingExpr != null) { + hasPolicy = true; + selectListItemList.add(new SelectListItem(maskingExpr, column.getName(), NodePosition.ZERO)); + 
} else { + selectListItemList.add(new SelectListItem(new SlotRef(tableName, column.getName()), column.getName(), + NodePosition.ZERO)); + } + } + + Expr rowAccessExpr = Authorizer.getRowAccessPolicy(context, tableName); + if (rowAccessExpr != null) { + hasPolicy = true; + } + + if (!hasPolicy) { + return null; + } + + SelectRelation selectRelation = new SelectRelation( + new SelectList(selectListItemList, false), + relation, + rowAccessExpr, + null, + null); + selectRelation.setOrderBy(Collections.emptyList()); + return new QueryStatement(selectRelation); + } +} diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/hive/HiveAccessType.java b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/hive/HiveAccessType.java new file mode 100644 index 0000000000000..1ab2bb551ef31 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/hive/HiveAccessType.java @@ -0,0 +1,19 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.starrocks.privilege.ranger.hive; + +public enum HiveAccessType { + NONE, CREATE, ALTER, DROP, INDEX, LOCK, SELECT, UPDATE, USE, READ, WRITE, ALL, SERVICEADMIN, TEMPUDFADMIN; +} diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/hive/RangerHiveAccessControl.java b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/hive/RangerHiveAccessControl.java new file mode 100644 index 0000000000000..22a5677e65b91 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/hive/RangerHiveAccessControl.java @@ -0,0 +1,164 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.starrocks.privilege.ranger.hive; + +import com.google.common.collect.Lists; +import com.starrocks.analysis.Expr; +import com.starrocks.analysis.TableName; +import com.starrocks.catalog.Type; +import com.starrocks.privilege.AccessControl; +import com.starrocks.privilege.AccessDeniedException; +import com.starrocks.privilege.ObjectType; +import com.starrocks.privilege.PrivilegeType; +import com.starrocks.privilege.ranger.RangerStarRocksAccessRequest; +import com.starrocks.privilege.ranger.starrocks.RangerStarRocksResource; +import com.starrocks.qe.ConnectContext; +import com.starrocks.sql.ast.UserIdentity; +import com.starrocks.sql.parser.SqlParser; +import org.apache.commons.lang.StringUtils; +import org.apache.ranger.plugin.audit.RangerDefaultAuditHandler; +import org.apache.ranger.plugin.model.RangerPolicy; +import org.apache.ranger.plugin.model.RangerServiceDef; +import org.apache.ranger.plugin.policyengine.RangerAccessResult; +import org.apache.ranger.plugin.policyengine.RangerPolicyEngine; +import org.apache.ranger.plugin.service.RangerBasePlugin; + +import java.util.Set; + +import static java.util.Locale.ENGLISH; + +public class RangerHiveAccessControl implements AccessControl { + private final RangerBasePlugin rangerPlugin; + + public RangerHiveAccessControl(String serviceName) { + rangerPlugin = new RangerBasePlugin("hive", serviceName, "hive"); + rangerPlugin.init(); + rangerPlugin.setResultProcessor(new RangerDefaultAuditHandler()); + } + + @Override + public void checkDbAction(UserIdentity currentUser, Set roleIds, String catalogName, String db, + PrivilegeType privilegeType) { + RangerHiveResource resource = new RangerHiveResource(ObjectType.DATABASE, Lists.newArrayList(db)); + if (!hasPermission(resource, currentUser, privilegeType)) { + AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.DATABASE, db); + } + } + + @Override + public void checkAnyActionOnDb(UserIdentity currentUser, Set roleIds, String catalogName, 
String db) { + RangerHiveResource resource = new RangerHiveResource(ObjectType.DATABASE, Lists.newArrayList(db)); + if (!hasPermission(resource, currentUser, PrivilegeType.ANY)) { + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.DATABASE, db); + } + } + + @Override + public void checkTableAction(UserIdentity currentUser, Set roleIds, TableName tableName, PrivilegeType privilegeType) { + RangerHiveResource resource = new RangerHiveResource(ObjectType.TABLE, + Lists.newArrayList(tableName.getDb(), tableName.getTbl())); + if (!hasPermission(resource, currentUser, privilegeType)) { + AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.TABLE, tableName.getTbl()); + } + } + + @Override + public void checkAnyActionOnTable(UserIdentity currentUser, Set roleIds, TableName tableName) { + RangerHiveResource resource = new RangerHiveResource(ObjectType.TABLE, + Lists.newArrayList(tableName.getDb(), tableName.getTbl())); + if (!hasPermission(resource, currentUser, PrivilegeType.ANY)) { + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.TABLE, tableName.getTbl()); + } + } + + @Override + public Expr getColumnMaskingPolicy(ConnectContext currentUser, TableName tableName, String columnName, Type type) { + RangerStarRocksAccessRequest request = RangerStarRocksAccessRequest.createAccessRequest( + new RangerStarRocksResource(tableName.getCatalog(), tableName.getDb(), tableName.getTbl(), columnName), + currentUser.getCurrentUserIdentity(), PrivilegeType.SELECT.name().toLowerCase(ENGLISH)); + + RangerAccessResult result = rangerPlugin.evalDataMaskPolicies(request, null); + if (result.isMaskEnabled()) { + String maskType = result.getMaskType(); + RangerServiceDef.RangerDataMaskTypeDef maskTypeDef = result.getMaskTypeDef(); + String transformer = null; + + if (maskTypeDef != null) { + transformer = maskTypeDef.getTransformer(); + } + + if (StringUtils.equalsIgnoreCase(maskType, RangerPolicy.MASK_TYPE_NULL)) 
{ + transformer = "NULL"; + } else if (StringUtils.equalsIgnoreCase(maskType, RangerPolicy.MASK_TYPE_CUSTOM)) { + String maskedValue = result.getMaskedValue(); + + if (maskedValue == null) { + transformer = "NULL"; + } else { + transformer = maskedValue; + } + } + + if (StringUtils.isNotEmpty(transformer)) { + transformer = transformer.replace("{col}", columnName).replace("{type}", type.toSql()); + } + + return SqlParser.parseSqlToExpr(transformer, currentUser.getSessionVariable().getSqlMode()); + } else { + return null; + } + } + + @Override + public Expr getRowAccessPolicy(ConnectContext currentUser, TableName tableName) { + RangerStarRocksAccessRequest request = RangerStarRocksAccessRequest.createAccessRequest( + new RangerStarRocksResource(ObjectType.TABLE, + Lists.newArrayList(tableName.getCatalog(), tableName.getDb(), tableName.getTbl())), + currentUser.getCurrentUserIdentity(), PrivilegeType.SELECT.name().toLowerCase(ENGLISH)); + RangerAccessResult result = rangerPlugin.evalRowFilterPolicies(request, null); + if (result != null && result.isRowFilterEnabled()) { + return SqlParser.parseSqlToExpr(result.getFilterExpr(), currentUser.getSessionVariable().getSqlMode()); + } else { + return null; + } + } + + public HiveAccessType convertToAccessType(PrivilegeType privilegeType) { + if (privilegeType == PrivilegeType.SELECT) { + return HiveAccessType.SELECT; + } else { + return HiveAccessType.NONE; + } + } + + private boolean hasPermission(RangerHiveResource resource, UserIdentity user, PrivilegeType privilegeType) { + String accessType; + if (privilegeType.equals(PrivilegeType.ANY)) { + accessType = RangerPolicyEngine.ANY_ACCESS; + } else { + HiveAccessType hiveAccessType = convertToAccessType(privilegeType); + accessType = hiveAccessType.name().toLowerCase(ENGLISH); + } + + RangerStarRocksAccessRequest request = RangerStarRocksAccessRequest.createAccessRequest(resource, user, accessType); + + RangerAccessResult result = rangerPlugin.isAccessAllowed(request); + if 
(result != null && result.getIsAllowed()) { + return true; + } else { + return false; + } + } +} diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/hive/RangerHiveResource.java b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/hive/RangerHiveResource.java new file mode 100644 index 0000000000000..538b0fec3b1b1 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/hive/RangerHiveResource.java @@ -0,0 +1,32 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.starrocks.privilege.ranger.hive; + +import com.starrocks.privilege.ObjectType; +import org.apache.ranger.plugin.policyengine.RangerAccessResourceImpl; + +import java.util.List; +import java.util.Locale; + +public class RangerHiveResource extends RangerAccessResourceImpl { + public RangerHiveResource(ObjectType objectType, List objectTokens) { + if (objectType.equals(ObjectType.DATABASE)) { + setValue(ObjectType.DATABASE.name().toLowerCase(Locale.ENGLISH), objectTokens.get(0)); + } else if (objectType.equals(ObjectType.TABLE)) { + setValue(ObjectType.DATABASE.name().toLowerCase(Locale.ENGLISH), objectTokens.get(0)); + setValue(ObjectType.TABLE.name().toLowerCase(Locale.ENGLISH), objectTokens.get(1)); + } + } +} diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/RangerStarRocksAccessControl.java b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/RangerStarRocksAccessControl.java new file mode 100644 index 0000000000000..f9b702464f246 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/RangerStarRocksAccessControl.java @@ -0,0 +1,372 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.starrocks.privilege.ranger.starrocks; + +import com.google.common.collect.Lists; +import com.starrocks.analysis.Expr; +import com.starrocks.analysis.TableName; +import com.starrocks.catalog.Database; +import com.starrocks.catalog.Function; +import com.starrocks.catalog.InternalCatalog; +import com.starrocks.catalog.Table; +import com.starrocks.catalog.Type; +import com.starrocks.privilege.AccessControl; +import com.starrocks.privilege.AccessDeniedException; +import com.starrocks.privilege.ObjectType; +import com.starrocks.privilege.PrivilegeType; +import com.starrocks.privilege.ranger.RangerStarRocksAccessRequest; +import com.starrocks.qe.ConnectContext; +import com.starrocks.server.GlobalStateMgr; +import com.starrocks.sql.ast.UserIdentity; +import com.starrocks.sql.parser.SqlParser; +import org.apache.commons.lang.StringUtils; +import org.apache.ranger.plugin.audit.RangerDefaultAuditHandler; +import org.apache.ranger.plugin.model.RangerPolicy; +import org.apache.ranger.plugin.model.RangerServiceDef; +import org.apache.ranger.plugin.policyengine.RangerAccessResult; +import org.apache.ranger.plugin.policyengine.RangerPolicyEngine; +import org.apache.ranger.plugin.service.RangerBasePlugin; + +import java.util.Set; + +import static java.util.Locale.ENGLISH; + +public class RangerStarRocksAccessControl implements AccessControl { + private static RangerBasePlugin rangerPlugin = null; + + public RangerStarRocksAccessControl() { + rangerPlugin = new RangerBasePlugin("starrocks", "starrocks"); + rangerPlugin.init(); // this will initialize policy engine and policy refresher + rangerPlugin.setResultProcessor(new RangerDefaultAuditHandler()); + } + + @Override + public void checkSystemAction(UserIdentity currentUser, Set roleIds, PrivilegeType privilegeType) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.SYSTEM, null); + if (!hasPermission(resource, currentUser, privilegeType)) { + 
AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.SYSTEM, null); + } + } + + @Override + public void checkCatalogAction(UserIdentity currentUser, Set roleIds, String catalogName, PrivilegeType privilegeType) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.CATALOG, Lists.newArrayList(catalogName)); + if (!hasPermission(resource, currentUser, privilegeType)) { + AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.CATALOG, catalogName); + } + } + + @Override + public void checkAnyActionOnCatalog(UserIdentity currentUser, Set roleIds, String catalogName) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.CATALOG, Lists.newArrayList(catalogName)); + if (!hasPermission(resource, currentUser, PrivilegeType.ANY)) { + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.CATALOG, catalogName); + } + } + + @Override + public void checkDbAction(UserIdentity currentUser, Set roleIds, String catalogName, String db, + PrivilegeType privilegeType) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.DATABASE, Lists.newArrayList(catalogName, db)); + if (!hasPermission(resource, currentUser, privilegeType)) { + AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.DATABASE, db); + } + } + + @Override + public void checkAnyActionOnDb(UserIdentity currentUser, Set roleIds, String catalogName, String db) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.DATABASE, + Lists.newArrayList(catalogName, db)); + if (!hasPermission(resource, currentUser, PrivilegeType.ANY)) { + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.DATABASE, db); + } + } + + @Override + public void checkTableAction(UserIdentity currentUser, Set roleIds, TableName tableName, PrivilegeType privilegeType) { + String catalog = tableName.getCatalog() == null ? 
InternalCatalog.DEFAULT_INTERNAL_CATALOG_NAME : tableName.getCatalog(); + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.TABLE, + Lists.newArrayList(catalog, tableName.getDb(), tableName.getTbl())); + if (!hasPermission(resource, currentUser, privilegeType)) { + AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.TABLE, tableName.getTbl()); + } + } + + @Override + public void checkAnyActionOnTable(UserIdentity currentUser, Set roleIds, TableName tableName) { + String catalog = tableName.getCatalog() == null ? InternalCatalog.DEFAULT_INTERNAL_CATALOG_NAME : tableName.getCatalog(); + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.TABLE, + Lists.newArrayList(catalog, tableName.getDb(), tableName.getTbl())); + if (!hasPermission(resource, currentUser, PrivilegeType.ANY)) { + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.TABLE, tableName.getTbl()); + } + } + + @Override + public void checkAnyActionOnAnyTable(UserIdentity currentUser, Set roleIds, String catalog, String db) { + Database database = GlobalStateMgr.getCurrentState().getMetadataMgr().getDb(catalog, db); + for (Table table : database.getTables()) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.TABLE, + Lists.newArrayList(catalog, database.getFullName(), table.getName())); + if (hasPermission(resource, currentUser, PrivilegeType.ANY)) { + return; + } + } + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.TABLE, db); + } + + @Override + public void checkViewAction(UserIdentity currentUser, Set roleIds, TableName tableName, PrivilegeType privilegeType) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.VIEW, + Lists.newArrayList(tableName.getDb(), tableName.getTbl())); + if (!hasPermission(resource, currentUser, privilegeType)) { + AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.VIEW, 
tableName.getTbl()); + } + } + + @Override + public void checkAnyActionOnView(UserIdentity currentUser, Set roleIds, TableName tableName) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.VIEW, + Lists.newArrayList(tableName.getDb(), tableName.getTbl())); + if (!hasPermission(resource, currentUser, PrivilegeType.ANY)) { + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.VIEW, tableName.getTbl()); + } + } + + @Override + public void checkAnyActionOnAnyView(UserIdentity currentUser, Set roleIds, String db) { + Database database = GlobalStateMgr.getServingState().getDb(db); + for (Table table : database.getViews()) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.VIEW, + Lists.newArrayList(database.getFullName(), table.getName())); + if (hasPermission(resource, currentUser, PrivilegeType.ANY)) { + return; + } + } + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.VIEW, db); + } + + @Override + public void checkMaterializedViewAction(UserIdentity currentUser, Set roleIds, TableName tableName, + PrivilegeType privilegeType) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.MATERIALIZED_VIEW, + Lists.newArrayList(tableName.getDb(), tableName.getTbl())); + if (!hasPermission(resource, currentUser, privilegeType)) { + AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.MATERIALIZED_VIEW, tableName.getTbl()); + } + } + + @Override + public void checkAnyActionOnMaterializedView(UserIdentity currentUser, Set roleIds, TableName tableName) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.MATERIALIZED_VIEW, + Lists.newArrayList(tableName.getDb(), tableName.getTbl())); + if (!hasPermission(resource, currentUser, PrivilegeType.ANY)) { + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), + ObjectType.MATERIALIZED_VIEW, tableName.getTbl()); + } + } + + @Override + public void 
checkAnyActionOnAnyMaterializedView(UserIdentity currentUser, Set roleIds, String db) { + Database database = GlobalStateMgr.getServingState().getDb(db); + for (Table table : database.getMaterializedViews()) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.VIEW, + Lists.newArrayList(database.getFullName(), table.getName())); + if (hasPermission(resource, currentUser, PrivilegeType.ANY)) { + return; + } + } + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.MATERIALIZED_VIEW, db); + } + + @Override + public void checkFunctionAction(UserIdentity currentUser, Set roleIds, Database database, Function function, + PrivilegeType privilegeType) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.FUNCTION, + Lists.newArrayList(database.getFullName(), function.getSignature())); + if (!hasPermission(resource, currentUser, privilegeType)) { + AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.FUNCTION, function.getSignature()); + } + } + + @Override + public void checkAnyActionOnFunction(UserIdentity currentUser, Set roleIds, String database, Function function) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.FUNCTION, + Lists.newArrayList(database, function.getSignature())); + if (!hasPermission(resource, currentUser, PrivilegeType.ANY)) { + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.FUNCTION, function.getSignature()); + } + } + + @Override + public void checkAnyActionOnAnyFunction(UserIdentity currentUser, Set roleIds, String db) { + Database database = GlobalStateMgr.getServingState().getDb(db); + for (Function function : database.getFunctions()) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.FUNCTION, + Lists.newArrayList(database.getFullName(), function.getSignature())); + if (hasPermission(resource, currentUser, PrivilegeType.ANY)) { + return; + } + } + 
AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.FUNCTION, db); + } + + @Override + public void checkGlobalFunctionAction(UserIdentity currentUser, Set roleIds, Function function, + PrivilegeType privilegeType) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.GLOBAL_FUNCTION, + Lists.newArrayList(function.getSignature())); + if (!hasPermission(resource, currentUser, privilegeType)) { + AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.GLOBAL_FUNCTION, function.getSignature()); + } + } + + @Override + public void checkAnyActionOnGlobalFunction(UserIdentity currentUser, Set roleIds, Function function) { + if (!currentUser.equals(UserIdentity.ROOT)) { + AccessDeniedException.reportAccessDenied("ANY", ObjectType.GLOBAL_FUNCTION, function.getSignature()); + } + } + + /** + * Check whether current user has specified privilege action on any object(table/view/mv) in the db. + */ + @Override + public void checkActionInDb(UserIdentity userIdentity, Set roleIds, String db, PrivilegeType privilegeType) { + Database database = GlobalStateMgr.getCurrentState().getDb(db); + for (Table table : database.getTables()) { + if (table.isView()) { + checkViewAction(userIdentity, roleIds, new TableName(database.getFullName(), table.getName()), privilegeType); + } else if (table.isMaterializedView()) { + checkMaterializedViewAction(userIdentity, roleIds, + new TableName(database.getFullName(), table.getName()), privilegeType); + } else { + checkTableAction(userIdentity, roleIds, new TableName(database.getFullName(), table.getName()), privilegeType); + } + } + } + + @Override + public void checkResourceAction(UserIdentity currentUser, Set roleIds, String name, PrivilegeType privilegeType) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.RESOURCE, Lists.newArrayList(name)); + if (!hasPermission(resource, currentUser, privilegeType)) { + 
AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.RESOURCE, name); + } + } + + @Override + public void checkAnyActionOnResource(UserIdentity currentUser, Set roleIds, String name) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.RESOURCE, Lists.newArrayList(name)); + if (!hasPermission(resource, currentUser, PrivilegeType.ANY)) { + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.RESOURCE, name); + } + } + + @Override + public void checkResourceGroupAction(UserIdentity currentUser, Set roleIds, String name, PrivilegeType privilegeType) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.RESOURCE_GROUP, Lists.newArrayList(name)); + if (!hasPermission(resource, currentUser, privilegeType)) { + AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.RESOURCE_GROUP, name); + } + } + + @Override + public void checkStorageVolumeAction(UserIdentity currentUser, Set roleIds, String storageVolume, + PrivilegeType privilegeType) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.STORAGE_VOLUME, + Lists.newArrayList(storageVolume)); + if (!hasPermission(resource, currentUser, privilegeType)) { + AccessDeniedException.reportAccessDenied(privilegeType.name(), ObjectType.STORAGE_VOLUME, storageVolume); + } + } + + @Override + public void checkAnyActionOnStorageVolume(UserIdentity currentUser, Set roleIds, String storageVolume) { + RangerStarRocksResource resource = new RangerStarRocksResource(ObjectType.STORAGE_VOLUME, + Lists.newArrayList(storageVolume)); + if (!hasPermission(resource, currentUser, PrivilegeType.ANY)) { + AccessDeniedException.reportAccessDenied(PrivilegeType.ANY.name(), ObjectType.STORAGE_VOLUME, storageVolume); + } + } + + @Override + public Expr getColumnMaskingPolicy(ConnectContext currentUser, TableName tableName, String columnName, Type type) { + RangerStarRocksAccessRequest request = 
RangerStarRocksAccessRequest.createAccessRequest( + new RangerStarRocksResource(tableName.getCatalog(), tableName.getDb(), tableName.getTbl(), columnName), + currentUser.getCurrentUserIdentity(), PrivilegeType.SELECT.name().toLowerCase(ENGLISH)); + + RangerAccessResult result = rangerPlugin.evalDataMaskPolicies(request, null); + if (result.isMaskEnabled()) { + String maskType = result.getMaskType(); + RangerServiceDef.RangerDataMaskTypeDef maskTypeDef = result.getMaskTypeDef(); + String transformer = null; + + if (maskTypeDef != null) { + transformer = maskTypeDef.getTransformer(); + } + + if (StringUtils.equalsIgnoreCase(maskType, RangerPolicy.MASK_TYPE_NULL)) { + transformer = "NULL"; + } else if (StringUtils.equalsIgnoreCase(maskType, RangerPolicy.MASK_TYPE_CUSTOM)) { + String maskedValue = result.getMaskedValue(); + + if (maskedValue == null) { + transformer = "NULL"; + } else { + transformer = maskedValue; + } + } + + if (StringUtils.isNotEmpty(transformer)) { + transformer = transformer.replace("{col}", columnName).replace("{type}", type.toSql()); + } + + return SqlParser.parseSqlToExpr(transformer, currentUser.getSessionVariable().getSqlMode()); + } else { + return null; + } + } + + @Override + public Expr getRowAccessPolicy(ConnectContext currentUser, TableName tableName) { + RangerStarRocksAccessRequest request = RangerStarRocksAccessRequest.createAccessRequest( + new RangerStarRocksResource(ObjectType.TABLE, + Lists.newArrayList(tableName.getCatalog(), tableName.getDb(), tableName.getTbl())), + currentUser.getCurrentUserIdentity(), PrivilegeType.SELECT.name().toLowerCase(ENGLISH)); + RangerAccessResult result = rangerPlugin.evalRowFilterPolicies(request, null); + if (result != null && result.isRowFilterEnabled()) { + return SqlParser.parseSqlToExpr(result.getFilterExpr(), currentUser.getSessionVariable().getSqlMode()); + } else { + return null; + } + } + + private boolean hasPermission(RangerStarRocksResource resource, UserIdentity user, PrivilegeType 
privilegeType) { + String accessType; + if (privilegeType.equals(PrivilegeType.ANY)) { + accessType = RangerPolicyEngine.ANY_ACCESS; + } else { + accessType = privilegeType.name().toLowerCase(ENGLISH); + } + + RangerStarRocksAccessRequest request = RangerStarRocksAccessRequest.createAccessRequest(resource, user, accessType); + RangerAccessResult result = rangerPlugin.isAccessAllowed(request); + if (result != null && result.getIsAllowed()) { + return true; + } else { + return false; + } + } +} \ No newline at end of file diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/RangerStarRocksResource.java b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/RangerStarRocksResource.java new file mode 100644 index 0000000000000..ccdc2741003a5 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/RangerStarRocksResource.java @@ -0,0 +1,60 @@ +// Copyright 2021-present StarRocks, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package com.starrocks.privilege.ranger.starrocks; + +import com.starrocks.privilege.ObjectType; +import org.apache.ranger.plugin.policyengine.RangerAccessResourceImpl; + +import java.util.List; +import java.util.Locale; + +public class RangerStarRocksResource extends RangerAccessResourceImpl { + public RangerStarRocksResource(ObjectType objectType, List objectTokens) { + if (objectType.equals(ObjectType.CATALOG)) { + setValue(ObjectType.CATALOG.name().toLowerCase(Locale.ENGLISH), objectTokens.get(0)); + } else if (objectType.equals(ObjectType.DATABASE)) { + setValue(ObjectType.CATALOG.name().toLowerCase(Locale.ENGLISH), objectTokens.get(0)); + setValue(ObjectType.DATABASE.name().toLowerCase(Locale.ENGLISH), objectTokens.get(1)); + } else if (objectType.equals(ObjectType.TABLE)) { + setValue(ObjectType.CATALOG.name().toLowerCase(Locale.ENGLISH), objectTokens.get(0)); + setValue(ObjectType.DATABASE.name().toLowerCase(Locale.ENGLISH), objectTokens.get(1)); + setValue(ObjectType.TABLE.name().toLowerCase(Locale.ENGLISH), objectTokens.get(2)); + } else if (objectType.equals(ObjectType.VIEW)) { + setValue(ObjectType.DATABASE.name().toLowerCase(Locale.ENGLISH), objectTokens.get(0)); + setValue(ObjectType.VIEW.name().toLowerCase(Locale.ENGLISH), objectTokens.get(1)); + } else if (objectType.equals(ObjectType.MATERIALIZED_VIEW)) { + setValue(ObjectType.DATABASE.name().toLowerCase(Locale.ENGLISH), objectTokens.get(0)); + setValue(ObjectType.MATERIALIZED_VIEW.name().toLowerCase(Locale.ENGLISH), objectTokens.get(1)); + } else if (objectType.equals(ObjectType.FUNCTION)) { + setValue(ObjectType.DATABASE.name().toLowerCase(Locale.ENGLISH), objectTokens.get(0)); + setValue(ObjectType.FUNCTION.name().toLowerCase(Locale.ENGLISH), objectTokens.get(1)); + } else if (objectType.equals(ObjectType.GLOBAL_FUNCTION)) { + setValue(ObjectType.GLOBAL_FUNCTION.name().toLowerCase(Locale.ENGLISH), objectTokens.get(0)); + } else if (objectType.equals(ObjectType.RESOURCE)) { + 
setValue(ObjectType.RESOURCE.name().toLowerCase(Locale.ENGLISH), objectTokens.get(0)); + } else if (objectType.equals(ObjectType.RESOURCE_GROUP)) { + setValue(ObjectType.RESOURCE_GROUP.name().toLowerCase(Locale.ENGLISH), objectTokens.get(0)); + } else if (objectType.equals(ObjectType.STORAGE_VOLUME)) { + setValue(ObjectType.STORAGE_VOLUME.name().toLowerCase(Locale.ENGLISH), objectTokens.get(0)); + } + } + + public RangerStarRocksResource(String catalogName, String dbName, String tableName, String columnName) { + setValue(ObjectType.CATALOG.name().toLowerCase(Locale.ENGLISH), catalogName); + setValue(ObjectType.DATABASE.name().toLowerCase(Locale.ENGLISH), dbName); + setValue(ObjectType.TABLE.name().toLowerCase(Locale.ENGLISH), tableName); + setValue("column", columnName); + } +} \ No newline at end of file diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/conf/ranger-policymgr-ssl.xml b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/conf/ranger-policymgr-ssl.xml new file mode 100644 index 0000000000000..a729d4a0b482a --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/conf/ranger-policymgr-ssl.xml @@ -0,0 +1,49 @@ + + + + + + + xasecure.policymgr.clientssl.keystore + starrocksservice-clientcert.jks + + Java Keystore files + + + + xasecure.policymgr.clientssl.truststore + cacerts-xasecure.jks + + java truststore file + + + + xasecure.policymgr.clientssl.keystore.credential.file + jceks://file/tmp/keystore-starrocksservice-ssl.jceks + + java keystore credential file + + + + xasecure.policymgr.clientssl.truststore.credential.file + jceks://file/tmp/truststore-starrocksservice-ssl.jceks + + java truststore credential file + + + diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/conf/ranger-servicedef-starrocks.json b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/conf/ranger-servicedef-starrocks.json new file mode 100644 index 
0000000000000..b7cca3e7a913a --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/conf/ranger-servicedef-starrocks.json @@ -0,0 +1,695 @@ +{ + "name": "starrocks", + "displayName": "starrocks", + "implClass": "org.apache.ranger.services.starrocks.RangerServiceStarRocks", + "label": "StarRocks", + "description": "StarRocks", + "resources": [ + { + "itemId": 1, + "name": "catalog", + "type": "string", + "level": 10, + "parent": "", + "mandatory": true, + "isValidLeaf": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks Catalog", + "description": "StarRocks Catalog", + "accessTypeRestrictions": [ + "usage", + "create database", + "drop", + "alter" + ] + }, + { + "itemId": 2, + "name": "database", + "type": "string", + "level": 20, + "parent": "catalog", + "mandatory": true, + "isValidLeaf": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks Database", + "description": "StarRocks Database", + "accessTypeRestrictions": [ + "create table", + "drop", + "alter", + "create view", + "create function", + "create materialized view" + ] + }, + { + "itemId": 3, + "name": "table", + "type": "string", + "level": 30, + "parent": "database", + "mandatory": true, + "isValidLeaf": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": 
true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks Table", + "description": "StarRocks Table", + "accessTypeRestrictions": [ + "delete", + "drop", + "insert", + "select", + "alter", + "export", + "update" + ] + }, + { + "itemId": 4, + "name": "column", + "type": "string", + "level": 40, + "parent": "table", + "mandatory": true, + "isValidLeaf": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks Column", + "description": "StarRocks Column", + "accessTypeRestrictions": [ + "select" + ] + }, + { + "itemId": 5, + "name": "view", + "type": "string", + "level": 30, + "parent": "database", + "mandatory": true, + "isValidLeaf": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks View", + "description": "StarRocks View", + "accessTypeRestrictions": [ + "select", + "drop", + "alter" + ] + }, + { + "itemId": 6, + "name": "materialized_view", + "type": "string", + "level": 30, + "parent": "database", + "mandatory": true, + "isValidLeaf": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks Materialized View", + "description": "StarRocks Materialized View", + 
"accessTypeRestrictions": [ + "select", + "refresh", + "drop", + "alter" + ] + }, + { + "itemId": 7, + "name": "function", + "type": "string", + "level": 30, + "parent": "database", + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks Function", + "description": "StarRocks Function", + "accessTypeRestrictions": [ + "usage", + "drop" + ] + }, + { + "itemId": 8, + "name": "global_function", + "type": "string", + "level": 10, + "parent": "", + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks Global Function", + "description": "StarRocks Global Function", + "accessTypeRestrictions": [ + "usage", + "drop" + ] + }, + { + "itemId": 9, + "name": "resource", + "type": "string", + "level": 10, + "parent": "", + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks Resource", + "description": "StarRocks Resource", + "accessTypeRestrictions": [ + "usage", + "alter", + "drop" + ] + }, + { + "itemId": 10, + "name": "resource_group", + "type": "string", + "level": 10, + "parent": "", + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + 
"matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks Resource Group", + "description": "StarRocks Resource Group", + "accessTypeRestrictions": [ + "alter", + "drop" + ] + }, + { + "itemId": 11, + "name": "storage", + "type": "string", + "level": 10, + "parent": "", + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks Storage", + "description": "StarRocks Storage", + "accessTypeRestrictions": [ + "drop", + "alter", + "usage" + ] + }, + { + "itemId": 12, + "name": "user", + "type": "string", + "level": 10, + "parent": "", + "mandatory": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks Storage", + "description": "StarRocks Storage", + "accessTypeRestrictions": [ + "impersonate" + ] + }, + { + "itemId": 13, + "name": "system", + "type": "string", + "level": 10, + "parent": "", + "mandatory": true, + "isValidLeaf": true, + "lookupSupported": true, + "recursiveSupported": false, + "excludesSupported": true, + "matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher", + "matcherOptions": { + "wildCard": true, + "ignoreCase": true + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "StarRocks System", + "description": "StarRocks System", + 
"accessTypeRestrictions": [ + "grant", + "node", + "create resource", + "plugin", + "file", + "blacklist", + "operate", + "create external catalog", + "repository", + "create global function", + "create storage volume" + ] + } + ], + "accessTypes": [ + { + "itemId": 1, + "name": "grant", + "label": "GRANT" + }, + { + "itemId": 2, + "name": "node", + "label": "NODE" + }, + { + "itemId": 3, + "name": "operate", + "label": "OPERATE" + }, + { + "itemId": 4, + "name": "delete", + "label": "DELETE" + }, + { + "itemId": 5, + "name": "drop", + "label": "DROP" + }, + { + "itemId": 6, + "name": "insert", + "label": "INSERT" + }, + { + "itemId": 7, + "name": "select", + "label": "SELECT" + }, + { + "itemId": 8, + "name": "alter", + "label": "ALTER" + }, + { + "itemId": 9, + "name": "export", + "label": "EXPORT" + }, + { + "itemId": 10, + "name": "update", + "label": "UPDATE" + }, + { + "itemId": 11, + "name": "usage", + "label": "USAGE" + }, + { + "itemId": 12, + "name": "plugin", + "label": "PLUGIN" + }, + { + "itemId": 13, + "name": "file", + "label": "FILE" + }, + { + "itemId": 14, + "name": "blacklist", + "label": "BLACKLIST" + }, + { + "itemId": 15, + "name": "repository", + "label": "REPOSITORY" + }, + { + "itemId": 16, + "name": "refresh", + "label": "REFRESH" + }, + { + "itemId": 17, + "name": "impersonate", + "label": "IMPERSONATE" + }, + { + "itemId": 18, + "name": "create database", + "label": "CREATE DATABASE" + }, + { + "itemId": 19, + "name": "create table", + "label": "CREATE TABLE" + }, + { + "itemId": 20, + "name": "create view", + "label": "CREATE VIEW" + }, + { + "itemId": 21, + "name": "create function", + "label": "CREATE FUNCTION" + }, + { + "itemId": 22, + "name": "create global function", + "label": "CREATE GLOBAL FUNCTION" + }, + { + "itemId": 23, + "name": "create materialized view", + "label": "CREATE MATERIALIZED VIEW" + }, + { + "itemId": 24, + "name": "create resource", + "label": "CREATE RESOURCE" + }, + { + "itemId": 25, + "name": "create 
resource group", + "label": "CREATE RESOURCE GROUP" + }, + { + "itemId": 26, + "name": "create external catalog", + "label": "CREATE EXTERNAL CATALOG" + }, + { + "itemId": 27, + "name": "create storage volume", + "label": "CREATE STORAGE VOLUME" + } + ], + "configs": [ + { + "itemId": 1, + "name": "username", + "type": "string", + "mandatory": true, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "Username" + }, + { + "itemId": 2, + "name": "password", + "type": "password", + "mandatory": false, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "Password" + }, + { + "itemId": 3, + "name": "jdbc.driverClassName", + "type": "string", + "mandatory": true, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "defaultValue": "com.mysql.cj.jdbc.Driver" + }, + { + "itemId": 4, + "name": "jdbc.url", + "type": "string", + "mandatory": true, + "defaultValue": "jdbc:mysql://127.0.0.1:9030", + "validationRegEx": "", + "validationMessage": "", + "uiHint": "" + } + ], + "enums": [ + ], + "contextEnrichers": [ + ], + "policyConditions": [ + { + "itemId": 100, + "name": "ip-range", + "evaluator": "org.apache.ranger.plugin.conditionevaluator.RangerIpMatcher", + "evaluatorOptions": { + }, + "validationRegEx": "", + "validationMessage": "", + "uiHint": "", + "label": "IP Address Range", + "description": "IP Address Range" + } + ], + "dataMaskDef": { + "accessTypes": [ + { + "name": "select" + } + ], + "resources": [ + { + "name": "catalog", + "matcherOptions": { + "wildCard": "true" + }, + "lookupSupported": true, + "uiHint": "{ \"singleValue\":true }" + }, + { + "name": "database", + "matcherOptions": { + "wildCard": "true" + }, + "lookupSupported": true, + "uiHint": "{ \"singleValue\":true }" + }, + { + "name": "table", + "matcherOptions": { + "wildCard": "true" + }, + "lookupSupported": true, + "uiHint": "{ \"singleValue\":true }" + }, + { + "name": "column", + "matcherOptions": { + "wildCard": "true" + 
}, + "lookupSupported": true, + "uiHint": "{ \"singleValue\":true }" + } + ], + "maskTypes": [ + { + "itemId": 1, + "name": "MASK", + "label": "Redact", + "description": "Replace lowercase with 'x', uppercase with 'X', digits with '0'", + "transformer": "cast(regexp_replace(regexp_replace(regexp_replace({col},'([A-Z])', 'X'),'([a-z])','x'),'([0-9])','0') as {type})", + "dataMaskOptions": { + } + }, + { + "itemId": 2, + "name": "MASK_SHOW_LAST_4", + "label": "Partial mask: show last 4", + "description": "Show last 4 characters; replace rest with 'X'", + "transformer": "cast(regexp_replace({col}, '(.*)(.{4}$)', x -> regexp_replace(x[1], '.', 'X') || x[2]) as {type})" + }, + { + "itemId": 3, + "name": "MASK_SHOW_FIRST_4", + "label": "Partial mask: show first 4", + "description": "Show first 4 characters; replace rest with 'x'", + "transformer": "cast(regexp_replace({col}, '(^.{4})(.*)', x -> x[1] || regexp_replace(x[2], '.', 'X')) as {type})" + }, + { + "itemId": 4, + "name": "MASK_HASH", + "label": "Hash", + "description": "Hash the value of a varchar with sha256", + "transformer": "cast(to_hex(sha256(to_utf8({col}))) as {type})" + }, + { + "itemId": 5, + "name": "MASK_NULL", + "label": "Nullify", + "description": "Replace with NULL" + }, + { + "itemId": 6, + "name": "MASK_NONE", + "label": "Unmasked (retain original value)", + "description": "No masking" + }, + { + "itemId": 12, + "name": "MASK_DATE_SHOW_YEAR", + "label": "Date: show only year", + "description": "Date: show only year", + "transformer": "date_trunc('year', {col})" + }, + { + "itemId": 13, + "name": "CUSTOM", + "label": "Custom", + "description": "Custom" + } + ] + }, + "rowFilterDef": { + "accessTypes": [ + { + "name": "select" + } + ], + "resources": [ + { + "name": "catalog", + "matcherOptions": { + "wildCard": "true" + }, + "lookupSupported": true, + "mandatory": true, + "uiHint": "{ \"singleValue\":true }" + }, + { + "name": "database", + "matcherOptions": { + "wildCard": "true" + }, + 
"lookupSupported": true, + "mandatory": true, + "uiHint": "{ \"singleValue\":true }" + }, + { + "name": "table", + "matcherOptions": { + "wildCard": "true" + }, + "lookupSupported": true, + "mandatory": true, + "uiHint": "{ \"singleValue\":true }" + } + ] + } +} diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/conf/ranger-starrocks-audit.xml b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/conf/ranger-starrocks-audit.xml new file mode 100644 index 0000000000000..e6032bb908124 --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/conf/ranger-starrocks-audit.xml @@ -0,0 +1,178 @@ + + + + + + xasecure.audit.is.enabled + true + + + + + xasecure.audit.hdfs.is.enabled + false + + + + xasecure.audit.hdfs.is.async + true + + + + xasecure.audit.hdfs.async.max.queue.size + 1048576 + + + + xasecure.audit.hdfs.async.max.flush.interval.ms + 30000 + + + + xasecure.audit.hdfs.config.encoding + + + + + xasecure.audit.hdfs.config.destination.directory + hdfs://NAMENODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd% + + + + xasecure.audit.hdfs.config.destination.file + %hostname%-audit.log + + + + xasecure.audit.hdfs.config.destination.flush.interval.seconds + 900 + + + + xasecure.audit.hdfs.config.destination.rollover.interval.seconds + 86400 + + + + xasecure.audit.hdfs.config.destination.open.retry.interval.seconds + 60 + + + + xasecure.audit.hdfs.config.local.buffer.directory + /var/log/starrocks/audit + + + + xasecure.audit.hdfs.config.local.buffer.file + %time:yyyyMMdd-HHmm.ss%.log + + + + xasecure.audit.hdfs.config.local.buffer.file.buffer.size.bytes + 8192 + + + + xasecure.audit.hdfs.config.local.buffer.flush.interval.seconds + 60 + + + + xasecure.audit.hdfs.config.local.buffer.rollover.interval.seconds + 600 + + + + xasecure.audit.hdfs.config.local.archive.directory + /var/log/starrocks/audit/archive + + + + xasecure.audit.hdfs.config.local.archive.max.file.count + 10 + + + + 
xasecure.audit.log4j.is.enabled + false + + + + xasecure.audit.log4j.is.async + false + + + + xasecure.audit.log4j.async.max.queue.size + 10240 + + + + xasecure.audit.log4j.async.max.flush.interval.ms + 30000 + + + + + + xasecure.audit.starrocks.is.enabled + true + + + + xasecure.audit.starrocks.async.max.queue.size + 1 + + + + xasecure.audit.starrocks.async.max.flush.interval.ms + 1000 + + + + xasecure.audit.starrocks.broker_list + localhost:9092 + + + + xasecure.audit.starrocks.topic_name + ranger_audits + + + + + xasecure.audit.solr.is.enabled + true + + + + xasecure.audit.solr.async.max.queue.size + 1 + + + + xasecure.audit.solr.async.max.flush.interval.ms + 100 + + + + xasecure.audit.solr.solr_url + http://127.0.0.1:6083/solr/ranger_audits + + diff --git a/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/conf/ranger-starrocks-security.xml b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/conf/ranger-starrocks-security.xml new file mode 100644 index 0000000000000..bdfe321f8181e --- /dev/null +++ b/fe/fe-core/src/main/java/com/starrocks/privilege/ranger/starrocks/conf/ranger-starrocks-security.xml @@ -0,0 +1,74 @@ + + + + + ranger.plugin.starrocks.service.name + starrocks + + Name of the Ranger service containing policies for this StarRocks instance + + + + + ranger.plugin.starrocks.policy.source.impl + org.apache.ranger.admin.client.RangerAdminRESTClient + + Class to retrieve policies from the source + + + + + ranger.plugin.starrocks.policy.rest.url + http://localhost:6080 + + URL to Ranger Admin + + + + + ranger.plugin.starrocks.policy.rest.ssl.config.file + /etc/hadoop/conf/ranger-policymgr-ssl.xml + + Path to the file containing SSL details to contact Ranger Admin + + + + + ranger.plugin.starrocks.policy.pollIntervalMs + 30000 + + How often to poll for changes in policies? 
+ + + + + ranger.plugin.starrocks.policy.rest.client.connection.timeoutMs + 30000 + + S3 Plugin RangerRestClient Connection Timeout in Milli Seconds + + + + + ranger.plugin.starrocks.policy.rest.client.read.timeoutMs + 30000 + + S3 Plugin RangerRestClient read Timeout in Milli Seconds + + + diff --git a/fe/fe-core/src/main/java/com/starrocks/qe/HDFSBackendSelector.java b/fe/fe-core/src/main/java/com/starrocks/qe/HDFSBackendSelector.java index a69f0b66ccc37..28ca66fe94010 100644 --- a/fe/fe-core/src/main/java/com/starrocks/qe/HDFSBackendSelector.java +++ b/fe/fe-core/src/main/java/com/starrocks/qe/HDFSBackendSelector.java @@ -225,7 +225,7 @@ public void computeScanRangeAssignment() throws UserException { } long totalSize = computeTotalSize(); - long avgNodeScanRangeBytes = totalSize / workerProvider.getAllWorkers().size() + 1; + long avgNodeScanRangeBytes = totalSize / Math.max(workerProvider.getAllWorkers().size(), 1) + 1; for (ComputeNode computeNode : workerProvider.getAllWorkers()) { assignedScansPerComputeNode.put(computeNode, 0L); diff --git a/fe/fe-core/src/main/java/com/starrocks/scheduler/TaskRunHistory.java b/fe/fe-core/src/main/java/com/starrocks/scheduler/TaskRunHistory.java index cb4d8b2630f93..c8545679cbc4f 100644 --- a/fe/fe-core/src/main/java/com/starrocks/scheduler/TaskRunHistory.java +++ b/fe/fe-core/src/main/java/com/starrocks/scheduler/TaskRunHistory.java @@ -66,7 +66,7 @@ public List getAllHistory() { public void forceGC() { List allHistory = getAllHistory(); - int startIndex = Config.task_runs_max_history_number; + int startIndex = Math.min(allHistory.size(), Config.task_runs_max_history_number); allHistory.subList(startIndex, allHistory.size()) .forEach(taskRunStatus -> removeTask(taskRunStatus.getQueryId())); } diff --git a/fe/fe-core/src/main/java/com/starrocks/server/CatalogMgr.java b/fe/fe-core/src/main/java/com/starrocks/server/CatalogMgr.java index 7f45013c992b9..22141995da5e1 100644 --- 
a/fe/fe-core/src/main/java/com/starrocks/server/CatalogMgr.java +++ b/fe/fe-core/src/main/java/com/starrocks/server/CatalogMgr.java @@ -47,6 +47,9 @@ import com.starrocks.persist.metablock.SRMetaBlockID; import com.starrocks.persist.metablock.SRMetaBlockReader; import com.starrocks.persist.metablock.SRMetaBlockWriter; +import com.starrocks.privilege.NativeAccessControl; +import com.starrocks.privilege.ranger.hive.RangerHiveAccessControl; +import com.starrocks.sql.analyzer.Authorizer; import com.starrocks.sql.ast.CreateCatalogStmt; import com.starrocks.sql.ast.DropCatalogStmt; import org.apache.logging.log4j.LogManager; @@ -116,6 +119,13 @@ public void createCatalog(String type, String catalogName, String comment, Map } Table table = resolveTable(tableRelation); + Relation r; if (table instanceof View) { View view = (View) table; QueryStatement queryStatement = view.getQueryStatement(); ViewRelation viewRelation = new ViewRelation(tableName, view, queryStatement); viewRelation.setAlias(tableRelation.getAlias()); - return viewRelation; + + r = viewRelation; } else if (table instanceof HiveView) { HiveView hiveView = (HiveView) table; QueryStatement queryStatement = hiveView.getQueryStatement(); @@ -297,7 +300,8 @@ private Relation resolveTableRef(Relation relation, Scope scope, Set view.setInlineViewDefWithSqlMode(hiveView.getInlineViewDef(), 0); ViewRelation viewRelation = new ViewRelation(tableName, view, queryStatement); viewRelation.setAlias(tableRelation.getAlias()); - return viewRelation; + + r = viewRelation; } else { if (tableRelation.getTemporalClause() != null) { if (table.getType() != Table.TableType.MYSQL) { @@ -309,11 +313,25 @@ private Relation resolveTableRef(Relation relation, Scope scope, Set if (table.isSupported()) { tableRelation.setTable(table); - return tableRelation; + r = tableRelation; } else { throw unsupportedException("Unsupported scan table type: " + table.getType()); } } + + if (r.isPolicyRewritten()) { + return r; + } + assert tableName 
!= null; + QueryStatement policyRewriteQuery = SecurityPolicyRewriteRule.buildView(session, r, tableName); + if (policyRewriteQuery == null) { + return r; + } else { + r.setPolicyRewritten(true); + SubqueryRelation subqueryRelation = new SubqueryRelation(policyRewriteQuery); + subqueryRelation.setAlias(tableName); + return subqueryRelation; + } } else { if (relation.getResolveTableName() != null) { if (aliasSet.contains(relation.getResolveTableName())) { diff --git a/fe/fe-core/src/main/java/com/starrocks/sql/ast/Relation.java b/fe/fe-core/src/main/java/com/starrocks/sql/ast/Relation.java index d65401f37c98c..d8cc2bfc18cc5 100644 --- a/fe/fe-core/src/main/java/com/starrocks/sql/ast/Relation.java +++ b/fe/fe-core/src/main/java/com/starrocks/sql/ast/Relation.java @@ -30,6 +30,10 @@ public abstract class Relation implements ParseNode { protected TableName alias; protected List explicitColumnNames; + // It is used to record whether the Relation has been rewritten by the Security Policy, + // to prevent the Relation from being rewritten multiple times due to multiple Analyzes + private boolean policyRewritten = false; + protected final NodePosition pos; protected Relation(NodePosition pos) { @@ -71,6 +75,14 @@ public boolean isDualRelation() { return false; } + public void setPolicyRewritten(boolean policyRewritten) { + this.policyRewritten = policyRewritten; + } + + public boolean isPolicyRewritten() { + return policyRewritten; + } + @Override public NodePosition getPos() { return pos; diff --git a/fe/fe-core/src/test/java/com/starrocks/connector/PartitionUtilTest.java b/fe/fe-core/src/test/java/com/starrocks/connector/PartitionUtilTest.java index 92d5dc0972b7c..ab63ac40daa0a 100644 --- a/fe/fe-core/src/test/java/com/starrocks/connector/PartitionUtilTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/connector/PartitionUtilTest.java @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
- package com.starrocks.connector; import com.google.common.collect.ImmutableList; @@ -23,6 +22,7 @@ import com.starrocks.analysis.DateLiteral; import com.starrocks.analysis.LiteralExpr; import com.starrocks.catalog.Column; +import com.starrocks.catalog.DeltaLakeTable; import com.starrocks.catalog.HiveTable; import com.starrocks.catalog.JDBCTable; import com.starrocks.catalog.PartitionKey; @@ -35,6 +35,7 @@ import com.starrocks.connector.exception.StarRocksConnectorException; import com.starrocks.connector.hive.HiveMetaClient; import com.starrocks.connector.hive.HivePartitionName; +import com.starrocks.connector.iceberg.IcebergApiConverter; import mockit.Expectations; import mockit.Mock; import mockit.MockUp; @@ -68,7 +69,39 @@ public void testCreatePartitionKey() throws Exception { @Test public void testCreateHudiPartitionKey() throws AnalysisException { PartitionKey partitionKey = createPartitionKey( - Lists.newArrayList("1", "a", "3.0", HiveMetaClient.HUDI_PARTITION_NULL_VALUE), partColumns, Table.TableType.HUDI); + Lists.newArrayList("1", "a", "3.0", HiveMetaClient.HUDI_PARTITION_NULL_VALUE), partColumns, + Table.TableType.HUDI); + Assert.assertEquals("(\"1\", \"a\", \"3.0\", \"NULL\")", partitionKey.toSql()); + List res = PartitionUtil.fromPartitionKey(partitionKey); + Assert.assertEquals("1", res.get(0)); + Assert.assertEquals("a", res.get(1)); + Assert.assertEquals("3.0", res.get(2)); + Assert.assertEquals(HiveMetaClient.HUDI_PARTITION_NULL_VALUE, res.get(3)); + + partitionKey = createPartitionKey( + Lists.newArrayList("1", "a", "3.0", HiveMetaClient.PARTITION_NULL_VALUE), partColumns, + Table.TableType.HUDI); + Assert.assertEquals("(\"1\", \"a\", \"3.0\", \"NULL\")", partitionKey.toSql()); + res = PartitionUtil.fromPartitionKey(partitionKey); + Assert.assertEquals("1", res.get(0)); + Assert.assertEquals("a", res.get(1)); + Assert.assertEquals("3.0", res.get(2)); + Assert.assertEquals(HiveMetaClient.PARTITION_NULL_VALUE, res.get(3)); + } + + @Test + public 
void testCreateIcebergPartitionKey() throws AnalysisException { + PartitionKey partitionKey = createPartitionKey( + Lists.newArrayList("1", "a", "3.0", IcebergApiConverter.PARTITION_NULL_VALUE), partColumns, + Table.TableType.ICEBERG); + Assert.assertEquals("(\"1\", \"a\", \"3.0\", \"NULL\")", partitionKey.toSql()); + } + + @Test + public void testCreateDeltaLakePartitionKey() throws AnalysisException { + PartitionKey partitionKey = createPartitionKey( + Lists.newArrayList("1", "a", "3.0", DeltaLakeTable.PARTITION_NULL_VALUE), partColumns, + Table.TableType.DELTALAKE); Assert.assertEquals("(\"1\", \"a\", \"3.0\", \"NULL\")", partitionKey.toSql()); } @@ -100,7 +133,7 @@ public void testGetSuffixNameIllegal() { @Test public void testToPartitionValues() { - String partitionNames = "a=1/b=2/c=3"; + String partitionNames = "a=1/b=2/c=3"; Assert.assertEquals(Lists.newArrayList("1", "2", "3"), toPartitionValues(partitionNames)); } diff --git a/fe/fe-core/src/test/java/com/starrocks/connector/parser/trino/TrinoQueryTest.java b/fe/fe-core/src/test/java/com/starrocks/connector/parser/trino/TrinoQueryTest.java index 4bd1c1c824f96..e97c6d840db6b 100644 --- a/fe/fe-core/src/test/java/com/starrocks/connector/parser/trino/TrinoQueryTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/connector/parser/trino/TrinoQueryTest.java @@ -324,6 +324,20 @@ public void testSelectArray() throws Exception { " | : 7: expr\n" + " | common expressions:\n" + " | : 3: c2[1]"); + + sql = "select element_at(array[1,2,3], 1)"; + assertPlanContains(sql, "[1,2,3][1]"); + + sql = "select element_at(c1, 2) from test_array"; + assertPlanContains(sql, "2: c1[2]"); + + sql = "select element_at(array[1,2,3], 1, 0)"; + try { + getFragmentPlan(sql); + Assert.fail(); + } catch (Exception e) { + Assert.assertTrue(e.getMessage().contains("element_at function must have 2 arguments")); + } } @Test diff --git a/fe/fe-core/src/test/java/com/starrocks/qe/HDFSBackendSelectorTest.java 
b/fe/fe-core/src/test/java/com/starrocks/qe/HDFSBackendSelectorTest.java index 084d976def26d..92c6fe8769a6b 100644 --- a/fe/fe-core/src/test/java/com/starrocks/qe/HDFSBackendSelectorTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/qe/HDFSBackendSelectorTest.java @@ -150,6 +150,23 @@ public void addCustomProperties(String name, String value) { System.out.printf("%s -> %d bytes\n", entry.getKey(), entry.getValue()); Assert.assertTrue(Math.abs(entry.getValue() - avg) < variance); } + + // test empty compute nodes + workerProvider = new DefaultWorkerProvider( + ImmutableMap.of(), + ImmutableMap.of(), + ImmutableMap.of(), + ImmutableMap.of(), + true + ); + selector = + new HDFSBackendSelector(hdfsScanNode, locations, assignment, workerProvider, false, false); + try { + selector.computeScanRangeAssignment(); + Assert.fail(); + } catch (Exception e) { + Assert.assertEquals("Failed to find backend to execute", e.getMessage()); + } } @Test diff --git a/fe/fe-core/src/test/java/com/starrocks/scheduler/TaskManagerTest.java b/fe/fe-core/src/test/java/com/starrocks/scheduler/TaskManagerTest.java index b54bc384df70f..e96cf66720183 100644 --- a/fe/fe-core/src/test/java/com/starrocks/scheduler/TaskManagerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/scheduler/TaskManagerTest.java @@ -415,6 +415,21 @@ public void testForceGC() { Config.task_runs_max_history_number = 10000; } + @Test + public void testForceGC2() { + TaskRunManager taskRunManager = new TaskRunManager(); + for (int i = 0; i < 10; i++) { + TaskRunStatus taskRunStatus = new TaskRunStatus(); + taskRunStatus.setQueryId("test" + i); + taskRunStatus.setTaskName("test" + i); + taskRunManager.getTaskRunHistory().addHistory(taskRunStatus); + } + Config.task_runs_max_history_number = 20; + taskRunManager.getTaskRunHistory().forceGC(); + Assert.assertEquals(10, taskRunManager.getTaskRunHistory().getAllHistory().size()); + Config.task_runs_max_history_number = 10000; + } + private LocalDateTime 
parseLocalDateTime(String str) throws Exception { Date date = TimeUtils.parseDate(str, PrimitiveType.DATETIME); return LocalDateTime.ofInstant(date.toInstant(), ZoneId.systemDefault()); diff --git a/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/PrivilegeCheckerTest.java b/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/PrivilegeCheckerTest.java index c0b8757da9556..9d83d3f39fd86 100644 --- a/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/PrivilegeCheckerTest.java +++ b/fe/fe-core/src/test/java/com/starrocks/sql/analyzer/PrivilegeCheckerTest.java @@ -2462,6 +2462,16 @@ public void testDropGlobalFunc() throws Exception { "Access denied; you need (at least one of) the DROP privilege(s) on GLOBAL FUNCTION " + "my_udf_json_get(VARCHAR,VARCHAR) for this operation"); + // test disable privilege collection cache + Config.authorization_enable_priv_collection_cache = false; + verifyGrantRevoke( + "drop global function my_udf_json_get (string, string);", + "grant drop on GLOBAL FUNCTION my_udf_json_get(string,string) to test", + "revoke drop on GLOBAL FUNCTION my_udf_json_get(string,string) from test", + "Access denied; you need (at least one of) the DROP privilege(s) on GLOBAL FUNCTION " + + "my_udf_json_get(VARCHAR,VARCHAR) for this operation"); + Config.authorization_enable_priv_collection_cache = true; + Config.enable_udf = true; ctxToTestUser(); try {