diff --git a/.github/workflows/core.yml b/.github/workflows/core.yml
index e4a4bd1264d..b7004b15daa 100644
--- a/.github/workflows/core.yml
+++ b/.github/workflows/core.yml
@@ -397,14 +397,6 @@ jobs:
- name: Make IRkernel available to Jupyter
run: |
R -e "IRkernel::installspec()"
- - name: run spark-3.2 tests with scala-2.12 and python-${{ matrix.python }}
- run: |
- rm -rf spark/interpreter/metastore_db
- ./mvnw verify -pl spark-submit,spark/interpreter -am -Dtest=org/apache/zeppelin/spark/* -Pspark-3.2 -Pspark-scala-2.12 -Phadoop3 -Pintegration -DfailIfNoTests=false ${MAVEN_ARGS}
- - name: run spark-3.2 tests with scala-2.13 and python-${{ matrix.python }}
- run: |
- rm -rf spark/interpreter/metastore_db
- ./mvnw verify -pl spark-submit,spark/interpreter -am -Dtest=org/apache/zeppelin/spark/* -Pspark-3.2 -Pspark-scala-2.13 -Phadoop3 -Pintegration -DfailIfNoTests=false ${MAVEN_ARGS}
- name: run spark-3.3 tests with scala-2.12 and python-${{ matrix.python }}
run: |
rm -rf spark/interpreter/metastore_db
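For reference, the surviving Spark test steps can be reproduced outside CI. The sketch below is not part of this patch; it simply mirrors the Maven flags used in the workflow above and assumes a `spark-3.5` profile (listed in the build docs) and a local JDK 11+ toolchain.

```bash
# Hypothetical local equivalent of one remaining CI step (illustrative, not part of this patch).
# Assumes the repository root as working directory and the spark-3.5 / spark-scala-2.13 profiles
# documented in docs/setup/basics/how_to_build.md.
rm -rf spark/interpreter/metastore_db
./mvnw verify -pl spark-submit,spark/interpreter -am \
  -Dtest='org/apache/zeppelin/spark/*' \
  -Pspark-3.5 -Pspark-scala-2.13 -Phadoop3 -Pintegration -DfailIfNoTests=false
```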
diff --git a/Dockerfile b/Dockerfile
index 4f4fef399b3..6f1777e0862 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -21,9 +21,9 @@ ENV MAVEN_OPTS="-Xms1024M -Xmx2048M -XX:MaxMetaspaceSize=1024m -XX:-UseGCOverhea
# Allow npm and bower to run with root privileges
RUN echo "unsafe-perm=true" > ~/.npmrc && \
echo '{ "allow_root": true }' > ~/.bowerrc && \
- ./mvnw -B package -DskipTests -Pbuild-distr -Pspark-3.3 -Pinclude-hadoop -Phadoop3 -Pspark-scala-2.12 -Pweb-classic -Pweb-dist && \
+ ./mvnw -B package -DskipTests -Pbuild-distr -Pspark-3.4 -Pinclude-hadoop -Phadoop3 -Pspark-scala-2.12 -Pweb-classic -Pweb-dist && \
- # Example which doesn't compile all interpreters
+ # Example which doesn't compile all interpreters
- # ./mvnw -B package -DskipTests -Pbuild-distr -Pspark-3.2 -Pinclude-hadoop -Phadoop3 -Pspark-scala-2.12 -Pweb-classic -Pweb-dist -pl '!groovy,!livy,!hbase,!file,!flink' && \
+ # ./mvnw -B package -DskipTests -Pbuild-distr -Pspark-3.4 -Pinclude-hadoop -Phadoop3 -Pspark-scala-2.12 -Pweb-classic -Pweb-dist -pl '!groovy,!livy,!hbase,!file,!flink' && \
mv /workspace/zeppelin/zeppelin-distribution/target/zeppelin-*-bin/zeppelin-*-bin /opt/zeppelin/ && \
# Removing stuff saves time, because docker creates a temporary layer
rm -rf ~/.m2 && \
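For context, this Dockerfile is typically consumed with a plain `docker build`; the sketch below is illustrative only, and the tag name `zeppelin-dev` is an arbitrary local choice, not something defined by this change.

```bash
# Illustrative only: build the development image from the repository root.
# "zeppelin-dev" is an arbitrary local tag, not a name used by the project.
docker build -t zeppelin-dev -f Dockerfile .
```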
diff --git a/conf/zeppelin-env.cmd.template b/conf/zeppelin-env.cmd.template
index 2d5bb40dbbb..15c88fd4ca8 100644
--- a/conf/zeppelin-env.cmd.template
+++ b/conf/zeppelin-env.cmd.template
@@ -64,7 +64,7 @@ REM however, it is not encouraged when you can define SPARK_HOME
REM
REM Options read in YARN client mode
REM set HADOOP_CONF_DIR REM yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR.
-REM Pyspark (supported with Spark 1.2.1 and above)
+REM Pyspark (supported with Spark 3.3 and above)
REM To configure pyspark, you need to set spark distribution's path to 'spark.home' property in Interpreter setting screen in Zeppelin GUI
REM set PYSPARK_PYTHON REM path to the python command. must be the same path on the driver(Zeppelin) and all workers.
REM set PYTHONPATH
diff --git a/conf/zeppelin-env.sh.template b/conf/zeppelin-env.sh.template
index 9c228cbadbc..e27a688becd 100644
--- a/conf/zeppelin-env.sh.template
+++ b/conf/zeppelin-env.sh.template
@@ -87,7 +87,7 @@
##
# Options read in YARN client mode
# export HADOOP_CONF_DIR # yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR.
-# Pyspark (supported with Spark 1.2.1 and above)
+# Pyspark (supported with Spark 3.3 and above)
# To configure pyspark, you need to set spark distribution's path to 'spark.home' property in Interpreter setting screen in Zeppelin GUI
# export PYSPARK_PYTHON # path to the python command. must be the same path on the driver(Zeppelin) and all workers.
# export PYTHONPATH
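For readers updating their configuration after this change, a minimal sketch of the corresponding `zeppelin-env.sh` entries follows. The paths are illustrative assumptions, not shipped defaults.

```bash
# Illustrative zeppelin-env.sh values; adjust the paths to your own installation.
export SPARK_HOME=/opt/spark            # a local Spark 3.3+ distribution
export PYSPARK_PYTHON=/usr/bin/python3  # must resolve to the same interpreter on the driver and all workers
```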
diff --git a/docs/interpreter/spark.md b/docs/interpreter/spark.md
index 1fa02b5b2fb..680ca054b3b 100644
--- a/docs/interpreter/spark.md
+++ b/docs/interpreter/spark.md
@@ -385,7 +385,7 @@ You can also choose `scoped` mode. For `scoped` per note mode, Zeppelin creates
## SparkContext, SQLContext, SparkSession, ZeppelinContext
-SparkContext, SQLContext, SparkSession (for spark 2.x, 3.x) and ZeppelinContext are automatically created and exposed as variable names `sc`, `sqlContext`, `spark` and `z` respectively, in Scala, Python and R environments.
+SparkContext, SparkSession and ZeppelinContext are automatically created and exposed as variable names `sc`, `spark` and `z` respectively, in Scala, Python and R environments.
-> Note that Scala/Python/R environment shares the same SparkContext, SQLContext, SparkSession and ZeppelinContext instance.
+> Note that the Scala/Python/R environments share the same SparkContext, SparkSession and ZeppelinContext instances.
diff --git a/docs/setup/basics/how_to_build.md b/docs/setup/basics/how_to_build.md
index 8c8cd947f84..99951a9353a 100644
--- a/docs/setup/basics/how_to_build.md
+++ b/docs/setup/basics/how_to_build.md
@@ -83,7 +83,7 @@ You can directly start Zeppelin by running the following command after successfu
To be noticed, the spark profiles here only affect the unit test (no need to specify `SPARK_HOME`) of spark interpreter.
Zeppelin doesn't require you to build with different spark to make different versions of spark work in Zeppelin.
-You can run different versions of Spark in Zeppelin as long as you specify `SPARK_HOME`. Actually Zeppelin supports all the versions of Spark from 3.2 to 3.5.
+You can run different versions of Spark in Zeppelin as long as you specify `SPARK_HOME`. In fact, Zeppelin supports all versions of Spark from 3.3 to 3.5.
To build with a specific Spark version or scala versions, define one or more of the following profiles and options:
@@ -97,7 +97,6 @@ Available profiles are
-Pspark-3.5
-Pspark-3.4
-Pspark-3.3
--Pspark-3.2
```
minor version can be adjusted by `-Dspark.version=x.x.x`
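To make the profile and `-Dspark.version` combination concrete, here is a minimal sketch of a module-scoped build. The chosen profiles mirror the CI workflow in this patch, and the 3.5.2 patch release is only an example.

```bash
# Example only: build the Spark interpreter module against a specific Spark 3.5.x release.
./mvnw clean package -DskipTests -pl spark/interpreter -am \
  -Pspark-3.5 -Pspark-scala-2.13 -Phadoop3 -Dspark.version=3.5.2
```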
diff --git a/docs/setup/deployment/flink_and_spark_cluster.md b/docs/setup/deployment/flink_and_spark_cluster.md
index 76f9063cf13..df5df80d9ad 100644
--- a/docs/setup/deployment/flink_and_spark_cluster.md
+++ b/docs/setup/deployment/flink_and_spark_cluster.md
@@ -42,8 +42,8 @@ Assuming the minimal install, there are several programs that we will need to in
- git
- openssh-server
-- OpenJDK 7
-- Maven 3.1+
+- OpenJDK 11
+- Maven
-For git, openssh-server, and OpenJDK 7 we will be using the apt package manager.
+For git, openssh-server, and OpenJDK 11 we will be using the apt package manager.
@@ -60,17 +60,10 @@ sudo apt-get install git
sudo apt-get install openssh-server
```
-##### OpenJDK 7
+##### OpenJDK 11
```bash
-sudo apt-get install openjdk-7-jdk openjdk-7-jre-lib
-```
-*A note for those using Ubuntu 16.04*: To install `openjdk-7` on Ubuntu 16.04, one must add a repository. [Source](http://askubuntu.com/questions/761127/ubuntu-16-04-and-openjdk-7)
-
-```bash
-sudo add-apt-repository ppa:openjdk-r/ppa
-sudo apt-get update
-sudo apt-get install openjdk-7-jdk openjdk-7-jre-lib
+sudo apt-get install openjdk-11-jdk
```
### Installing Zeppelin
@@ -92,26 +85,23 @@ cd zeppelin
Package Zeppelin.
```bash
-./mvnw clean package -DskipTests -Pspark-3.2 -Dflink.version=1.1.3 -Pscala-2.11
+./mvnw clean package -DskipTests -Pspark-3.5 -Pflink-1.17
```
`-DskipTests` skips build tests- you're not developing (yet), so you don't need to do tests, the clone version *should* build.
-`-Pspark-3.2` tells maven to build a Zeppelin with Spark 3.2. This is important because Zeppelin has its own Spark interpreter and the versions must be the same.
+`-Pspark-3.5` tells maven to build a Zeppelin with Spark 3.5. This is important because Zeppelin has its own Spark interpreter and the versions must be the same.
-`-Dflink.version=1.1.3` tells maven specifically to build Zeppelin with Flink version 1.1.3.
+`-Pflink-1.17` tells maven to build a Zeppelin with Flink 1.17.
--`-Pscala-2.11` tells maven to build with Scala v2.11.
-
-
-**Note:** You can build against any version of Spark that has a Zeppelin build profile available. The key is to make sure you check out the matching version of Spark to build. At the time of this writing, Spark 3.2 was the most recent Spark version available.
+**Note:** You can build against any version of Spark that has a Zeppelin build profile available. The key is to make sure you check out the matching version of Spark to build. At the time of this writing, Spark 3.5 was the most recent Spark version available.
**Note:** On build failures. Having installed Zeppelin close to 30 times now, I will tell you that sometimes the build fails for seemingly no reason.
As long as you didn't edit any code, it is unlikely the build is failing because of something you did. What does tend to happen, is some dependency that maven is trying to download is unreachable. If your build fails on this step here are some tips:
- Don't get discouraged.
- Scroll up and read through the logs. There will be clues there.
-- Retry (that is, run the `./mvnw clean package -DskipTests -Pspark-3.2` again)
+- Retry (that is, run the `./mvnw clean package -DskipTests -Pspark-3.5` again)
- If there were clues that a dependency couldn't be downloaded wait a few hours or even days and retry again. Open source software when compiling is trying to download all of the dependencies it needs, if a server is off-line there is nothing you can do but wait for it to come back.
- Make sure you followed all of the steps carefully.
- Ask the community to help you. Go [here](http://zeppelin.apache.org/community.html) and join the user mailing list. People are there to help you. Make sure to copy and paste the build output (everything that happened in the console) and include that in your message.
@@ -225,16 +215,16 @@ Building from source is recommended where possible, for simplicity in this tuto
To download the Flink Binary use `wget`
```bash
-wget "http://mirror.cogentco.com/pub/apache/flink/flink-1.16.2/flink-1.16.2-bin-scala_2.12.tgz"
-tar -xzvf flink-1.16.2-bin-scala_2.12.tgz
+wget "https://archive.apache.org/dist/flink/flink-1.17.1/flink-1.17.1-bin-scala_2.12.tgz"
+tar -xzvf flink-1.17.1-bin-scala_2.12.tgz
```
-This will download Flink 1.16.2.
+This will download Flink 1.17.1.
Start the Flink Cluster.
```bash
-flink-1.16.2/bin/start-cluster.sh
+flink-1.17.1/bin/start-cluster.sh
```
###### Building From source
@@ -243,13 +233,13 @@ If you wish to build Flink from source, the following will be instructive. Note
See the [Flink Installation guide](https://github.com/apache/flink/blob/master/README.md) for more detailed instructions.
-Return to the directory where you have been downloading, this tutorial assumes that is `$HOME`. Clone Flink, check out release-1.1.3-rc2, and build.
+Return to the directory where you have been downloading; this tutorial assumes that is `$HOME`. Clone Flink, check out release-1.17.1, and build.
```bash
cd $HOME
git clone https://github.com/apache/flink.git
cd flink
-git checkout release-1.1.3-rc2
+git checkout release-1.17.1
mvn clean install -DskipTests
```
@@ -271,8 +261,8 @@ If no task managers are present, restart the Flink cluster with the following co
(if binaries)
```bash
-flink-1.1.3/bin/stop-cluster.sh
-flink-1.1.3/bin/start-cluster.sh
+flink-1.17.1/bin/stop-cluster.sh
+flink-1.17.1/bin/start-cluster.sh
```
@@ -284,7 +274,7 @@ build-target/bin/start-cluster.sh
```
-##### Spark 1.6 Cluster
+##### Spark Cluster
###### Download Binaries
@@ -295,12 +285,12 @@ Using binaries is also
To download the Spark Binary use `wget`
```bash
-wget "https://dlcdn.apache.org/spark/spark-3.4.1/spark-3.4.1-bin-hadoop3.tgz"
-tar -xzvf spark-3.4.1-bin-hadoop3.tgz
-mv spark-3.4.1-bin-hadoop3 spark
+wget "https://archive.apache.org/dist/spark/spark-3.5.2/spark-3.5.2-bin-hadoop3.tgz"
+tar -xzvf spark-3.5.2-bin-hadoop3.tgz
+mv spark-3.5.2-bin-hadoop3 spark
```
-This will download Spark 3.4.1, compatible with Hadoop 3. You do not have to install Hadoop for this binary to work, but if you are using Hadoop, please change `3` to your appropriate version.
+This will download Spark 3.5.2, compatible with Hadoop 3. You do not have to install Hadoop for this binary to work, but if you are using Hadoop, please change `3` to your appropriate version.
###### Building From source
@@ -308,21 +298,18 @@ Spark is an extraordinarily large project, which takes considerable time to down
See the [Spark Installation](https://github.com/apache/spark/blob/master/README.md) guide for more detailed instructions.
-Return to the directory where you have been downloading, this tutorial assumes that is $HOME. Clone Spark, check out branch-1.6, and build.
-**Note:** Recall, we're only checking out 1.6 because it is the most recent Spark for which a Zeppelin profile exists at
- the time of writing. You are free to check out other version, just make sure you build Zeppelin against the correct version of Spark. However if you use Spark 2.0, the word count example will need to be changed as Spark 2.0 is not compatible with the following examples.
-
+Return to the directory where you have been downloading; this tutorial assumes that is $HOME. Clone Spark, check out branch-3.5, and build.
```bash
cd $HOME
```
-Clone, check out, and build Spark version 1.6.x.
+Clone, check out, and build Spark version 3.5.x.
```bash
git clone https://github.com/apache/spark.git
cd spark
-git checkout branch-1.6
+git checkout branch-3.5
mvn clean package -DskipTests
```
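As a rough sketch of the final wiring step, assuming the `$HOME` directory layout used earlier in this tutorial and that you are inside the Zeppelin directory, the Spark and Flink installations can be handed to Zeppelin through `conf/zeppelin-env.sh`:

```bash
# Illustrative only: point Zeppelin at the Spark and Flink installs built above.
echo "export SPARK_HOME=$HOME/spark"        >> conf/zeppelin-env.sh
echo "export FLINK_HOME=$HOME/flink-1.17.1" >> conf/zeppelin-env.sh
bin/zeppelin-daemon.sh start   # the web UI defaults to http://localhost:8080
```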
diff --git a/spark/interpreter/pom.xml b/spark/interpreter/pom.xml
index 3156a170537..81e79dcf37d 100644
--- a/spark/interpreter/pom.xml
+++ b/spark/interpreter/pom.xml
@@ -40,10 +40,6 @@
3.0.3
2.7
- 4.1.19
- 4.2.4
- 4.1.17
-
3.4.1
3.21.12
@@ -222,27 +218,6 @@
-    <dependency>
-      <groupId>org.datanucleus</groupId>
-      <artifactId>datanucleus-core</artifactId>
-      <version>${datanucleus.core.version}</version>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.datanucleus</groupId>
-      <artifactId>datanucleus-api-jdo</artifactId>
-      <version>${datanucleus.apijdo.version}</version>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.datanucleus</groupId>
-      <artifactId>datanucleus-rdbms</artifactId>
-      <version>${datanucleus.rdbms.version}</version>
-      <scope>test</scope>
-    </dependency>
-
    <dependency>
      <groupId>org.mockito</groupId>
      <artifactId>mockito-core</artifactId>
@@ -589,16 +564,6 @@
        <py4j.version>0.10.9.5</py4j.version>
      </properties>
    </profile>
-
-    <profile>
-      <id>spark-3.2</id>
-      <properties>
-        <spark.version>3.2.4</spark.version>
-        <protobuf.version>2.5.0</protobuf.version>
-        <py4j.version>0.10.9.5</py4j.version>
-      </properties>
-    </profile>
-
diff --git a/spark/interpreter/src/main/resources/interpreter-setting.json b/spark/interpreter/src/main/resources/interpreter-setting.json
index eb3a4ef65f3..70c00dc9772 100644
--- a/spark/interpreter/src/main/resources/interpreter-setting.json
+++ b/spark/interpreter/src/main/resources/interpreter-setting.json
@@ -159,7 +159,7 @@
"envName": null,
"propertyName": "zeppelin.spark.deprecatedMsg.show",
"defaultValue": true,
- "description": "Whether show the spark deprecated message, spark 2.2 and before are deprecated. Zeppelin will display warning message by default",
+ "description": "Whether show the spark deprecated message, prior Spark 3.3 are deprecated. Zeppelin will display warning message by default",
"type": "checkbox"
}
},
diff --git a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkShimsTest.java b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkShimsTest.java
index b8720f86e52..5bd9cbba69b 100644
--- a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkShimsTest.java
+++ b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkShimsTest.java
@@ -97,7 +97,7 @@ public void setUp() {
when(mockContext.getIntpEventClient()).thenReturn(mockIntpEventClient);
try {
- sparkShims = SparkShims.getInstance(SparkVersion.SPARK_3_2_0.toString(), new Properties(), null);
+ sparkShims = SparkShims.getInstance(SparkVersion.SPARK_3_3_0.toString(), new Properties(), null);
} catch (Throwable e1) {
throw new RuntimeException("All SparkShims are tried, but no one can be created.");
}
diff --git a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkSqlInterpreterTest.java b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkSqlInterpreterTest.java
index 20594c4bde2..05556ba4e0b 100644
--- a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkSqlInterpreterTest.java
+++ b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkSqlInterpreterTest.java
@@ -292,14 +292,7 @@ void testDDL() throws InterpreterException, IOException {
InterpreterContext context = getInterpreterContext();
InterpreterResult ret = sqlInterpreter.interpret("create table t1(id int, name string)", context);
assertEquals(InterpreterResult.Code.SUCCESS, ret.code(), context.out.toString());
- // spark 1.x will still return DataFrame with non-empty columns.
- // org.apache.spark.sql.DataFrame = [result: string]
- if (!sparkInterpreter.getSparkContext().version().startsWith("1.")) {
- assertTrue(ret.message().isEmpty());
- } else {
- assertEquals(Type.TABLE, ret.message().get(0).getType());
- assertEquals("result\n", ret.message().get(0).getData());
- }
+ assertTrue(ret.message().isEmpty());
// create the same table again
ret = sqlInterpreter.interpret("create table t1(id int, name string)", context);
diff --git a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkVersionTest.java b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkVersionTest.java
index a454854a7fc..06aa392e4ca 100644
--- a/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkVersionTest.java
+++ b/spark/interpreter/src/test/java/org/apache/zeppelin/spark/SparkVersionTest.java
@@ -48,14 +48,14 @@ void testSparkVersion() {
assertEquals(SparkVersion.SPARK_3_5_0, SparkVersion.fromVersionString("3.5.0.2.5.0.0-1245"));
// test newer than
- assertTrue(SparkVersion.SPARK_3_5_0.newerThan(SparkVersion.SPARK_3_2_0));
+ assertTrue(SparkVersion.SPARK_3_5_0.newerThan(SparkVersion.SPARK_3_3_0));
assertTrue(SparkVersion.SPARK_3_5_0.newerThanEquals(SparkVersion.SPARK_3_5_0));
- assertFalse(SparkVersion.SPARK_3_2_0.newerThan(SparkVersion.SPARK_3_5_0));
+ assertFalse(SparkVersion.SPARK_3_3_0.newerThan(SparkVersion.SPARK_3_5_0));
// test older than
- assertTrue(SparkVersion.SPARK_3_2_0.olderThan(SparkVersion.SPARK_3_5_0));
- assertTrue(SparkVersion.SPARK_3_2_0.olderThanEquals(SparkVersion.SPARK_3_2_0));
- assertFalse(SparkVersion.SPARK_3_5_0.olderThan(SparkVersion.SPARK_3_2_0));
+ assertTrue(SparkVersion.SPARK_3_3_0.olderThan(SparkVersion.SPARK_3_5_0));
+ assertTrue(SparkVersion.SPARK_3_5_0.olderThanEquals(SparkVersion.SPARK_3_5_0));
+ assertFalse(SparkVersion.SPARK_3_5_0.olderThan(SparkVersion.SPARK_3_3_0));
// test newerThanEqualsPatchVersion
assertTrue(SparkVersion.fromVersionString("2.3.1")
diff --git a/spark/pom.xml b/spark/pom.xml
index f189c6c23f4..f3eb50d00b7 100644
--- a/spark/pom.xml
+++ b/spark/pom.xml
@@ -33,11 +33,6 @@
Zeppelin Spark Support
- 3.2.9
- 3.2.6
- 3.2.10
-
-
3.4.1
2.5.0
0.10.9.7
diff --git a/spark/spark-shims/src/main/java/org/apache/zeppelin/spark/SparkVersion.java b/spark/spark-shims/src/main/java/org/apache/zeppelin/spark/SparkVersion.java
index 27e0378f536..ff018c03697 100644
--- a/spark/spark-shims/src/main/java/org/apache/zeppelin/spark/SparkVersion.java
+++ b/spark/spark-shims/src/main/java/org/apache/zeppelin/spark/SparkVersion.java
@@ -25,15 +25,13 @@
public class SparkVersion {
private static final Logger LOGGER = LoggerFactory.getLogger(SparkVersion.class);
- public static final SparkVersion SPARK_3_2_0 = SparkVersion.fromVersionString("3.2.0");
-
public static final SparkVersion SPARK_3_3_0 = SparkVersion.fromVersionString("3.3.0");
public static final SparkVersion SPARK_3_5_0 = SparkVersion.fromVersionString("3.5.0");
public static final SparkVersion SPARK_4_0_0 = SparkVersion.fromVersionString("4.0.0");
- public static final SparkVersion MIN_SUPPORTED_VERSION = SPARK_3_2_0;
+ public static final SparkVersion MIN_SUPPORTED_VERSION = SPARK_3_3_0;
public static final SparkVersion UNSUPPORTED_FUTURE_VERSION = SPARK_4_0_0;
private int version;
diff --git a/zeppelin-interpreter-integration/src/test/java/org/apache/zeppelin/integration/SparkIntegrationTest32.java b/zeppelin-interpreter-integration/src/test/java/org/apache/zeppelin/integration/SparkIntegrationTest32.java
deleted file mode 100644
index 27c511e64e1..00000000000
--- a/zeppelin-interpreter-integration/src/test/java/org/apache/zeppelin/integration/SparkIntegrationTest32.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zeppelin.integration;
-
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.DisplayName;
-import org.junit.jupiter.api.Nested;
-import java.io.IOException;
-
-public class SparkIntegrationTest32 {
-
- @Nested
- @DisplayName("Hadoop3")
- public class Hadoop3 extends SparkIntegrationTest {
-
- @BeforeEach
- public void downloadSpark() throws IOException {
- prepareSpark("3.2.0", "3.2");
- }
- }
-}
diff --git a/zeppelin-interpreter-integration/src/test/java/org/apache/zeppelin/integration/ZeppelinSparkClusterTest32.java b/zeppelin-interpreter-integration/src/test/java/org/apache/zeppelin/integration/ZeppelinSparkClusterTest32.java
deleted file mode 100644
index 1f1b7692450..00000000000
--- a/zeppelin-interpreter-integration/src/test/java/org/apache/zeppelin/integration/ZeppelinSparkClusterTest32.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zeppelin.integration;
-
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.DisplayName;
-import org.junit.jupiter.api.Nested;
-
-public class ZeppelinSparkClusterTest32 {
-
- @Nested
- @DisplayName("Hadoop3")
- public class Hadoop3 extends ZeppelinSparkClusterTest {
-
- @BeforeEach
- public void downloadSpark() throws Exception {
- prepareSpark("3.2.0", "3.2");
- }
- }
-}