 */
 package org.apache.spark.deploy.k8s

-import org.apache.spark.{SPARK_VERSION => sparkVersion}
+import org.apache.spark.SPARK_VERSION
 import org.apache.spark.internal.Logging
 import org.apache.spark.internal.config.ConfigBuilder
 import org.apache.spark.network.util.ByteUnit

-package object config extends Logging {
+private[spark] object config extends Logging {

-  private[spark] val KUBERNETES_NAMESPACE =
+  val KUBERNETES_NAMESPACE =
     ConfigBuilder("spark.kubernetes.namespace")
       .doc("The namespace that will be used for running the driver and executor pods. When using" +
         " spark-submit in cluster mode, this can also be passed to spark-submit via the" +
         " --kubernetes-namespace command line argument.")
       .stringConf
       .createWithDefault("default")

-  private[spark] val EXECUTOR_DOCKER_IMAGE =
+  val EXECUTOR_DOCKER_IMAGE =
     ConfigBuilder("spark.kubernetes.executor.docker.image")
       .doc("Docker image to use for the executors. Specify this using the standard Docker tag" +
         " format.")
       .stringConf
-      .createWithDefault(s"spark-executor:$sparkVersion")
+      .createWithDefault(s"spark-executor:$SPARK_VERSION")

-  private[spark] val DOCKER_IMAGE_PULL_POLICY =
+  val DOCKER_IMAGE_PULL_POLICY =
     ConfigBuilder("spark.kubernetes.docker.image.pullPolicy")
       .doc("Docker image pull policy when pulling any docker image in Kubernetes integration")
       .stringConf
       .createWithDefault("IfNotPresent")

-  private[spark] val APISERVER_AUTH_DRIVER_CONF_PREFIX =
+  val APISERVER_AUTH_DRIVER_CONF_PREFIX =
     "spark.kubernetes.authenticate.driver"
-  private[spark] val APISERVER_AUTH_DRIVER_MOUNTED_CONF_PREFIX =
+  val APISERVER_AUTH_DRIVER_MOUNTED_CONF_PREFIX =
     "spark.kubernetes.authenticate.driver.mounted"
-  private[spark] val OAUTH_TOKEN_CONF_SUFFIX = "oauthToken"
-  private[spark] val OAUTH_TOKEN_FILE_CONF_SUFFIX = "oauthTokenFile"
-  private[spark] val CLIENT_KEY_FILE_CONF_SUFFIX = "clientKeyFile"
-  private[spark] val CLIENT_CERT_FILE_CONF_SUFFIX = "clientCertFile"
-  private[spark] val CA_CERT_FILE_CONF_SUFFIX = "caCertFile"
+  val OAUTH_TOKEN_CONF_SUFFIX = "oauthToken"
+  val OAUTH_TOKEN_FILE_CONF_SUFFIX = "oauthTokenFile"
+  val CLIENT_KEY_FILE_CONF_SUFFIX = "clientKeyFile"
+  val CLIENT_CERT_FILE_CONF_SUFFIX = "clientCertFile"
+  val CA_CERT_FILE_CONF_SUFFIX = "caCertFile"

-  private[spark] val KUBERNETES_SERVICE_ACCOUNT_NAME =
+  val KUBERNETES_SERVICE_ACCOUNT_NAME =
     ConfigBuilder(s"$APISERVER_AUTH_DRIVER_CONF_PREFIX.serviceAccountName")
       .doc("Service account that is used when running the driver pod. The driver pod uses" +
         " this service account when requesting executor pods from the API server. If specific" +
@@ -66,49 +66,49 @@ package object config extends Logging {
   // Note that while we set a default for this when we start up the
   // scheduler, the specific default value is dynamically determined
   // based on the executor memory.
-  private[spark] val KUBERNETES_EXECUTOR_MEMORY_OVERHEAD =
+  val KUBERNETES_EXECUTOR_MEMORY_OVERHEAD =
     ConfigBuilder("spark.kubernetes.executor.memoryOverhead")
       .doc("The amount of off-heap memory (in megabytes) to be allocated per executor. This" +
         " is memory that accounts for things like VM overheads, interned strings, other native" +
         " overheads, etc. This tends to grow with the executor size. (typically 6-10%).")
       .bytesConf(ByteUnit.MiB)
       .createOptional

-  private[spark] val KUBERNETES_EXECUTOR_LABEL_PREFIX = "spark.kubernetes.executor.label."
-  private[spark] val KUBERNETES_EXECUTOR_ANNOTATION_PREFIX = "spark.kubernetes.executor.annotation."
+  val KUBERNETES_EXECUTOR_LABEL_PREFIX = "spark.kubernetes.executor.label."
+  val KUBERNETES_EXECUTOR_ANNOTATION_PREFIX = "spark.kubernetes.executor.annotation."

-  private[spark] val KUBERNETES_DRIVER_POD_NAME =
+  val KUBERNETES_DRIVER_POD_NAME =
     ConfigBuilder("spark.kubernetes.driver.pod.name")
       .doc("Name of the driver pod.")
       .stringConf
       .createOptional

-  private[spark] val KUBERNETES_EXECUTOR_POD_NAME_PREFIX =
+  val KUBERNETES_EXECUTOR_POD_NAME_PREFIX =
     ConfigBuilder("spark.kubernetes.executor.podNamePrefix")
       .doc("Prefix to use in front of the executor pod names.")
       .internal()
       .stringConf
       .createWithDefault("spark")

-  private[spark] val KUBERNETES_ALLOCATION_BATCH_SIZE =
+  val KUBERNETES_ALLOCATION_BATCH_SIZE =
     ConfigBuilder("spark.kubernetes.allocation.batch.size")
       .doc("Number of pods to launch at once in each round of executor allocation.")
       .intConf
       .checkValue(value => value > 0, "Allocation batch size should be a positive integer")
       .createWithDefault(5)

-  private[spark] val KUBERNETES_ALLOCATION_BATCH_DELAY =
+  val KUBERNETES_ALLOCATION_BATCH_DELAY =
     ConfigBuilder("spark.kubernetes.allocation.batch.delay")
       .doc("Number of seconds to wait between each round of executor allocation.")
       .longConf
       .checkValue(value => value > 0, s"Allocation batch delay should be a positive integer")
       .createWithDefault(1)

-  private[spark] val KUBERNETES_EXECUTOR_LIMIT_CORES =
+  val KUBERNETES_EXECUTOR_LIMIT_CORES =
     ConfigBuilder("spark.kubernetes.executor.limit.cores")
       .doc("Specify the hard cpu limit for a single executor pod")
       .stringConf
       .createOptional

-  private[spark] val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
+  val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
 }
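For context, a minimal sketch of how entries like these are typically consumed; this is not part of the PR. It assumes code living in the `org.apache.spark.deploy.k8s` package (the hypothetical `ConfigUsageExample` object and `summarize` method are illustrative only), since `SparkConf.get(ConfigEntry)` is `private[spark]` and therefore only visible inside the `org.apache.spark` namespace.

package org.apache.spark.deploy.k8s

import org.apache.spark.SparkConf
import org.apache.spark.deploy.k8s.config._

// Illustrative sketch, not part of this change: reading the ConfigEntry
// values defined above. Typed accessors return the defaults and Options
// declared by each ConfigBuilder.
private[spark] object ConfigUsageExample {
  def summarize(conf: SparkConf): Unit = {
    // createWithDefault("default") means this never returns null.
    val namespace: String = conf.get(KUBERNETES_NAMESPACE)

    // createOptional yields an Option; None if the user did not set it.
    val driverPod: Option[String] = conf.get(KUBERNETES_DRIVER_POD_NAME)

    // checkValue has already rejected non-positive values by this point.
    val batchSize: Int = conf.get(KUBERNETES_ALLOCATION_BATCH_SIZE)

    // Prefix-style settings such as spark.kubernetes.executor.label.team=infra
    // have no ConfigEntry; getAllWithPrefix strips the prefix, so the pair
    // above comes back as ("team", "infra").
    val executorLabels: Map[String, String] =
      conf.getAllWithPrefix(KUBERNETES_EXECUTOR_LABEL_PREFIX).toMap

    logInfo(s"namespace=$namespace driverPod=$driverPod " +
      s"batchSize=$batchSize labels=$executorLabels")
  }
}

This also illustrates why the PR can drop the per-field `private[spark]` modifiers: marking the enclosing `object config` itself `private[spark]` hides every member from external code in one place, while callers inside the Spark namespace keep the same access they had before.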