This repository has been archived by the owner on Mar 17, 2020. It is now read-only.

Refact zepp #8

Merged on Jul 5, 2019 (8 commits). The diff below shows the changes from all commits.
@@ -0,0 +1,13 @@
name: zeppelin-spark
version: 0.0.24
description: An umbrella Helm chart for Zeppelin and Spark
home: https://banzaicloud.com
sources:
- https://github.com/banzaicloud/zeppelin
- https://github.com/banzaicloud/spark
keywords:
- spark
- zeppelin
maintainers:
- name: Banzai Cloud
  email: [email protected]
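
A chart with this metadata would typically be installed by name and version; a hypothetical Helm 2 style invocation (the repository URL, alias and release name are assumptions, not taken from this PR):

helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com   # assumed chart repo
helm install --name zeppelin-spark banzaicloud-stable/zeppelin-spark --version 0.0.24
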
@@ -0,0 +1,13 @@
name: spark
version: 0.0.19
description: A Helm chart for Spark in Kubernetes
home: https://banzaicloud.com
sources:
- https://github.com/banzaicloud/spark
keywords:
- spark
- spotguide
icon: https://spark.apache.org/images/spark-logo-trademark.png
maintainers:
- name: Banzai Cloud
  email: [email protected]
@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
@@ -0,0 +1,12 @@
name: spark-hs
version: 0.2.3
description: A Helm chart for Spark HS in Kubernetes
home: https://banzaicloud.com
sources:
- https://github.com/banzaicloud/spark
keywords:
- spark
- history-server
maintainers:
- name: Banzai Cloud
  email: [email protected]
@@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "spark-hs.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "spark-hs.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "spark-hs.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
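
To make the truncation/containment logic above concrete, a quick sketch of what these helpers would render for this chart (release names are hypothetical):

# chart name: spark-hs, nameOverride/fullnameOverride unset
# release "history"        -> spark-hs.fullname: history-spark-hs
# release "spark-hs-prod"  -> spark-hs.fullname: spark-hs-prod   (release already contains the chart name)
# spark-hs.chart           -> spark-hs-0.2.3
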
@@ -0,0 +1,94 @@
{{- define "logDirectoryTag" }}
{{- if eq .Values.sparkEventLogStorage.cloudProvider "amazon"}}
{{- printf "s3a://%s" .Values.sparkEventLogStorage.logDirectory }}
{{- else if eq .Values.sparkEventLogStorage.cloudProvider "alibaba"}}
{{- printf "oss://%s" .Values.sparkEventLogStorage.logDirectory }}
{{- else if eq .Values.sparkEventLogStorage.cloudProvider "azure"}}
{{- printf "wasb://%s@%s.blob.core.windows.net" .Values.sparkEventLogStorage.logDirectory .Values.sparkEventLogStorage.azureStorageAccountName }}
{{- else if eq .Values.sparkEventLogStorage.cloudProvider "google"}}
{{- printf "gs://%s" .Values.sparkEventLogStorage.logDirectory }}
{{- else if eq .Values.sparkEventLogStorage.cloudProvider "oracle"}}
{{- printf "oci://%s@%s" .Values.sparkEventLogStorage.logDirectory .Values.sparkEventLogStorage.oracleNamespace }}
{{- end }}
{{- end }}
{{- $schema := .Values.sparkEventLogStorage.cloudProvider }}
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: {{ template "spark-hs.fullname" . }}
  labels:
    app: {{ template "spark-hs.fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
spec:
  replicas: {{ .Values.replicaCount }}
  template:
    metadata:
      labels:
        app: {{ template "spark-hs.fullname" . }}
        chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
        release: "{{ .Release.Name }}"
        heritage: "{{ .Release.Service }}"
    spec:
      initContainers:
        - name: {{ .Chart.Name }}-init-config
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          envFrom:
            - secretRef:
                name: {{ default (printf "%s" (include "spark-hs.fullname" .)) .Values.sparkEventLogStorage.secretName }}
          command:
            - '/bin/sh'
            - '-c'
            - >
              envsubst < /opt/spark/prepared_conf/spark-defaults.conf > /opt/spark/conf/spark-defaults.conf;
          volumeMounts:
            - name: config-volume
              mountPath: /opt/spark/conf/
            - name: config-init-volume
              mountPath: /opt/spark/prepared_conf/spark-defaults.conf
              subPath: spark-defaults.conf
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            - '/opt/spark/sbin/start-history-server.sh'
          env:
            - name: SPARK_NO_DAEMONIZE
              value: "false"
            - name: SPARK_HISTORY_OPTS
              value: {{ printf "-Dspark.history.fs.logDirectory=%s" (include "logDirectoryTag" . )}}
          volumeMounts:
            - name: config-volume
              mountPath: /opt/spark/conf/spark-defaults.conf
              subPath: spark-defaults.conf
{{- if eq $schema "google" }}
            - name: secret-volume
              mountPath: /opt/spark/conf/secret/google.json
              subPath: google.json
{{- else if eq $schema "oracle" }}
            - name: secret-volume
              mountPath: /opt/spark/conf/secret/api_key
              subPath: api_key
{{- end }}
          ports:
            - name: http
              containerPort: {{ .Values.service.internalPort }}
              protocol: TCP
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          resources:
{{ toYaml .Values.resources | indent 12 }}
      volumes:
        - name: config-volume
          emptyDir: {}
        - name: config-init-volume
          configMap:
            name: {{ template "spark-hs.fullname" . }}-spark-hs-config
{{- if or (eq $schema "google") ((eq $schema "oracle")) }}
        - name: secret-volume
          secret:
            secretName: {{ default (printf "%s" (include "spark-hs.fullname" .)) .Values.sparkEventLogStorage.secretName }}
{{- end }}
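
For reference, a minimal sketch of the sparkEventLogStorage values this Deployment consumes, and the history-server option the logDirectoryTag helper would render from them (the bucket name is hypothetical, not part of this PR):

sparkEventLogStorage:
  cloudProvider: amazon
  logDirectory: my-spark-logs/eventlog   # hypothetical bucket/prefix
  secretName: ""                         # empty lets the chart create its own Secret (see the secret template below)

# rendered into the main container:
#   SPARK_HISTORY_OPTS=-Dspark.history.fs.logDirectory=s3a://my-spark-logs/eventlog
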
@@ -0,0 +1,39 @@
{{- if .Values.ingress.enabled -}}
{{- $serviceName := include "spark-hs.fullname" . -}}
{{- $servicePort := .Values.service.externalPort -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ template "spark-hs.fullname" . }}
  labels:
    app: {{ template "spark-hs.name" . }}
    chart: {{ template "spark-hs.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
  {{- $url := splitList "/" . }}
    - host: {{ first $url }}
      http:
        paths:
          - path: /{{ rest $url | join "/" }}
            backend:
              serviceName: {{ $serviceName }}
              servicePort: {{ $servicePort }}
  {{- end}}
{{- end }}
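
The rules block splits each entry of .Values.ingress.hosts on "/", using the first element as the host and the remainder as the path; a hypothetical value (the hostname is an assumption):

ingress:
  enabled: true
  hosts:
    - spark-hs.example.com/history   # -> host: spark-hs.example.com, path: /history
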
@@ -0,0 +1,34 @@
{{- if eq .Values.sparkEventLogStorage.secretName "" }}
{{- $schema := .Values.sparkEventLogStorage.cloudProvider }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "spark-hs.fullname" . }}
  labels:
    app: {{ template "spark-hs.fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
type: Opaque
data:
{{- if eq $schema "amazon" }}
  AWS_ACCESS_KEY_ID: {{ .Values.sparkEventLogStorage.awsAccessKeyId | b64enc | quote }}
  AWS_SECRET_ACCESS_KEY: {{ .Values.sparkEventLogStorage.awsSecretAccessKey | b64enc | quote }}
  {{- if ne .Values.sparkEventLogStorage.endpoint "" }}
  ENDPOINT: {{ .Values.sparkEventLogStorage.endpoint | b64enc | quote }}
  {{- end}}
{{- else if eq $schema "azure" }}
  storageAccount: {{ .Values.sparkEventLogStorage.azureStorageAccountName | b64enc | quote }}
  accessKey: {{ .Values.sparkEventLogStorage.azureStorageAccessKey | b64enc | quote }}
{{- else if eq $schema "alibaba" }}
  ALIBABA_ACCESS_KEY_ID: {{ .Values.sparkEventLogStorage.aliAccessKeyId | b64enc | quote }}
  ALIBABA_ACCESS_KEY_SECRET: {{ .Values.sparkEventLogStorage.aliSecretAccessKey | b64enc | quote }}
{{- else if eq $schema "google" }}
  google.json: {{ .Values.sparkEventLogStorage.googleJson | quote }}
{{- else if eq $schema "oracle" }}
  api_key: {{ .Values.sparkEventLogStorage.apiKey | b64enc | quote }}
  tenancy_ocid: {{ .Values.sparkEventLogStorage.oracleTenancyId | b64enc | quote }}
  user_ocid: {{ .Values.sparkEventLogStorage.oracleUserId | b64enc | quote }}
  api_key_fingerprint: {{ .Values.sparkEventLogStorage.oracleApiKeyFingerprint | b64enc | quote }}
{{- end }}
{{- end }}
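
This Secret is only rendered while sparkEventLogStorage.secretName is empty, so pointing secretName at a pre-created Secret (with the same keys the init container expects) skips chart-managed credentials; a hypothetical example for the amazon case (the secret name and key material are assumptions):

kubectl create secret generic my-eventlog-secret \
  --from-literal=AWS_ACCESS_KEY_ID=<access key> \
  --from-literal=AWS_SECRET_ACCESS_KEY=<secret key>

sparkEventLogStorage:
  cloudProvider: amazon
  secretName: my-eventlog-secret
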
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "spark-hs.fullname" . }}
  labels:
    app: {{ template "spark-hs.fullname" . }}
    chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.externalPort }}
      targetPort: {{ .Values.service.internalPort }}
      protocol: TCP
      name: {{ .Chart.Name }}
  selector:
    app: {{ template "spark-hs.fullname" . }}
@@ -0,0 +1,38 @@
{{- $schema := .Values.sparkEventLogStorage.cloudProvider }}
kind: ConfigMap
apiVersion: v1
metadata:
  name: {{ template "spark-hs.fullname" . }}-spark-hs-config
data:
  spark-defaults.conf: |-
{{- if .Values.sparkEventLogStorage.logDirectory }}

{{- if eq $schema "amazon" }}
    spark.hadoop.fs.s3a.access.key=$AWS_ACCESS_KEY_ID
    spark.hadoop.fs.s3a.secret.key=$AWS_SECRET_ACCESS_KEY
{{- if .Values.sparkEventLogStorage.endpoint }}
    spark.hadoop.fs.s3a.endpoint=$ENDPOINT
{{- end}}
{{- else if eq $schema "alibaba" }}
    spark.hadoop.fs.oss.accessKeyId=$ALIBABA_ACCESS_KEY_ID
    spark.hadoop.fs.oss.accessKeySecret=$ALIBABA_ACCESS_KEY_SECRET
    spark.hadoop.fs.oss.impl=org.apache.hadoop.fs.aliyun.oss.AliyunOSSFileSystem
    spark.hadoop.fs.oss.endpoint={{ default (printf "oss-%s.aliyuncs.com" .Values.sparkEventLogStorage.aliOssRegion) .Values.sparkEventLogStorage.aliOssEndpoint }}

{{- else if eq $schema "azure" }}
    spark.hadoop.fs.azure.account.key.$storageAccount.blob.core.windows.net=$accessKey

{{- else if eq $schema "google" }}
    spark.hadoop.google.cloud.auth.service.account.enable=true
    spark.hadoop.google.cloud.auth.service.account.json.keyfile=/opt/spark/conf/secret/google.json

{{- else if eq $schema "oracle" }}
    spark.hadoop.fs.oci.client.hostname={{ default (printf "https://objectstorage.%s.oraclecloud.com" .Values.sparkEventLogStorage.oracleRegion) .Values.sparkEventLogStorage.oracleHost }}
    spark.hadoop.fs.oci.client.auth.tenantId=$tenancy_ocid
    spark.hadoop.fs.oci.client.auth.userId=$user_ocid
    spark.hadoop.fs.oci.client.auth.fingerprint=$api_key_fingerprint
    spark.hadoop.fs.oci.client.auth.pemfilepath=/opt/spark/conf/secret/api_key

{{- end }}

{{- end }}
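
After the init container runs envsubst over this template, the history server sees a plain spark-defaults.conf; roughly, for the amazon case (the values are placeholders substituted from the Secret's environment variables):

spark.hadoop.fs.s3a.access.key=<AWS_ACCESS_KEY_ID from the Secret>
spark.hadoop.fs.s3a.secret.key=<AWS_SECRET_ACCESS_KEY from the Secret>
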